xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp (revision 6be3386466ab79a84b48429ae66244f21526d3df)
1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/IntrinsicsARM.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/MC/SubtargetFeature.h"
28 #include "llvm/Support/Casting.h"
29 #include "llvm/Support/MachineValueType.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Transforms/Utils/LoopUtils.h"
32 #include <algorithm>
33 #include <cassert>
34 #include <cstdint>
35 #include <utility>
36 
37 using namespace llvm;
38 
39 #define DEBUG_TYPE "armtti"
40 
41 static cl::opt<bool> EnableMaskedLoadStores(
42   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
43   cl::desc("Enable the generation of masked loads and stores"));
44 
45 static cl::opt<bool> DisableLowOverheadLoops(
46   "disable-arm-loloops", cl::Hidden, cl::init(false),
47   cl::desc("Disable the generation of low-overhead loops"));
48 
49 extern cl::opt<TailPredication::Mode> EnableTailPredication;
50 
51 extern cl::opt<bool> EnableMaskedGatherScatters;
52 
53 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
54                                      const Function *Callee) const {
55   const TargetMachine &TM = getTLI()->getTargetMachine();
56   const FeatureBitset &CallerBits =
57       TM.getSubtargetImpl(*Caller)->getFeatureBits();
58   const FeatureBitset &CalleeBits =
59       TM.getSubtargetImpl(*Callee)->getFeatureBits();
60 
61   // To inline a callee, all features not in the allowed list must match exactly.
62   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
63                     (CalleeBits & ~InlineFeaturesAllowed);
64   // For features in the allowed list, the callee's features must be a subset of
65   // the caller's.
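     // e.g. a callee that requires only a subset of the caller's allowed features
     // can still be inlined; a mismatch in any feature outside that list blocks
     // inlining.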
66   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
67                      (CalleeBits & InlineFeaturesAllowed);
68   return MatchExact && MatchSubset;
69 }
70 
71 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
72   if (L->getHeader()->getParent()->hasOptSize())
73     return false;
74   if (ST->hasMVEIntegerOps())
75     return false;
76   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
77 }
78 
79 bool ARMTTIImpl::shouldFavorPostInc() const {
80   if (ST->hasMVEIntegerOps())
81     return true;
82   return false;
83 }
84 
85 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
86                               TTI::TargetCostKind CostKind) {
87   assert(Ty->isIntegerTy());
88 
89   unsigned Bits = Ty->getPrimitiveSizeInBits();
90   if (Bits == 0 || Imm.getActiveBits() >= 64)
91     return 4;
92 
93   int64_t SImmVal = Imm.getSExtValue();
94   uint64_t ZImmVal = Imm.getZExtValue();
95   if (!ST->isThumb()) {
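       // A value that fits in 16 bits, an ARM modified immediate, or the bitwise
       // complement of one (materialised with MVN) is treated as a single
       // instruction.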
96     if ((SImmVal >= 0 && SImmVal < 65536) ||
97         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
98         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
99       return 1;
100     return ST->hasV6T2Ops() ? 2 : 3;
101   }
102   if (ST->isThumb2()) {
103     if ((SImmVal >= 0 && SImmVal < 65536) ||
104         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
105         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
106       return 1;
107     return ST->hasV6T2Ops() ? 2 : 3;
108   }
109   // Thumb1: any i8 imm costs 1.
110   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
111     return 1;
112   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
113     return 2;
114   // Load from constantpool.
115   return 3;
116 }
117 
118 // Constants smaller than 256 fit in the immediate field of
119 // Thumb1 instructions, so we return a cost of zero for them and 1 otherwise.
120 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
121                                       const APInt &Imm, Type *Ty) {
122   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
123     return 0;
124 
125   return 1;
126 }
127 
128 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
129                                   Type *Ty, TTI::TargetCostKind CostKind) {
130   // Division by a constant can be turned into multiplication, but only if we
131   // know it's constant. So it's not so much that the immediate is cheap (it's
132   // not), but that the alternative is worse.
133   // FIXME: this is probably unneeded with GlobalISel.
134   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
135        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
136       Idx == 1)
137     return 0;
138 
139   if (Opcode == Instruction::And) {
140     // UXTB/UXTH
141     if (Imm == 255 || Imm == 65535)
142       return 0;
143     // Conversion to BIC is free, and means we can use ~Imm instead.
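        // e.g. an AND with 0xffffff00 becomes BIC with the immediate 0xff.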
144     return std::min(getIntImmCost(Imm, Ty, CostKind),
145                     getIntImmCost(~Imm, Ty, CostKind));
146   }
147 
148   if (Opcode == Instruction::Add)
149     // Conversion to SUB is free, and means we can use -Imm instead.
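        // e.g. an add of -8 becomes a sub of #8.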
150     return std::min(getIntImmCost(Imm, Ty, CostKind),
151                     getIntImmCost(-Imm, Ty, CostKind));
152 
153   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
154       Ty->getIntegerBitWidth() == 32) {
155     int64_t NegImm = -Imm.getSExtValue();
156     if (ST->isThumb2() && NegImm < 1<<12)
157       // icmp X, #-C -> cmn X, #C
158       return 0;
159     if (ST->isThumb() && NegImm < 1<<8)
160       // icmp X, #-C -> adds X, #C
161       return 0;
162   }
163 
164   // xor a, -1 can always be folded to MVN
165   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
166     return 0;
167 
168   return getIntImmCost(Imm, Ty, CostKind);
169 }
170 
171 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
172                                  TTI::TargetCostKind CostKind,
173                                  const Instruction *I) {
174   int ISD = TLI->InstructionOpcodeToISD(Opcode);
175   assert(ISD && "Invalid opcode");
176 
177   // TODO: Allow non-throughput costs that aren't binary.
178   auto AdjustCost = [&CostKind](int Cost) {
179     if (CostKind != TTI::TCK_RecipThroughput)
180       return Cost == 0 ? 0 : 1;
181     return Cost;
182   };
183 
184   EVT SrcTy = TLI->getValueType(DL, Src);
185   EVT DstTy = TLI->getValueType(DL, Dst);
186 
187   if (!SrcTy.isSimple() || !DstTy.isSimple())
188     return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));
189 
190   // The extend of a load is free
191   if (I && isa<LoadInst>(I->getOperand(0))) {
192     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
193         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
194         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
195         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
196         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
197         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
198         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
199         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
200         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
201         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
202         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
203         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
204         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
205     };
206     if (const auto *Entry = ConvertCostTableLookup(
207             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
208       return AdjustCost(Entry->Cost);
209 
210     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
211         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
212         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
213         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
214         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
215         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
216         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
217         // The following entries extend from a legal type to an illegal type, so
218         // the load needs to be split. This introduces an extra load operation, but
219         // the extend is still "free".
220         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
221         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
222         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
223         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
224         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
225         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
226     };
227     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
228       if (const auto *Entry =
229               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
230                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
231         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
232     }
233 
234     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
235         // FPExtends are similar but also require the VCVT instructions.
236         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
237         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
238     };
239     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
240       if (const auto *Entry =
241               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
242                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
243         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
244     }
245   }
246 
247   // The truncate of a store is free. This is the mirror of extends above.
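      // Note that the wider (source) type is listed first in these tables, and the
      // lookups below are keyed on (source, destination) to match.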
248   if (I && I->hasOneUse() && isa<StoreInst>(*I->user_begin())) {
249     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
250         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
251         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
252         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
253         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
254         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
255         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
256     };
257     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
258       if (const auto *Entry =
259               ConvertCostTableLookup(MVEStoreConversionTbl, ISD, SrcTy.getSimpleVT(),
260                                      DstTy.getSimpleVT()))
261         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
262     }
263 
264     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
265         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
266         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
267     };
268     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
269       if (const auto *Entry =
270               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD, SrcTy.getSimpleVT(),
271                                      DstTy.getSimpleVT()))
272         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
273     }
274   }
275 
276   // NEON vector operations that can extend their inputs.
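      // e.g. the extend of a v8i8 operand feeding a multiply can be folded into
      // vmull, so the extend itself costs nothing.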
277   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
278       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
279     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
280       // vaddl
281       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
282       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
283       // vsubl
284       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
285       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
286       // vmull
287       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
288       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
289       // vshll
290       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
291       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
292     };
293 
294     auto *User = cast<Instruction>(*I->user_begin());
295     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
296     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
297                                              DstTy.getSimpleVT(),
298                                              SrcTy.getSimpleVT())) {
299       return AdjustCost(Entry->Cost);
300     }
301   }
302 
303   // Single to/from double precision conversions.
304   if (Src->isVectorTy() && ST->hasNEON() &&
305       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
306         DstTy.getScalarType() == MVT::f32) ||
307        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
308         DstTy.getScalarType() == MVT::f64))) {
309     static const CostTblEntry NEONFltDblTbl[] = {
310         // Vector fptrunc/fpext conversions.
311         {ISD::FP_ROUND, MVT::v2f64, 2},
312         {ISD::FP_EXTEND, MVT::v2f32, 2},
313         {ISD::FP_EXTEND, MVT::v4f32, 4}};
314 
315     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
316     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
317       return AdjustCost(LT.first * Entry->Cost);
318   }
319 
320   // Some arithmetic, load and store operations have specific instructions
321   // to cast up/down their types automatically at no extra cost.
322   // TODO: Get these tables to know at least what the related operations are.
323   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
324     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
325     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
326     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
327     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
328     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
329     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
330 
331     // The number of vmovl instructions for the extension.
332     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
333     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
334     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
335     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
336     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
337     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
338     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
339     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
340     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
341     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
342     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
343     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
344     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
345     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
346     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
347     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
348     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
349     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
350 
351     // Operations that we legalize using splitting.
352     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
353     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
354 
355     // Vector float <-> i32 conversions.
356     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
357     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
358 
359     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
360     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
361     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
362     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
363     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
364     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
365     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
366     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
367     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
368     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
369     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
370     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
371     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
372     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
373     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
374     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
375     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
376     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
377     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
378     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
379 
380     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
381     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
382     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
383     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
384     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
385     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
386 
387     // Vector double <-> i32 conversions.
388     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
389     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
390 
391     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
392     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
393     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
394     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
395     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
396     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
397 
398     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
399     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
400     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
401     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
402     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
403     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
404   };
405 
406   if (SrcTy.isVector() && ST->hasNEON()) {
407     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
408                                                    DstTy.getSimpleVT(),
409                                                    SrcTy.getSimpleVT()))
410       return AdjustCost(Entry->Cost);
411   }
412 
413   // Scalar float to integer conversions.
414   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
415     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
416     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
417     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
418     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
419     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
420     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
421     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
422     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
423     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
424     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
425     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
426     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
427     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
428     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
429     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
430     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
431     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
432     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
433     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
434     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
435   };
436   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
437     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
438                                                    DstTy.getSimpleVT(),
439                                                    SrcTy.getSimpleVT()))
440       return AdjustCost(Entry->Cost);
441   }
442 
443   // Scalar integer to float conversions.
444   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
445     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
446     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
447     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
448     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
449     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
450     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
451     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
452     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
453     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
454     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
455     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
456     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
457     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
458     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
459     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
460     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
461     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
462     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
463     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
464     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
465   };
466 
467   if (SrcTy.isInteger() && ST->hasNEON()) {
468     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
469                                                    ISD, DstTy.getSimpleVT(),
470                                                    SrcTy.getSimpleVT()))
471       return AdjustCost(Entry->Cost);
472   }
473 
474   // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
475   // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
476   // are linearised and so cost more.
477   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
478     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
479     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
480     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
481     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
482     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
483     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
484     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
485     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
486     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
487     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
488     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
489     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
490   };
491 
492   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
493     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
494                                                    ISD, DstTy.getSimpleVT(),
495                                                    SrcTy.getSimpleVT()))
496       return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
497   }
498 
499   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
500     // As a general rule, fp conversions that were not matched above are
501     // scalarized and cost 1 vcvt for each lane, so long as the instruction is
502     // available. If not, they become a series of function calls.
503     const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
504     int Lanes = 1;
505     if (SrcTy.isFixedLengthVector())
506       Lanes = SrcTy.getVectorNumElements();
507     auto IsLegal = [this](EVT VT) {
508       EVT EltVT = VT.getScalarType();
509       return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
510              (EltVT == MVT::f64 && ST->hasFP64()) ||
511              (EltVT == MVT::f16 && ST->hasFullFP16());
512     };
513 
514     if (IsLegal(SrcTy) && IsLegal(DstTy))
515       return Lanes;
516     else
517       return Lanes * CallCost;
518   }
519 
520   // Scalar integer conversion costs.
521   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
522     // i16 -> i64 requires two dependent operations.
523     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
524 
525     // Truncates on i64 are assumed to be free.
526     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
527     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
528     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
529     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
530   };
531 
532   if (SrcTy.isInteger()) {
533     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
534                                                    DstTy.getSimpleVT(),
535                                                    SrcTy.getSimpleVT()))
536       return AdjustCost(Entry->Cost);
537   }
538 
539   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
540                      ? ST->getMVEVectorCostFactor()
541                      : 1;
542   return AdjustCost(
543     BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));
544 }
545 
546 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
547                                    unsigned Index) {
548   // Penalize inserting into a D-subregister. We end up with a three times
549   // lower estimated throughput on Swift.
550   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
551       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
552     return 3;
553 
554   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
555                         Opcode == Instruction::ExtractElement)) {
556     // Cross-class copies are expensive on many microarchitectures,
557     // so assume they are expensive by default.
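        // e.g. moving a lane between a NEON register and a core register goes
        // through VMOV, which is slow on several cores.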
558     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
559       return 3;
560 
561     // Even if it's not a cross-class copy, this likely leads to mixing
562     // of NEON and VFP code and should therefore be penalized.
563     if (ValTy->isVectorTy() &&
564         ValTy->getScalarSizeInBits() <= 32)
565       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
566   }
567 
568   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
569                                  Opcode == Instruction::ExtractElement)) {
570     // We say MVE moves cost at least the MVEVectorCostFactor, even though
571     // they are scalar instructions. This helps prevent mixing scalar and
572     // vector code, and so avoids vectorising where we would end up just
573     // scalarising the result anyway.
574     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
575                     ST->getMVEVectorCostFactor()) *
576            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
577   }
578 
579   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
580 }
581 
582 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
583                                    TTI::TargetCostKind CostKind,
584                                    const Instruction *I) {
585   // TODO: Handle other cost kinds.
586   if (CostKind != TTI::TCK_RecipThroughput)
587     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
588 
589   int ISD = TLI->InstructionOpcodeToISD(Opcode);
590   // On NEON a vector select gets lowered to vbsl.
591   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
592     // Lowering of some vector selects is currently far from perfect.
593     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
594       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
595       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
596       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
597     };
598 
599     EVT SelCondTy = TLI->getValueType(DL, CondTy);
600     EVT SelValTy = TLI->getValueType(DL, ValTy);
601     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
602       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
603                                                      SelCondTy.getSimpleVT(),
604                                                      SelValTy.getSimpleVT()))
605         return Entry->Cost;
606     }
607 
608     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
609     return LT.first;
610   }
611 
612   int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
613                      ? ST->getMVEVectorCostFactor()
614                      : 1;
615   return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind,
616                                               I);
617 }
618 
619 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
620                                           const SCEV *Ptr) {
621   // Address computations in vectorized code with non-consecutive addresses will
622   // likely result in more instructions compared to scalar code where the
623   // computation can more often be merged into the index mode. The resulting
624   // extra micro-ops can significantly decrease throughput.
625   unsigned NumVectorInstToHideOverhead = 10;
626   int MaxMergeDistance = 64;
627 
628   if (ST->hasNEON()) {
629     if (Ty->isVectorTy() && SE &&
630         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
631       return NumVectorInstToHideOverhead;
632 
633     // In many cases the address computation is not merged into the instruction
634     // addressing mode.
635     return 1;
636   }
637   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
638 }
639 
640 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
641   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
642     // If a VCTP is part of a chain, it's already profitable and shouldn't be
643     // optimized, else LSR may block tail-predication.
644     switch (II->getIntrinsicID()) {
645     case Intrinsic::arm_mve_vctp8:
646     case Intrinsic::arm_mve_vctp16:
647     case Intrinsic::arm_mve_vctp32:
648     case Intrinsic::arm_mve_vctp64:
649       return true;
650     default:
651       break;
652     }
653   }
654   return false;
655 }
656 
657 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
658   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
659     return false;
660 
661   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
662     // Don't support v2i1 yet.
663     if (VecTy->getNumElements() == 2)
664       return false;
665 
666     // We don't support extending fp types.
667     unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
668     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
669       return false;
670   }
671 
672   unsigned EltWidth = DataTy->getScalarSizeInBits();
673   return (EltWidth == 32 && Alignment >= 4) ||
674          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
675 }
676 
677 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
678   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
679     return false;
680 
681   // This method is called in 2 places:
682   //  - from the vectorizer with a scalar type, in which case we need to get
683   //  this as good as we can with the limited info we have (and rely on the cost
684   //  model for the rest).
685   //  - from the masked intrinsic lowering pass with the actual vector type.
686   // For MVE, we have a custom lowering pass that will already have custom
687   // legalised any gathers that we can to MVE intrinsics, and want to expand all
688   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
689   // are here, we know we want to expand.
690   if (isa<VectorType>(Ty))
691     return false;
692 
693   unsigned EltWidth = Ty->getScalarSizeInBits();
694   return ((EltWidth == 32 && Alignment >= 4) ||
695           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
696 }
697 
698 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
699   const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
700   assert(MI && "MemcpyInst expected");
701   ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());
702 
703   // To model the cost of a library call, we assume 1 for the call, and
704   // 3 for the argument setup.
705   const unsigned LibCallCost = 4;
706 
707   // If 'size' is not a constant, a library call will be generated.
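      // (on AEABI targets this is typically a call to __aeabi_memcpy or plain memcpy)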
708   if (!C)
709     return LibCallCost;
710 
711   const unsigned Size = C->getValue().getZExtValue();
712   const Align DstAlign = *MI->getDestAlign();
713   const Align SrcAlign = *MI->getSourceAlign();
714   const Function *F = I->getParent()->getParent();
715   const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
716   std::vector<EVT> MemOps;
717 
718   // MemOps will be populated with a list of data types that need to be
719   // loaded and stored. That's why we multiply the number of elements by 2 to
720   // get the cost for this memcpy.
721   if (getTLI()->findOptimalMemOpLowering(
722           MemOps, Limit,
723           MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
724                       /*IsVolatile*/ true),
725           MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
726           F->getAttributes()))
727     return MemOps.size() * 2;
728 
729   // If we can't find an optimal memop lowering, return the default cost
730   return LibCallCost;
731 }
732 
733 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
734                                int Index, VectorType *SubTp) {
735   if (ST->hasNEON()) {
736     if (Kind == TTI::SK_Broadcast) {
737       static const CostTblEntry NEONDupTbl[] = {
738           // VDUP handles these cases.
739           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
740           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
741           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
742           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
743           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
744           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
745 
746           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
747           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
748           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
749           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
750 
751       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
752 
753       if (const auto *Entry =
754               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
755         return LT.first * Entry->Cost;
756     }
757     if (Kind == TTI::SK_Reverse) {
758       static const CostTblEntry NEONShuffleTbl[] = {
759           // Reverse shuffle cost one instruction if we are shuffling within a
760           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
761           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
762           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
763           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
764           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
765           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
766           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
767 
768           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
769           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
770           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
771           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
772 
773       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
774 
775       if (const auto *Entry =
776               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
777         return LT.first * Entry->Cost;
778     }
779     if (Kind == TTI::SK_Select) {
780       static const CostTblEntry NEONSelShuffleTbl[] = {
781           // Select shuffle cost table for ARM. Cost is the
782           // number of instructions required to create the
783           // shuffled vector.
784 
785           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
786           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
787           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
788           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
789 
790           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
791           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
792           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
793 
794           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
795 
796           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
797 
798       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
799       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
800                                               ISD::VECTOR_SHUFFLE, LT.second))
801         return LT.first * Entry->Cost;
802     }
803   }
804   if (ST->hasMVEIntegerOps()) {
805     if (Kind == TTI::SK_Broadcast) {
806       static const CostTblEntry MVEDupTbl[] = {
807           // VDUP handles these cases.
808           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
809           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
810           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
811           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
812           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
813 
814       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
815 
816       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
817                                               LT.second))
818         return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
819     }
820   }
821   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
822                      ? ST->getMVEVectorCostFactor()
823                      : 1;
824   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
825 }
826 
827 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
828                                        TTI::TargetCostKind CostKind,
829                                        TTI::OperandValueKind Op1Info,
830                                        TTI::OperandValueKind Op2Info,
831                                        TTI::OperandValueProperties Opd1PropInfo,
832                                        TTI::OperandValueProperties Opd2PropInfo,
833                                        ArrayRef<const Value *> Args,
834                                        const Instruction *CxtI) {
835   // TODO: Handle more cost kinds.
836   if (CostKind != TTI::TCK_RecipThroughput)
837     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
838                                          Op2Info, Opd1PropInfo,
839                                          Opd2PropInfo, Args, CxtI);
840 
841   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
842   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
843 
844   if (ST->hasNEON()) {
845     const unsigned FunctionCallDivCost = 20;
846     const unsigned ReciprocalDivCost = 10;
847     static const CostTblEntry CostTbl[] = {
848       // Division.
849       // These costs are somewhat random. Choose a cost of 20 to indicate that
850       // vectorizing division (added function call) is going to be very expensive.
851       // Double register types.
852       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
853       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
854       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
855       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
856       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
857       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
858       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
859       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
860       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
861       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
862       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
863       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
864       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
865       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
866       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
867       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
868       // Quad register types.
869       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
870       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
871       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
872       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
873       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
874       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
875       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
876       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
877       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
878       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
879       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
880       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
881       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
882       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
883       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
884       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
885       // Multiplication.
886     };
887 
888     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
889       return LT.first * Entry->Cost;
890 
891     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
892                                              Op2Info,
893                                              Opd1PropInfo, Opd2PropInfo);
894 
895     // This is somewhat of a hack. The problem that we are facing is that SROA
896     // creates a sequence of shift/and/or instructions to construct values.
897     // These sequences are recognized by the ISel and have zero-cost. Not so for
898     // the vectorized code. Because we have support for v2i64 but not i64 those
899     // sequences look particularly beneficial to vectorize.
900     // To work around this we increase the cost of v2i64 operations to make them
901     // seem less beneficial.
902     if (LT.second == MVT::v2i64 &&
903         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
904       Cost += 4;
905 
906     return Cost;
907   }
908 
909   // If this operation is a shift on arm/thumb2, it might well be folded into
910   // the following instruction, hence having a cost of 0.
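      // e.g. 'add r0, r1, r2, lsl #2' performs the shift as part of the add.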
911   auto LooksLikeAFreeShift = [&]() {
912     if (ST->isThumb1Only() || Ty->isVectorTy())
913       return false;
914 
915     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
916       return false;
917     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
918       return false;
919 
920     // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
921     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
922     case Instruction::Add:
923     case Instruction::Sub:
924     case Instruction::And:
925     case Instruction::Xor:
926     case Instruction::Or:
927     case Instruction::ICmp:
928       return true;
929     default:
930       return false;
931     }
932   };
933   if (LooksLikeAFreeShift())
934     return 0;
935 
936   int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
937                      ? ST->getMVEVectorCostFactor()
938                      : 1;
939 
940   // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
941   // without treating floats as more expensive than scalars or increasing the
942   // costs for custom operations. The result is also multiplied by the
943   // MVEVectorCostFactor where appropriate.
944   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
945     return LT.first * BaseCost;
946 
947   // Else this is expand, assume that we need to scalarize this op.
948   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
949     unsigned Num = VTy->getNumElements();
950     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
951                                            CostKind);
952     // Return the cost of multiple scalar invocation plus the cost of
953     // inserting and extracting the values.
954     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
955   }
956 
957   return BaseCost;
958 }
959 
960 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
961                                 MaybeAlign Alignment, unsigned AddressSpace,
962                                 TTI::TargetCostKind CostKind,
963                                 const Instruction *I) {
964   // TODO: Handle other cost kinds.
965   if (CostKind != TTI::TCK_RecipThroughput)
966     return 1;
967 
968   // Type legalization can't handle structs
969   if (TLI->getValueType(DL, Src, true) == MVT::Other)
970     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
971                                   CostKind);
972 
973   if (ST->hasNEON() && Src->isVectorTy() &&
974       (Alignment && *Alignment != Align(16)) &&
975       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
976     // Unaligned loads/stores are extremely inefficient.
977     // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
978     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
979     return LT.first * 4;
980   }
981 
982   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
983   // Same for stores.
984   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
985       ((Opcode == Instruction::Load && I->hasOneUse() &&
986         isa<FPExtInst>(*I->user_begin())) ||
987        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
988     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
989     Type *DstTy =
990         Opcode == Instruction::Load
991             ? (*I->user_begin())->getType()
992             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
993     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
994         DstTy->getScalarType()->isFloatTy())
995       return ST->getMVEVectorCostFactor();
996   }
997 
998   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
999                      ? ST->getMVEVectorCostFactor()
1000                      : 1;
1001   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1002                                            CostKind, I);
1003 }
1004 
1005 int ARMTTIImpl::getInterleavedMemoryOpCost(
1006     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1007     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1008     bool UseMaskForCond, bool UseMaskForGaps) {
1009   assert(Factor >= 2 && "Invalid interleave factor");
1010   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1011 
1012   // vldN/vstN don't support vector types with i64/f64 elements.
1013   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1014 
1015   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1016       !UseMaskForCond && !UseMaskForGaps) {
1017     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1018     auto *SubVecTy =
1019         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1020 
1021     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1022     // Accesses having vector types that are a multiple of 128 bits can be
1023     // matched to more than one vldN/vstN instruction.
1024     int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
1025     if (NumElts % Factor == 0 &&
1026         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
1027       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1028 
1029     // Some smaller-than-legal interleaved patterns are cheap, as we can make
1030     // use of the vmovn or vrev patterns to interleave a standard load. This is
1031     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1032     // promoted differently). The cost of 2 here is then a load and vrev or
1033     // vmovn.
1034     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1035         VecTy->isIntOrIntVectorTy() && DL.getTypeSizeInBits(SubVecTy) <= 64)
1036       return 2 * BaseCost;
1037   }
1038 
1039   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1040                                            Alignment, AddressSpace, CostKind,
1041                                            UseMaskForCond, UseMaskForGaps);
1042 }
1043 
1044 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1045                                             const Value *Ptr, bool VariableMask,
1046                                             Align Alignment,
1047                                             TTI::TargetCostKind CostKind,
1048                                             const Instruction *I) {
1049   using namespace PatternMatch;
1050   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1051     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1052                                          Alignment, CostKind, I);
1053 
1054   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1055   auto *VTy = cast<FixedVectorType>(DataTy);
1056 
1057   // TODO: Splitting, once we do that.
1058 
1059   unsigned NumElems = VTy->getNumElements();
1060   unsigned EltSize = VTy->getScalarSizeInBits();
1061   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1062 
1063   // For now, it is assumed that for the MVE gather instructions the loads are
1064   // all effectively serialised. This means the cost is the scalar cost
1065   // multiplied by the number of elements being loaded. This is possibly very
1066   // conservative, but even so we still end up vectorising loops because the
1067   // cost per iteration for many loops is lower than for scalar loops.
1068   unsigned VectorCost = NumElems * LT.first;
1069   // The scalarization cost should be a lot higher. We use the number of vector
1070   // elements plus the scalarization overhead.
1071   unsigned ScalarCost =
1072       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});
1073 
1074   if (Alignment < EltSize / 8)
1075     return ScalarCost;
1076 
1077   unsigned ExtSize = EltSize;
1078   // Check whether there's a single user that asks for an extended type
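       // e.g. a gather of 8 x i8 that is immediately zero-extended to i16 can use
       // an extending MVE gather (VLDRB.U16) instead of being scalarised.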
1079   if (I != nullptr) {
1080     // Depending on the caller of this function, a gather instruction will
1081     // either have opcode Instruction::Load or be a call to the masked_gather
1082     // intrinsic.
1083     if ((I->getOpcode() == Instruction::Load ||
1084          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1085         I->hasOneUse()) {
1086       const User *Us = *I->users().begin();
1087       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1088         // Only allow valid type combinations
1089         unsigned TypeSize =
1090             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1091         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1092              (TypeSize == 16 && EltSize == 8)) &&
1093             TypeSize * NumElems == 128) {
1094           ExtSize = TypeSize;
1095         }
1096       }
1097     }
1098     // Check whether the input data needs to be truncated
1099     TruncInst *T;
1100     if ((I->getOpcode() == Instruction::Store ||
1101          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1102         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1103       // Only allow valid type combinations
1104       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1105       if (((EltSize == 16 && TypeSize == 32) ||
1106            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1107           TypeSize * NumElems == 128)
1108         ExtSize = TypeSize;
1109     }
1110   }
1111 
1112   if (ExtSize * NumElems != 128 || NumElems < 4)
1113     return ScalarCost;
1114 
1115   // Any (aligned) i32 gather will not need to be scalarised.
1116   if (ExtSize == 32)
1117     return VectorCost;
1118   // For smaller types, we need to ensure that the gep's inputs are correctly
1119   // extended from a small enough value. Other sizes (including i64) are
1120   // scalarized for now.
1121   if (ExtSize != 8 && ExtSize != 16)
1122     return ScalarCost;
1123 
1124   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1125     Ptr = BC->getOperand(0);
1126   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1127     if (GEP->getNumOperands() != 2)
1128       return ScalarCost;
1129     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1130     // Scale needs to be correct (which is only relevant for i16s).
1131     if (Scale != 1 && Scale * 8 != ExtSize)
1132       return ScalarCost;
1133     // And we need to zext (not sext) the indexes from a small enough type.
1134     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1135       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1136         return VectorCost;
1137     }
1138     return ScalarCost;
1139   }
1140   return ScalarCost;
1141 }
1142 
1143 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1144   if (!F->isIntrinsic())
1145     return BaseT::isLoweredToCall(F);
1146 
1147   // Assume all Arm-specific intrinsics map to an instruction.
1148   if (F->getName().startswith("llvm.arm"))
1149     return false;
1150 
1151   switch (F->getIntrinsicID()) {
1152   default: break;
1153   case Intrinsic::powi:
1154   case Intrinsic::sin:
1155   case Intrinsic::cos:
1156   case Intrinsic::pow:
1157   case Intrinsic::log:
1158   case Intrinsic::log10:
1159   case Intrinsic::log2:
1160   case Intrinsic::exp:
1161   case Intrinsic::exp2:
1162     return true;
1163   case Intrinsic::sqrt:
1164   case Intrinsic::fabs:
1165   case Intrinsic::copysign:
1166   case Intrinsic::floor:
1167   case Intrinsic::ceil:
1168   case Intrinsic::trunc:
1169   case Intrinsic::rint:
1170   case Intrinsic::nearbyint:
1171   case Intrinsic::round:
1172   case Intrinsic::canonicalize:
1173   case Intrinsic::lround:
1174   case Intrinsic::llround:
1175   case Intrinsic::lrint:
1176   case Intrinsic::llrint:
1177     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1178       return true;
1179     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1180       return true;
1181     // Some operations can be handled by vector instructions; assume that
1182     // unsupported vectors will be expanded into supported scalar ones.
1183     // TODO Handle scalar operations properly.
1184     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1185   case Intrinsic::masked_store:
1186   case Intrinsic::masked_load:
1187   case Intrinsic::masked_gather:
1188   case Intrinsic::masked_scatter:
1189     return !ST->hasMVEIntegerOps();
1190   case Intrinsic::sadd_with_overflow:
1191   case Intrinsic::uadd_with_overflow:
1192   case Intrinsic::ssub_with_overflow:
1193   case Intrinsic::usub_with_overflow:
1194   case Intrinsic::sadd_sat:
1195   case Intrinsic::uadd_sat:
1196   case Intrinsic::ssub_sat:
1197   case Intrinsic::usub_sat:
1198     return false;
1199   }
1200 
1201   return BaseT::isLoweredToCall(F);
1202 }
1203 
1204 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1205                                           AssumptionCache &AC,
1206                                           TargetLibraryInfo *LibInfo,
1207                                           HardwareLoopInfo &HWLoopInfo) {
1208   // Low-overhead branches are only supported in the 'low-overhead branch'
1209   // extension of v8.1-m.
1210   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1211     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1212     return false;
1213   }
1214 
1215   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1216     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1217     return false;
1218   }
1219 
1220   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1221   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1222     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1223     return false;
1224   }
1225 
1226   const SCEV *TripCountSCEV =
1227     SE.getAddExpr(BackedgeTakenCount,
1228                   SE.getOne(BackedgeTakenCount->getType()));
1229 
1230   // We need to store the trip count in LR, a 32-bit register.
1231   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1232     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1233     return false;
1234   }
1235 
1236   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1237   // point in generating a hardware loop if that's going to happen.
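       // (LR holds the loop count for LE/LETP, and any BL clobbers it.)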
1238   auto MaybeCall = [this](Instruction &I) {
1239     const ARMTargetLowering *TLI = getTLI();
1240     unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1241     EVT VT = TLI->getValueType(DL, I.getType(), true);
1242     if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1243       return true;
1244 
1245     // Check if an intrinsic will be lowered to a call and assume that any
1246     // other CallInst will generate a bl.
1247     if (auto *Call = dyn_cast<CallInst>(&I)) {
1248       if (isa<IntrinsicInst>(Call)) {
1249         if (const Function *F = Call->getCalledFunction())
1250           return isLoweredToCall(F);
1251       }
1252       return true;
1253     }
1254 
1255     // FPv5 provides conversions between integer, double-precision,
1256     // single-precision, and half-precision formats.
1257     switch (I.getOpcode()) {
1258     default:
1259       break;
1260     case Instruction::FPToSI:
1261     case Instruction::FPToUI:
1262     case Instruction::SIToFP:
1263     case Instruction::UIToFP:
1264     case Instruction::FPTrunc:
1265     case Instruction::FPExt:
1266       return !ST->hasFPARMv8Base();
1267     }
1268 
1269     // FIXME: Unfortunately, checking the Operation Action does not catch all
1270     // cases of Legalization that use library calls. Our Legalization step
1271     // categorizes some transformations into library calls as Custom, Expand or
1272     // even Legal when doing type legalization. So for now we have to
1273     // special-case, for instance, the SDIV of 64-bit integers and the use of
1274     // floating-point emulation.
1275     if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1276       switch (ISD) {
1277       default:
1278         break;
1279       case ISD::SDIV:
1280       case ISD::UDIV:
1281       case ISD::SREM:
1282       case ISD::UREM:
1283       case ISD::SDIVREM:
1284       case ISD::UDIVREM:
1285         return true;
1286       }
1287     }
1288 
1289     // Assume all other non-float operations are supported.
1290     if (!VT.isFloatingPoint())
1291       return false;
1292 
1293     // We'll need a library call to handle most floats when using soft-float.
1294     if (TLI->useSoftFloat()) {
1295       switch (I.getOpcode()) {
1296       default:
1297         return true;
1298       case Instruction::Alloca:
1299       case Instruction::Load:
1300       case Instruction::Store:
1301       case Instruction::Select:
1302       case Instruction::PHI:
1303         return false;
1304       }
1305     }
1306 
1307     // We'll need a libcall to perform double-precision operations on a
1308     // single-precision-only FPU.
1309     if (I.getType()->isDoubleTy() && !ST->hasFP64())
1310       return true;
1311 
1312     // Likewise for half precision arithmetic.
1313     if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1314       return true;
1315 
1316     return false;
1317   };
1318 
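       // If the loop already contains hardware-loop intrinsics, it has already
       // been (or is being) converted, so don't try to form another one.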
1319   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1320     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1321       switch (Call->getIntrinsicID()) {
1322       default:
1323         break;
1324       case Intrinsic::set_loop_iterations:
1325       case Intrinsic::test_set_loop_iterations:
1326       case Intrinsic::loop_decrement:
1327       case Intrinsic::loop_decrement_reg:
1328         return true;
1329       }
1330     }
1331     return false;
1332   };
1333 
1334   // Scan the instructions to see if there are any that we know will turn into
1335   // a call, or if this loop is already a low-overhead loop.
1336   auto ScanLoop = [&](Loop *L) {
1337     for (auto *BB : L->getBlocks()) {
1338       for (auto &I : *BB) {
1339         if (MaybeCall(I) || IsHardwareLoopIntrinsic(I)) {
1340           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1341           return false;
1342         }
1343       }
1344     }
1345     return true;
1346   };
1347 
1348   // Visit inner loops.
1349   for (auto *Inner : *L)
1350     if (!ScanLoop(Inner))
1351       return false;
1352 
1353   if (!ScanLoop(L))
1354     return false;
1355 
1356   // TODO: Check whether the trip count calculation is expensive. If L is the
1357   // inner loop but we know it has a low trip count, calculating that trip
1358   // count (in the parent loop) may be detrimental.
1359 
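       // Describe the hardware loop we can form: the counter is kept in a
       // register (LR), hardware loops may not be nested, an entry test is
       // emitted, and the 32-bit counter is decremented by 1 each iteration.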
1360   LLVMContext &C = L->getHeader()->getContext();
1361   HWLoopInfo.CounterInReg = true;
1362   HWLoopInfo.IsNestingLegal = false;
1363   HWLoopInfo.PerformEntryTest = true;
1364   HWLoopInfo.CountType = Type::getInt32Ty(C);
1365   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1366   return true;
1367 }
1368 
1369 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1370   // We don't generally allow icmps, but because we only look at single-block
1371   // loops we simply count them, i.e. there should only be 1 for the backedge.
1372   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1373     return false;
1374 
1375   if (isa<FCmpInst>(&I))
1376     return false;
1377 
1378   // We could allow extending/narrowing FP loads/stores, but the codegen is
1379   // too inefficient, so we reject them for now.
1380   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1381     return false;
1382 
1383   // Extends have to be extending-loads.
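       // (This keeps a single, uniform element size in the vector loop; the
       // extend can then be folded into a widening load.)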
1384   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
1385     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1386       return false;
1387 
1388   // Truncs have to be narrowing-stores.
1389   if (isa<TruncInst>(&I))
1390     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1391       return false;
1392 
1393   return true;
1394 }
1395 
1396 // To set up a tail-predicated loop, we need to know the total number of
1397 // elements processed by that loop. Thus, we need to determine the element
1398 // size and:
1399 // 1) it should be uniform for all operations in the vector loop, so we
1400 //    don't want, for example, any widening/narrowing operations;
1401 // 2) it should be smaller than i64, because we don't have vector operations
1402 //    that work on i64; and
1403 // 3) we don't want elements to be reversed or shuffled, to make sure the
1404 //    tail-predication masks/predicates the right lanes.
1405 //
1406 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1407                                  const DataLayout &DL,
1408                                  const LoopAccessInfo *LAI) {
1409   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1410 
1411   // If there are live-out values, it is probably a reduction, which needs a
1412   // final reduction step after the loop. MVE has a VADDV instruction to reduce
1413   // integer vectors, but doesn't have an equivalent one for float vectors. A
1414   // live-out value that is not recognised as a reduction will result in the
1415   // tail-predicated loop being reverted to a non-predicated loop, and this is
1416   // very expensive, i.e. it has a significant performance impact. So, in this
1417   // case it's better not to tail-predicate the loop, which is what we check
1418   // here. Thus, we allow only 1 live-out value, which has to be an integer
1419   // reduction, which matches the loops supported by ARMLowOverheadLoops.
1420   // It is important to keep ARMLowOverheadLoops and canTailPredicateLoop in
1421   // sync with each other.
1422   SmallVector<Instruction *, 8> LiveOuts;
1423   LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
1424   bool IntReductionsDisabled =
1425       EnableTailPredication == TailPredication::EnabledNoReductions ||
1426       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
1427 
1428   for (auto *I : LiveOuts) {
1429     if (!I->getType()->isIntegerTy()) {
1430       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer "
1431                            "live-out value\n");
1432       return false;
1433     }
1434     if (I->getOpcode() != Instruction::Add) {
1435       LLVM_DEBUG(dbgs() << "Only add reductions supported\n");
1436       return false;
1437     }
1438     if (IntReductionsDisabled) {
1439       LLVM_DEBUG(dbgs() << "Integer add reductions not enabled\n");
1440       return false;
1441     }
1442   }
1443 
1444   // Next, check that all instructions can be tail-predicated.
1445   PredicatedScalarEvolution PSE = LAI->getPSE();
1446   SmallVector<Instruction *, 16> LoadStores;
1447   int ICmpCount = 0;
1448   int Stride = 0;
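       // A Stride of 0 means no (stride-1) memory access has been seen yet.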
1449 
1450   for (BasicBlock *BB : L->blocks()) {
1451     for (Instruction &I : BB->instructionsWithoutDebug()) {
1452       if (isa<PHINode>(&I))
1453         continue;
1454       if (!canTailPredicateInstruction(I, ICmpCount)) {
1455         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1456         return false;
1457       }
1458 
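           // For memory accesses, check the size of the element being accessed
           // rather than the pointer itself.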
1459       Type *T = I.getType();
1460       if (T->isPointerTy())
1461         T = T->getPointerElementType();
1462 
1463       if (T->getScalarSizeInBits() > 32) {
1464         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1465         return false;
1466       }
1467 
1468       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
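             // The pointer is operand 0 of a load and operand 1 of a store.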
1469         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1470         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1471         // TODO: for now only allow consecutive strides of 1. We could support
1472         // other strides as long as it is uniform, but let's keep it simple for
1473         // now.
1474         if (Stride == 0 && NextStride == 1) {
1475           Stride = NextStride;
1476           continue;
1477         }
1478         if (Stride != NextStride) {
1479           LLVM_DEBUG(dbgs() << "Different strides found, can't "
1480                                "tail-predicate.\n");
1481           return false;
1482         }
1483       }
1484     }
1485   }
1486 
1487   LLVM_DEBUG(dbgs() << "Tail-predication: all instructions allowed!\n");
1488   return true;
1489 }
1490 
1491 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1492                                              ScalarEvolution &SE,
1493                                              AssumptionCache &AC,
1494                                              TargetLibraryInfo *TLI,
1495                                              DominatorTree *DT,
1496                                              const LoopAccessInfo *LAI) {
1497   if (!EnableTailPredication) {
1498     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
1499     return false;
1500   }
1501 
1502   // Creating a predicated vector loop is the first step for generating a
1503   // tail-predicated hardware loop, for which we need the MVE masked
1504   // load/store instructions.
1505   if (!ST->hasMVEIntegerOps())
1506     return false;
1507 
1508   // For now, restrict this to single block loops.
1509   if (L->getNumBlocks() > 1) {
1510     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1511                          "loop.\n");
1512     return false;
1513   }
1514 
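       // Tail-predication is only considered for innermost loops.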
1515   assert(L->empty() && "preferPredicateOverEpilogue: inner-loop expected");
1516 
1517   HardwareLoopInfo HWLoopInfo(L);
1518   if (!HWLoopInfo.canAnalyze(*LI)) {
1519     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1520                          "analyzable.\n");
1521     return false;
1522   }
1523 
1524   // This checks if we have the low-overhead branch architecture
1525   // extension, and if we will create a hardware loop.
1526   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1527     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1528                          "profitable.\n");
1529     return false;
1530   }
1531 
1532   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1533     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1534                          "a candidate.\n");
1535     return false;
1536   }
1537 
1538   return canTailPredicateLoop(L, LI, SE, DL, LAI);
1539 }
1540 
1541 bool ARMTTIImpl::emitGetActiveLaneMask() const {
1542   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
1543     return false;
1544 
1545   // Intrinsic @llvm.get.active.lane.mask is supported.
1546   // It is used in the MVETailPredication pass, which requires the number of
1547   // elements processed by this vector loop to set up the tail-predicated
1548   // loop.
1549   return true;
1550 }

1551 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1552                                          TTI::UnrollingPreferences &UP) {
1553   // Currently, only enable these preferences for M-Class cores.
1554   if (!ST->isMClass())
1555     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
1556 
1557   // Disable loop unrolling for Oz and Os.
1558   UP.OptSizeThreshold = 0;
1559   UP.PartialOptSizeThreshold = 0;
1560   if (L->getHeader()->getParent()->hasOptSize())
1561     return;
1562 
1563   // Only enable on Thumb-2 targets.
1564   if (!ST->isThumb2())
1565     return;
1566 
1567   SmallVector<BasicBlock*, 4> ExitingBlocks;
1568   L->getExitingBlocks(ExitingBlocks);
1569   LLVM_DEBUG(dbgs() << "Loop has:\n"
1570                     << "Blocks: " << L->getNumBlocks() << "\n"
1571                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
1572 
1573   // Only allow one exit other than the latch. This acts as an early exit as
1574   // it mirrors the profitability calculation of the runtime unroller.
1575   if (ExitingBlocks.size() > 2)
1576     return;
1577 
1578   // Limit the CFG of the loop body for targets with a branch predictor.
1579   // Allowing 4 blocks permits if-then-else diamonds in the body.
1580   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
1581     return;
1582 
1583   // Scan the loop: don't unroll loops with calls as this could prevent
1584   // inlining.
1585   unsigned Cost = 0;
1586   for (auto *BB : L->getBlocks()) {
1587     for (auto &I : *BB) {
1588       // Don't unroll vectorised loops. MVE does not benefit from it as much
1589       // as scalar code does.
1590       if (I.getType()->isVectorTy())
1591         return;
1592 
1593       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1594         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1595           if (!isLoweredToCall(F))
1596             continue;
1597         }
1598         return;
1599       }
1600 
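           // Accumulate the code-size cost of the loop body; small loops are
           // then force-unrolled below.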
1601       SmallVector<const Value*, 4> Operands(I.value_op_begin(),
1602                                             I.value_op_end());
1603       Cost += getUserCost(&I, Operands, TargetTransformInfo::TCK_CodeSize);
1604     }
1605   }
1606 
1607   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
1608 
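       // Allow partial, runtime and upper-bound unrolling, plus unroll-and-jam,
       // with a default runtime unroll count of 4.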
1609   UP.Partial = true;
1610   UP.Runtime = true;
1611   UP.UpperBound = true;
1612   UP.UnrollRemainder = true;
1613   UP.DefaultUnrollRuntimeCount = 4;
1614   UP.UnrollAndJam = true;
1615   UP.UnrollAndJamInnerLoopThreshold = 60;
1616 
1617   // Force-unrolling small loops can be very useful because of the
1618   // branch-taken cost of the backedge.
1619   if (Cost < 12)
1620     UP.Force = true;
1621 }
1622 
1623 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1624                                        TTI::PeelingPreferences &PP) {
1625   BaseT::getPeelingPreferences(L, SE, PP);
1626 }
1627 
1628 bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
1629                                        TTI::ReductionFlags Flags) const {
1630   return ST->hasMVEIntegerOps();
1631 }
1632