xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp (revision 0a36787e4c1fa0cf77dcf83be0867178476e372b)
1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "armtti"
44 
45 static cl::opt<bool> EnableMaskedLoadStores(
46   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47   cl::desc("Enable the generation of masked loads and stores"));
48 
49 static cl::opt<bool> DisableLowOverheadLoops(
50   "disable-arm-loloops", cl::Hidden, cl::init(false),
51   cl::desc("Disable the generation of low-overhead loops"));
52 
53 static cl::opt<bool>
54     AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
55                   cl::desc("Enable the generation of WLS loops"));
56 
57 extern cl::opt<TailPredication::Mode> EnableTailPredication;
58 
59 extern cl::opt<bool> EnableMaskedGatherScatters;
60 
61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
62 
63 /// Convert a vector load intrinsic into a simple llvm load instruction.
64 /// This is beneficial when the underlying object being addressed comes
65 /// from a constant, since we get constant-folding for free.
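/// For example (illustrative IR), a call such as
///   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8* %p, i32 4)
/// becomes a bitcast of the pointer followed by a plain aligned load:
///   %c = bitcast i8* %p to <4 x i32>*
///   %v = load <4 x i32>, <4 x i32>* %c, align 4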
66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
67                                InstCombiner::BuilderTy &Builder) {
68   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
69 
70   if (!IntrAlign)
71     return nullptr;
72 
73   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
74                            ? MemAlign
75                            : IntrAlign->getLimitedValue();
76 
77   if (!isPowerOf2_32(Alignment))
78     return nullptr;
79 
80   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
81                                           PointerType::get(II.getType(), 0));
82   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
83 }
84 
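// Inlining across different subtarget feature sets is only allowed when it
// cannot change semantics: features outside InlineFeaturesAllowed must match
// exactly, and any allowed features used by the callee must also be present
// in the caller.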
85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
86                                      const Function *Callee) const {
87   const TargetMachine &TM = getTLI()->getTargetMachine();
88   const FeatureBitset &CallerBits =
89       TM.getSubtargetImpl(*Caller)->getFeatureBits();
90   const FeatureBitset &CalleeBits =
91       TM.getSubtargetImpl(*Callee)->getFeatureBits();
92 
93   // To inline a callee, all features not in the allowed list must match exactly.
94   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
95                     (CalleeBits & ~InlineFeaturesAllowed);
96   // For features in the allowed list, the callee's features must be a subset of
97   // the caller's.
98   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
99                      (CalleeBits & InlineFeaturesAllowed);
100   return MatchExact && MatchSubset;
101 }
102 
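// Favour backedge-indexed addressing only for single-block Thumb2 M-class
// loops; it is skipped when optimising for size, and MVE targets instead rely
// on the post-increment preference below.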
103 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
104   if (L->getHeader()->getParent()->hasOptSize())
105     return false;
106   if (ST->hasMVEIntegerOps())
107     return false;
108   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
109 }
110 
111 bool ARMTTIImpl::shouldFavorPostInc() const {
112   if (ST->hasMVEIntegerOps())
113     return true;
114   return false;
115 }
116 
117 Optional<Instruction *>
118 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
119   using namespace PatternMatch;
120   Intrinsic::ID IID = II.getIntrinsicID();
121   switch (IID) {
122   default:
123     break;
124   case Intrinsic::arm_neon_vld1: {
125     Align MemAlign =
126         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
127                           &IC.getAssumptionCache(), &IC.getDominatorTree());
128     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
129       return IC.replaceInstUsesWith(II, V);
130     }
131     break;
132   }
133 
134   case Intrinsic::arm_neon_vld2:
135   case Intrinsic::arm_neon_vld3:
136   case Intrinsic::arm_neon_vld4:
137   case Intrinsic::arm_neon_vld2lane:
138   case Intrinsic::arm_neon_vld3lane:
139   case Intrinsic::arm_neon_vld4lane:
140   case Intrinsic::arm_neon_vst1:
141   case Intrinsic::arm_neon_vst2:
142   case Intrinsic::arm_neon_vst3:
143   case Intrinsic::arm_neon_vst4:
144   case Intrinsic::arm_neon_vst2lane:
145   case Intrinsic::arm_neon_vst3lane:
146   case Intrinsic::arm_neon_vst4lane: {
147     Align MemAlign =
148         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
149                           &IC.getAssumptionCache(), &IC.getDominatorTree());
150     unsigned AlignArg = II.getNumArgOperands() - 1;
151     Value *AlignArgOp = II.getArgOperand(AlignArg);
152     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
153     if (Align && *Align < MemAlign) {
154       return IC.replaceOperand(
155           II, AlignArg,
156           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
157                            false));
158     }
159     break;
160   }
161 
162   case Intrinsic::arm_mve_pred_i2v: {
163     Value *Arg = II.getArgOperand(0);
164     Value *ArgArg;
165     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
166                        PatternMatch::m_Value(ArgArg))) &&
167         II.getType() == ArgArg->getType()) {
168       return IC.replaceInstUsesWith(II, ArgArg);
169     }
170     Constant *XorMask;
171     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
172                              PatternMatch::m_Value(ArgArg)),
173                          PatternMatch::m_Constant(XorMask))) &&
174         II.getType() == ArgArg->getType()) {
175       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
176         if (CI->getValue().trunc(16).isAllOnesValue()) {
177           auto TrueVector = IC.Builder.CreateVectorSplat(
178               cast<FixedVectorType>(II.getType())->getNumElements(),
179               IC.Builder.getTrue());
180           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
181         }
182       }
183     }
184     KnownBits ScalarKnown(32);
185     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
186                                 ScalarKnown, 0)) {
187       return &II;
188     }
189     break;
190   }
191   case Intrinsic::arm_mve_pred_v2i: {
192     Value *Arg = II.getArgOperand(0);
193     Value *ArgArg;
194     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
195                        PatternMatch::m_Value(ArgArg)))) {
196       return IC.replaceInstUsesWith(II, ArgArg);
197     }
198     if (!II.getMetadata(LLVMContext::MD_range)) {
199       Type *IntTy32 = Type::getInt32Ty(II.getContext());
200       Metadata *M[] = {
201           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
202           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
203       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
204       return &II;
205     }
206     break;
207   }
208   case Intrinsic::arm_mve_vadc:
209   case Intrinsic::arm_mve_vadc_predicated: {
210     unsigned CarryOp =
211         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
212     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
213            "Bad type for intrinsic!");
214 
215     KnownBits CarryKnown(32);
216     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
217                                 CarryKnown)) {
218       return &II;
219     }
220     break;
221   }
222   case Intrinsic::arm_mve_vmldava: {
223     Instruction *I = cast<Instruction>(&II);
224     if (I->hasOneUse()) {
225       auto *User = cast<Instruction>(*I->user_begin());
226       Value *OpZ;
227       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
228           match(I->getOperand(3), m_Zero())) {
229         Value *OpX = I->getOperand(4);
230         Value *OpY = I->getOperand(5);
231         Type *OpTy = OpX->getType();
232 
233         IC.Builder.SetInsertPoint(User);
234         Value *V =
235             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
236                                        {I->getOperand(0), I->getOperand(1),
237                                         I->getOperand(2), OpZ, OpX, OpY});
238 
239         IC.replaceInstUsesWith(*User, V);
240         return IC.eraseInstFromFunction(*User);
241       }
242     }
243     return None;
244   }
245   }
246   return None;
247 }
248 
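// Rough cost of materialising an integer immediate: 1 when it can be encoded
// directly (or via its complement), more when it needs extra instructions
// such as a movw/movt pair or a load from the constant pool.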
249 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
250                               TTI::TargetCostKind CostKind) {
251   assert(Ty->isIntegerTy());
252 
253   unsigned Bits = Ty->getPrimitiveSizeInBits();
254   if (Bits == 0 || Imm.getActiveBits() >= 64)
255     return 4;
256 
257   int64_t SImmVal = Imm.getSExtValue();
258   uint64_t ZImmVal = Imm.getZExtValue();
259   if (!ST->isThumb()) {
260     if ((SImmVal >= 0 && SImmVal < 65536) ||
261         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
262         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
263       return 1;
264     return ST->hasV6T2Ops() ? 2 : 3;
265   }
266   if (ST->isThumb2()) {
267     if ((SImmVal >= 0 && SImmVal < 65536) ||
268         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
269         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
270       return 1;
271     return ST->hasV6T2Ops() ? 2 : 3;
272   }
273   // Thumb1: any i8 immediate costs 1.
274   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
275     return 1;
276   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
277     return 2;
278   // Load from constantpool.
279   return 3;
280 }
281 
282 // Non-negative constants smaller than 256 fit in the immediate field of
283 // Thumb1 instructions, so they get a cost of zero; everything else costs 1.
284 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
285                                       const APInt &Imm, Type *Ty) {
286   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
287     return 0;
288 
289   return 1;
290 }
291 
292 // Checks whether Inst is part of a min(max()) or max(min()) pattern
293 // that will match an SSAT instruction.
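// For example (illustrative), a signed saturate to 8 bits appears in IR as
//   smax(smin(x, 127), -128)
// so Imm here is -128 (negative, with -Imm a power of 2) and the inner smin
// compares against (-Imm) - 1 == 127.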
294 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
295   Value *LHS, *RHS;
296   ConstantInt *C;
297   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
298 
299   if (InstSPF == SPF_SMAX &&
300       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
301       C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
302 
303     auto isSSatMin = [&](Value *MinInst) {
304       if (isa<SelectInst>(MinInst)) {
305         Value *MinLHS, *MinRHS;
306         ConstantInt *MinC;
307         SelectPatternFlavor MinSPF =
308             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
309         if (MinSPF == SPF_SMIN &&
310             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
311             MinC->getValue() == ((-Imm) - 1))
312           return true;
313       }
314       return false;
315     };
316 
317     if (isSSatMin(Inst->getOperand(1)) ||
318         (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
319                                isSSatMin(*(++Inst->user_begin())))))
320       return true;
321   }
322   return false;
323 }
324 
325 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
326                                   const APInt &Imm, Type *Ty,
327                                   TTI::TargetCostKind CostKind,
328                                   Instruction *Inst) {
329   // Division by a constant can be turned into multiplication, but only if we
330   // know it's a constant. So it's not so much that the immediate is cheap (it's
331   // not), but that the alternative is worse.
332   // FIXME: this is probably unneeded with GlobalISel.
333   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
334        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
335       Idx == 1)
336     return 0;
337 
338   if (Opcode == Instruction::And) {
339     // UXTB/UXTH
340     if (Imm == 255 || Imm == 65535)
341       return 0;
342     // Conversion to BIC is free, and means we can use ~Imm instead.
343     return std::min(getIntImmCost(Imm, Ty, CostKind),
344                     getIntImmCost(~Imm, Ty, CostKind));
345   }
346 
347   if (Opcode == Instruction::Add)
348     // Conversion to SUB is free, and means we can use -Imm instead.
349     return std::min(getIntImmCost(Imm, Ty, CostKind),
350                     getIntImmCost(-Imm, Ty, CostKind));
351 
352   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
353       Ty->getIntegerBitWidth() == 32) {
354     int64_t NegImm = -Imm.getSExtValue();
355     if (ST->isThumb2() && NegImm < 1<<12)
356       // icmp X, #-C -> cmn X, #C
357       return 0;
358     if (ST->isThumb() && NegImm < 1<<8)
359       // icmp X, #-C -> adds X, #C
360       return 0;
361   }
362 
363   // xor a, -1 can always be folded to MVN
364   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
365     return 0;
366 
367   // Ensure negative constants of min(max()) or max(min()) patterns that
368   // match SSAT instructions don't get hoisted.
369   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
370       Ty->getIntegerBitWidth() <= 32) {
371     if (isSSATMinMaxPattern(Inst, Imm) ||
372         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
373          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
374       return 0;
375   }
376 
377   return getIntImmCost(Imm, Ty, CostKind);
378 }
379 
380 int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
381   if (CostKind == TTI::TCK_RecipThroughput &&
382       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
383     // FIXME: The vectorizer is highly sensitive to the cost of these
384     // instructions, which suggests that it may be using the costs incorrectly.
385     // But, for now, just make them free to avoid performance regressions for
386     // vector targets.
387     return 0;
388   }
389   return BaseT::getCFInstrCost(Opcode, CostKind);
390 }
391 
392 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
393                                  TTI::CastContextHint CCH,
394                                  TTI::TargetCostKind CostKind,
395                                  const Instruction *I) {
396   int ISD = TLI->InstructionOpcodeToISD(Opcode);
397   assert(ISD && "Invalid opcode");
398 
399   // TODO: Allow non-throughput costs that aren't binary.
400   auto AdjustCost = [&CostKind](int Cost) {
401     if (CostKind != TTI::TCK_RecipThroughput)
402       return Cost == 0 ? 0 : 1;
403     return Cost;
404   };
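  // A floating-point type is treated as legal here only if the subtarget has
  // hardware support for its scalar element type.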
405   auto IsLegalFPType = [this](EVT VT) {
406     EVT EltVT = VT.getScalarType();
407     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
408             (EltVT == MVT::f64 && ST->hasFP64()) ||
409             (EltVT == MVT::f16 && ST->hasFullFP16());
410   };
411 
412   EVT SrcTy = TLI->getValueType(DL, Src);
413   EVT DstTy = TLI->getValueType(DL, Dst);
414 
415   if (!SrcTy.isSimple() || !DstTy.isSimple())
416     return AdjustCost(
417         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
418 
419   // Extending masked loads and truncating masked stores are expensive because
420   // we currently don't split them. This means we'll likely end up
421   // loading/storing each element individually (hence the high cost).
422   if ((ST->hasMVEIntegerOps() &&
423        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
424         Opcode == Instruction::SExt)) ||
425       (ST->hasMVEFloatOps() &&
426        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
427        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
428     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
429       return 2 * DstTy.getVectorNumElements() * ST->getMVEVectorCostFactor();
430 
431   // The extend of other kinds of loads is free.
432   if (CCH == TTI::CastContextHint::Normal ||
433       CCH == TTI::CastContextHint::Masked) {
434     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
435         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
436         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
437         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
438         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
439         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
440         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
441         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
442         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
443         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
444         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
445         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
446         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
447     };
448     if (const auto *Entry = ConvertCostTableLookup(
449             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
450       return AdjustCost(Entry->Cost);
451 
452     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
453         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
454         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
455         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
456         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
457         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
458         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
459         // The following extend from a legal type to an illegal type, so we need
460         // to split the load. This introduces an extra load operation, but the
461         // extend is still "free".
462         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
463         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
464         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
465         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
466         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
467         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
468     };
469     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
470       if (const auto *Entry =
471               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
472                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
473         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
474     }
475 
476     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
477         // FPExtends are similar but also require the VCVT instructions.
478         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
479         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
480     };
481     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
482       if (const auto *Entry =
483               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
484                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
485         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
486     }
487 
488     // The truncate of a store is free. This is the mirror of extends above.
489     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
490         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
491         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
492         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
493         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
494         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
495         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
496         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
497     };
498     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
499       if (const auto *Entry =
500               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
501                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
502         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
503     }
504 
505     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
506         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
507         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
508     };
509     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
510       if (const auto *Entry =
511               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
512                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
513         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
514     }
515   }
516 
517   // NEON vector operations that can extend their inputs.
518   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
519       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
520     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
521       // vaddl
522       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
523       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
524       // vsubl
525       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
526       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
527       // vmull
528       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
529       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
530       // vshll
531       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
532       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
533     };
534 
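    // The extend is only free if its single user is one of the widening NEON
    // operations listed above.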
535     auto *User = cast<Instruction>(*I->user_begin());
536     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
537     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
538                                              DstTy.getSimpleVT(),
539                                              SrcTy.getSimpleVT())) {
540       return AdjustCost(Entry->Cost);
541     }
542   }
543 
544   // Single to/from double precision conversions.
545   if (Src->isVectorTy() && ST->hasNEON() &&
546       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
547         DstTy.getScalarType() == MVT::f32) ||
548        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
549         DstTy.getScalarType() == MVT::f64))) {
550     static const CostTblEntry NEONFltDblTbl[] = {
551         // Vector fptrunc/fpext conversions.
552         {ISD::FP_ROUND, MVT::v2f64, 2},
553         {ISD::FP_EXTEND, MVT::v2f32, 2},
554         {ISD::FP_EXTEND, MVT::v4f32, 4}};
555 
556     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
557     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
558       return AdjustCost(LT.first * Entry->Cost);
559   }
560 
561   // Some arithmetic, load and store operations have specific instructions
562   // to cast up/down their types automatically at no extra cost.
563   // TODO: Get these tables to know at least what the related operations are.
564   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
565     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
566     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
567     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
568     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
569     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
570     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
571 
572     // The number of vmovl instructions for the extension.
573     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
574     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
575     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
576     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
577     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
578     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
579     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
580     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
581     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
582     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
583     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
584     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
585     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
586     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
587     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
588     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
589     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
590     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
591 
592     // Operations that we legalize using splitting.
593     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
594     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
595 
596     // Vector float <-> i32 conversions.
597     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
598     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
599 
600     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
601     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
602     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
603     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
604     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
605     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
606     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
607     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
608     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
609     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
610     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
611     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
612     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
613     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
614     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
615     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
616     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
617     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
618     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
619     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
620 
621     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
622     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
623     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
624     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
625     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
626     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
627 
628     // Vector double <-> i32 conversions.
629     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
630     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
631 
632     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
633     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
634     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
635     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
636     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
637     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
638 
639     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
640     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
641     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
642     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
643     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
644     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
645   };
646 
647   if (SrcTy.isVector() && ST->hasNEON()) {
648     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
649                                                    DstTy.getSimpleVT(),
650                                                    SrcTy.getSimpleVT()))
651       return AdjustCost(Entry->Cost);
652   }
653 
654   // Scalar float to integer conversions.
655   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
656     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
657     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
658     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
659     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
660     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
661     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
662     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
663     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
664     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
665     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
666     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
667     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
668     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
669     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
670     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
671     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
672     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
673     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
674     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
675     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
676   };
677   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
678     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
679                                                    DstTy.getSimpleVT(),
680                                                    SrcTy.getSimpleVT()))
681       return AdjustCost(Entry->Cost);
682   }
683 
684   // Scalar integer to float conversions.
685   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
686     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
687     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
688     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
689     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
690     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
691     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
692     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
693     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
694     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
695     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
696     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
697     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
698     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
699     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
700     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
701     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
702     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
703     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
704     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
705     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
706   };
707 
708   if (SrcTy.isInteger() && ST->hasNEON()) {
709     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
710                                                    ISD, DstTy.getSimpleVT(),
711                                                    SrcTy.getSimpleVT()))
712       return AdjustCost(Entry->Cost);
713   }
714 
715   // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
716   // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
717   // are linearised, so they take more.
718   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
719     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
720     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
721     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
722     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
723     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
724     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
725     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
726     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
727     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
728     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
729     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
730     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
731   };
732 
733   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
734     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
735                                                    ISD, DstTy.getSimpleVT(),
736                                                    SrcTy.getSimpleVT()))
737       return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
738   }
739 
740   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
741     // As a general rule, fp converts that were not matched above are scalarized
742     // and cost 1 vcvt per lane, so long as the instruction is available. If it
743     // is not, they become a series of function calls.
744     const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
745     int Lanes = 1;
746     if (SrcTy.isFixedLengthVector())
747       Lanes = SrcTy.getVectorNumElements();
748 
749     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
750       return Lanes;
751     else
752       return Lanes * CallCost;
753   }
754 
755   if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
756       SrcTy.isFixedLengthVector()) {
757     // Treat a truncate with a larger-than-legal source (128 bits for MVE) as
758     // expensive: 2 instructions per lane.
759     if ((SrcTy.getScalarType() == MVT::i8 ||
760          SrcTy.getScalarType() == MVT::i16 ||
761          SrcTy.getScalarType() == MVT::i32) &&
762         SrcTy.getSizeInBits() > 128 &&
763         SrcTy.getSizeInBits() > DstTy.getSizeInBits())
764       return SrcTy.getVectorNumElements() * 2;
765   }
766 
767   // Scalar integer conversion costs.
768   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
769     // i16 -> i64 requires two dependent operations.
770     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
771 
772     // Truncates on i64 are assumed to be free.
773     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
774     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
775     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
776     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
777   };
778 
779   if (SrcTy.isInteger()) {
780     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
781                                                    DstTy.getSimpleVT(),
782                                                    SrcTy.getSimpleVT()))
783       return AdjustCost(Entry->Cost);
784   }
785 
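  // Anything not handled above falls back to the generic cast cost, scaled by
  // the MVE cost factor for vector types.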
786   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
787                      ? ST->getMVEVectorCostFactor()
788                      : 1;
789   return AdjustCost(
790       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
791 }
792 
793 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
794                                    unsigned Index) {
795   // Penalize inserting into a D-subregister. We end up with a three times
796   // lower estimated throughput on Swift.
797   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
798       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
799     return 3;
800 
801   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
802                         Opcode == Instruction::ExtractElement)) {
803     // Cross-class copies are expensive on many microarchitectures,
804     // so assume they are expensive by default.
805     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
806       return 3;
807 
808     // Even if it's not a cross-class copy, this likely leads to mixing
809     // NEON and VFP code and should therefore be penalized.
810     if (ValTy->isVectorTy() &&
811         ValTy->getScalarSizeInBits() <= 32)
812       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
813   }
814 
815   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
816                                  Opcode == Instruction::ExtractElement)) {
817     // We say MVE moves cost at least the MVEVectorCostFactor, even though
818     // they are scalar instructions. This helps prevent mixing scalar and
819     // vector, to prevent vectorising where we end up just scalarising the
820     // result anyway.
821     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
822                     ST->getMVEVectorCostFactor()) *
823            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
824   }
825 
826   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
827 }
828 
829 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
830                                    CmpInst::Predicate VecPred,
831                                    TTI::TargetCostKind CostKind,
832                                    const Instruction *I) {
833   int ISD = TLI->InstructionOpcodeToISD(Opcode);
834 
835   // Thumb scalar code size cost for select.
836   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
837       ST->isThumb() && !ValTy->isVectorTy()) {
838     // Assume expensive structs.
839     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
840       return TTI::TCC_Expensive;
841 
842     // Select costs can vary because they:
843     // - may require one or more conditional mov (including an IT),
844     // - can't operate directly on immediates,
845     // - require live flags, which we can't copy around easily.
846     int Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
847 
848     // Possible IT instruction for Thumb2, or more for Thumb1.
849     ++Cost;
850 
851     // i1 values may need rematerialising by using mov immediates and/or
852     // flag setting instructions.
853     if (ValTy->isIntegerTy(1))
854       ++Cost;
855 
856     return Cost;
857   }
858 
859   // On NEON a vector select gets lowered to vbsl.
860   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
861     // Lowering of some vector selects is currently far from perfect.
862     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
863       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
864       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
865       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
866     };
867 
868     EVT SelCondTy = TLI->getValueType(DL, CondTy);
869     EVT SelValTy = TLI->getValueType(DL, ValTy);
870     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
871       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
872                                                      SelCondTy.getSimpleVT(),
873                                                      SelValTy.getSimpleVT()))
874         return Entry->Cost;
875     }
876 
877     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
878     return LT.first;
879   }
880 
881   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
882   // for "multiple beats" potentially needed by MVE instructions.
883   int BaseCost = 1;
884   if (CostKind != TTI::TCK_CodeSize && ST->hasMVEIntegerOps() &&
885       ValTy->isVectorTy())
886     BaseCost = ST->getMVEVectorCostFactor();
887 
888   return BaseCost *
889          BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
890 }
891 
892 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
893                                           const SCEV *Ptr) {
894   // Address computations in vectorized code with non-consecutive addresses will
895   // likely result in more instructions compared to scalar code where the
896   // computation can more often be merged into the index mode. The resulting
897   // extra micro-ops can significantly decrease throughput.
898   unsigned NumVectorInstToHideOverhead = 10;
899   int MaxMergeDistance = 64;
900 
901   if (ST->hasNEON()) {
902     if (Ty->isVectorTy() && SE &&
903         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
904       return NumVectorInstToHideOverhead;
905 
906     // In many cases the address computation is not merged into the instruction
907     // addressing mode.
908     return 1;
909   }
910   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
911 }
912 
913 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
914   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
915     // If a VCTP is part of a chain, it's already profitable and shouldn't be
916     // optimized, else LSR may block tail-predication.
917     switch (II->getIntrinsicID()) {
918     case Intrinsic::arm_mve_vctp8:
919     case Intrinsic::arm_mve_vctp16:
920     case Intrinsic::arm_mve_vctp32:
921     case Intrinsic::arm_mve_vctp64:
922       return true;
923     default:
924       break;
925     }
926   }
927   return false;
928 }
929 
930 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
931   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
932     return false;
933 
934   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
935     // Don't support v2i1 yet.
936     if (VecTy->getNumElements() == 2)
937       return false;
938 
939     // We don't support extending fp types.
940     unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
941     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
942       return false;
943   }
944 
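  // MVE masked loads require naturally aligned elements: 32-bit elements need
  // alignment of at least 4 bytes, 16-bit at least 2, and 8-bit elements are
  // always fine.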
945   unsigned EltWidth = DataTy->getScalarSizeInBits();
946   return (EltWidth == 32 && Alignment >= 4) ||
947          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
948 }
949 
950 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
951   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
952     return false;
953 
954   // This method is called in 2 places:
955   //  - from the vectorizer with a scalar type, in which case we need to get
956   //  this as good as we can with the limited info we have (and rely on the cost
957   //  model for the rest).
958   //  - from the masked intrinsic lowering pass with the actual vector type.
959   // For MVE, we have a custom lowering pass that will already have custom
960   // legalised any gathers that we can to MVE intrinsics, and want to expand all
961   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
962   // are here, we know we want to expand.
963   if (isa<VectorType>(Ty))
964     return false;
965 
966   unsigned EltWidth = Ty->getScalarSizeInBits();
967   return ((EltWidth == 32 && Alignment >= 4) ||
968           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
969 }
970 
971 /// Given a memcpy/memset/memmove instruction, return the number of memory
972 /// operations performed, by querying findOptimalMemOpLowering. Returns -1 if a
973 /// call is used.
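/// For example (illustrative), if findOptimalMemOpLowering breaks the
/// operation into N chunks, a memcpy/memmove is costed as N loads plus N
/// stores (N * 2), while a memset is costed as N stores.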
974 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
975   MemOp MOp;
976   unsigned DstAddrSpace = ~0u;
977   unsigned SrcAddrSpace = ~0u;
978   const Function *F = I->getParent()->getParent();
979 
980   if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
981     ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
982     // If 'size' is not a constant, a library call will be generated.
983     if (!C)
984       return -1;
985 
986     const unsigned Size = C->getValue().getZExtValue();
987     const Align DstAlign = *MC->getDestAlign();
988     const Align SrcAlign = *MC->getSourceAlign();
989 
990     MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
991                       /*IsVolatile*/ false);
992     DstAddrSpace = MC->getDestAddressSpace();
993     SrcAddrSpace = MC->getSourceAddressSpace();
994   }
995   else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
996     ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
997     // If 'size' is not a constant, a library call will be generated.
998     if (!C)
999       return -1;
1000 
1001     const unsigned Size = C->getValue().getZExtValue();
1002     const Align DstAlign = *MS->getDestAlign();
1003 
1004     MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1005                      /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1006     DstAddrSpace = MS->getDestAddressSpace();
1007   }
1008   else
1009     llvm_unreachable("Expected a memcpy/move or memset!");
1010 
1011   unsigned Limit, Factor = 2;
1012   switch(I->getIntrinsicID()) {
1013     case Intrinsic::memcpy:
1014       Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1015       break;
1016     case Intrinsic::memmove:
1017       Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1018       break;
1019     case Intrinsic::memset:
1020       Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1021       Factor = 1;
1022       break;
1023     default:
1024       llvm_unreachable("Expected a memcpy/move or memset!");
1025   }
1026 
1027   // MemOps will be populated with a list of data types that need to be
1028   // loaded and stored. That's why we multiply the number of elements by 2 to
1029   // get the cost for this memcpy.
1030   std::vector<EVT> MemOps;
1031   if (getTLI()->findOptimalMemOpLowering(
1032           MemOps, Limit, MOp, DstAddrSpace,
1033           SrcAddrSpace, F->getAttributes()))
1034     return MemOps.size() * Factor;
1035 
1036   // If we can't find an optimal memop lowering, return the default cost.
1037   return -1;
1038 }
1039 
1040 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1041   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1042 
1043   // To model the cost of a library call, we assume 1 for the call, and
1044   // 3 for the argument setup.
1045   if (NumOps == -1)
1046     return 4;
1047   return NumOps;
1048 }
1049 
1050 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
1051                                int Index, VectorType *SubTp) {
1052   if (ST->hasNEON()) {
1053     if (Kind == TTI::SK_Broadcast) {
1054       static const CostTblEntry NEONDupTbl[] = {
1055           // VDUP handles these cases.
1056           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1057           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1058           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1059           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1060           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1061           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1062 
1063           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1064           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1065           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1066           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1067 
1068       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1069 
1070       if (const auto *Entry =
1071               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1072         return LT.first * Entry->Cost;
1073     }
1074     if (Kind == TTI::SK_Reverse) {
1075       static const CostTblEntry NEONShuffleTbl[] = {
1076           // Reverse shuffle costs one instruction if we are shuffling within a
1077           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1078           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1079           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1080           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1081           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1082           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1083           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1084 
1085           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1086           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1087           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1088           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1089 
1090       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1091 
1092       if (const auto *Entry =
1093               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1094         return LT.first * Entry->Cost;
1095     }
1096     if (Kind == TTI::SK_Select) {
1097       static const CostTblEntry NEONSelShuffleTbl[] = {
1098           // Select shuffle cost table for ARM. Cost is the number of
1099           // instructions required to create the shuffled vector.
1101 
1102           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1103           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1104           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1105           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1106 
1107           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1108           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1109           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1110 
1111           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1112 
1113           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1114 
1115       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1116       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1117                                               ISD::VECTOR_SHUFFLE, LT.second))
1118         return LT.first * Entry->Cost;
1119     }
1120   }
1121   if (ST->hasMVEIntegerOps()) {
1122     if (Kind == TTI::SK_Broadcast) {
1123       static const CostTblEntry MVEDupTbl[] = {
1124           // VDUP handles these cases.
1125           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1126           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1127           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1128           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1129           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1130 
1131       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1132 
1133       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1134                                               LT.second))
1135         return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
1136     }
1137   }
1138   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1139                      ? ST->getMVEVectorCostFactor()
1140                      : 1;
1141   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
1142 }
1143 
1144 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
1145                                        TTI::TargetCostKind CostKind,
1146                                        TTI::OperandValueKind Op1Info,
1147                                        TTI::OperandValueKind Op2Info,
1148                                        TTI::OperandValueProperties Opd1PropInfo,
1149                                        TTI::OperandValueProperties Opd2PropInfo,
1150                                        ArrayRef<const Value *> Args,
1151                                        const Instruction *CxtI) {
1152   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1153   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1154     // Make operations on i1 relatively expensive as this often involves
1155     // combining predicates. AND and XOR should be easier to handle with IT
1156     // blocks.
1157     switch (ISDOpcode) {
1158     default:
1159       break;
1160     case ISD::AND:
1161     case ISD::XOR:
1162       return 2;
1163     case ISD::OR:
1164       return 3;
1165     }
1166   }
1167 
1168   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1169 
1170   if (ST->hasNEON()) {
1171     const unsigned FunctionCallDivCost = 20;
1172     const unsigned ReciprocalDivCost = 10;
1173     static const CostTblEntry CostTbl[] = {
1174       // Division.
1175       // These costs are somewhat random. Choose a cost of 20 to indicate that
1176       // vectorizing division (an added function call) is going to be very expensive.
1177       // Double registers types.
1178       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1179       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1180       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1181       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1182       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1183       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1184       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1185       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1186       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1187       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1188       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1189       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1190       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1191       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1192       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1193       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1194       // Quad register types.
1195       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1196       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1197       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1198       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1199       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1200       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1201       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1202       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1203       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1204       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1205       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1206       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1207       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1208       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1209       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1210       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1211       // Multiplication.
1212     };
1213 
1214     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1215       return LT.first * Entry->Cost;
1216 
1217     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1218                                              Op2Info,
1219                                              Opd1PropInfo, Opd2PropInfo);
1220 
1221     // This is somewhat of a hack. The problem that we are facing is that SROA
1222     // creates sequences of shift/and/or instructions to construct values.
1223     // These sequences are recognized by ISel and have zero cost. Not so for
1224     // the vectorized code. Because we have support for v2i64 but not i64 those
1225     // sequences look particularly beneficial to vectorize.
1226     // To work around this we increase the cost of v2i64 operations to make them
1227     // seem less beneficial.
1228     if (LT.second == MVT::v2i64 &&
1229         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1230       Cost += 4;
1231 
1232     return Cost;
1233   }
1234 
1235   // If this operation is a shift on arm/thumb2, it might well be folded into
1236   // the following instruction, hence having a cost of 0.
1237   auto LooksLikeAFreeShift = [&]() {
1238     if (ST->isThumb1Only() || Ty->isVectorTy())
1239       return false;
1240 
1241     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1242       return false;
1243     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1244       return false;
1245 
1246     // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1247     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1248     case Instruction::Add:
1249     case Instruction::Sub:
1250     case Instruction::And:
1251     case Instruction::Xor:
1252     case Instruction::Or:
1253     case Instruction::ICmp:
1254       return true;
1255     default:
1256       return false;
1257     }
1258   };
1259   if (LooksLikeAFreeShift())
1260     return 0;
1261 
1262   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1263   // for "multiple beats" potentially needed by MVE instructions.
1264   int BaseCost = 1;
1265   if (CostKind != TTI::TCK_CodeSize && ST->hasMVEIntegerOps() &&
1266       Ty->isVectorTy())
1267     BaseCost = ST->getMVEVectorCostFactor();
1268 
1269   // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
1270   // without treating floats as more expensive than scalars or increasing the
1271   // costs for custom operations. The result is also multiplied by the
1272   // MVEVectorCostFactor where appropriate.
1273   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1274     return LT.first * BaseCost;
1275 
1276   // Else this is expand, assume that we need to scalarize this op.
1277   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1278     unsigned Num = VTy->getNumElements();
1279     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
1280                                            CostKind);
1281     // Return the cost of multiple scalar invocations plus the cost of
1282     // inserting and extracting the values.
1283     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
1284   }
1285 
1286   return BaseCost;
1287 }
1288 
1289 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1290                                 MaybeAlign Alignment, unsigned AddressSpace,
1291                                 TTI::TargetCostKind CostKind,
1292                                 const Instruction *I) {
1293   // TODO: Handle other cost kinds.
1294   if (CostKind != TTI::TCK_RecipThroughput)
1295     return 1;
1296 
1297   // Type legalization can't handle structs
1298   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1299     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1300                                   CostKind);
1301 
1302   if (ST->hasNEON() && Src->isVectorTy() &&
1303       (Alignment && *Alignment != Align(16)) &&
1304       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1305     // Unaligned loads/stores are extremely inefficient.
1306     // We need 4 uops for vst1/vld1 vs 1 uop for vldr/vstr.
1307     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1308     return LT.first * 4;
1309   }
1310 
1311   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1312   // Same for stores.
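       // For example, IR like
       //   %l = load <4 x half>, <4 x half>* %p
       //   %e = fpext <4 x half> %l to <4 x float>
       // is costed here as a single MVE operation rather than as a load plus a
       // separate extend.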
1313   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1314       ((Opcode == Instruction::Load && I->hasOneUse() &&
1315         isa<FPExtInst>(*I->user_begin())) ||
1316        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1317     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1318     Type *DstTy =
1319         Opcode == Instruction::Load
1320             ? (*I->user_begin())->getType()
1321             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1322     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1323         DstTy->getScalarType()->isFloatTy())
1324       return ST->getMVEVectorCostFactor();
1325   }
1326 
1327   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1328                      ? ST->getMVEVectorCostFactor()
1329                      : 1;
1330   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1331                                            CostKind, I);
1332 }
1333 
1334 unsigned ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1335                                            Align Alignment,
1336                                            unsigned AddressSpace,
1337                                            TTI::TargetCostKind CostKind) {
1338   if (ST->hasMVEIntegerOps()) {
1339     if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1340       return ST->getMVEVectorCostFactor();
1341     if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1342       return ST->getMVEVectorCostFactor();
1343   }
1344   if (!isa<FixedVectorType>(Src))
1345     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1346                                         CostKind);
1347   // Scalar cost, which is currently very high due to the inefficiency of the
1348   // generated code.
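       // For example, a masked load/store of <4 x i32> that cannot use the MVE
       // path above is costed at 4 * 8 = 32.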
1349   return cast<FixedVectorType>(Src)->getNumElements() * 8;
1350 }
1351 
1352 int ARMTTIImpl::getInterleavedMemoryOpCost(
1353     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1354     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1355     bool UseMaskForCond, bool UseMaskForGaps) {
1356   assert(Factor >= 2 && "Invalid interleave factor");
1357   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1358 
1359   // vldN/vstN don't support vector types with i64/f64 elements.
1360   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1361 
1362   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1363       !UseMaskForCond && !UseMaskForGaps) {
1364     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1365     auto *SubVecTy =
1366         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1367 
1368     // vldN/vstN only support legal vector types of size 64 or 128 bits.
1369     // Accesses having vector types that are a multiple of 128 bits can be
1370     // matched to more than one vldN/vstN instruction.
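         // A sketch, assuming v4i32 is a legal interleaved access type here: a
         // Factor == 2 interleaved load with v4i32 sub-vectors is a single
         // legal 128-bit access, so the cost below is 2 * BaseCost * 1.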
1371     int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
1372     if (NumElts % Factor == 0 &&
1373         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
1374       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1375 
1376     // Some smaller-than-legal interleaved patterns are cheap, as we can make
1377     // use of the vmovn or vrev patterns to interleave a standard load. This is
1378     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1379     // promoted differently). The cost of 2 here is then a load and vrev or
1380     // vmovn.
1381     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1382         VecTy->isIntOrIntVectorTy() &&
1383         DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
1384       return 2 * BaseCost;
1385   }
1386 
1387   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1388                                            Alignment, AddressSpace, CostKind,
1389                                            UseMaskForCond, UseMaskForGaps);
1390 }
1391 
1392 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1393                                             const Value *Ptr, bool VariableMask,
1394                                             Align Alignment,
1395                                             TTI::TargetCostKind CostKind,
1396                                             const Instruction *I) {
1397   using namespace PatternMatch;
1398   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1399     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1400                                          Alignment, CostKind, I);
1401 
1402   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1403   auto *VTy = cast<FixedVectorType>(DataTy);
1404 
1405   // TODO: Splitting, once we do that.
1406 
1407   unsigned NumElems = VTy->getNumElements();
1408   unsigned EltSize = VTy->getScalarSizeInBits();
1409   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1410 
1411   // For now, it is assumed that for the MVE gather instructions the loads are
1412   // all effectively serialised. This means the cost is the scalar cost
1413   // multiplied by the number of elements being loaded. This is possibly very
1414   // conservative, but even so we still end up vectorising loops because the
1415   // cost per iteration for many loops is lower than for scalar loops.
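       // For example, a gather of <4 x i32> that legalizes in one step is
       // costed as 4 * 1 * getMVEVectorCostFactor().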
1416   unsigned VectorCost = NumElems * LT.first * ST->getMVEVectorCostFactor();
1417   // The scalarization cost should be a lot higher. We use the number of vector
1418   // elements plus the scalarization overhead.
1419   unsigned ScalarCost =
1420       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});
1421 
1422   if (EltSize < 8 || Alignment < EltSize / 8)
1423     return ScalarCost;
1424 
1425   unsigned ExtSize = EltSize;
1426   // Check whether there's a single user that asks for an extended type
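       // For example, a gather of <4 x i16> whose single user is
       // "zext <4 x i16> %g to <4 x i32>" sets ExtSize to 32
       // (TypeSize 32 * NumElems 4 == 128), so it is still treated as a
       // full-width gather below.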
1427   if (I != nullptr) {
1428     // Depending on the caller of this function, a gather instruction will
1429     // either have opcode Instruction::Load or be a call to the masked_gather
1430     // intrinsic.
1431     if ((I->getOpcode() == Instruction::Load ||
1432          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1433         I->hasOneUse()) {
1434       const User *Us = *I->users().begin();
1435       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1436         // Only allow valid type combinations.
1437         unsigned TypeSize =
1438             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1439         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1440              (TypeSize == 16 && EltSize == 8)) &&
1441             TypeSize * NumElems == 128) {
1442           ExtSize = TypeSize;
1443         }
1444       }
1445     }
1446     // Check whether the input data needs to be truncated
1447     TruncInst *T;
1448     if ((I->getOpcode() == Instruction::Store ||
1449          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1450         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1451       // Only allow valid type combinations
1452       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1453       if (((EltSize == 16 && TypeSize == 32) ||
1454            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1455           TypeSize * NumElems == 128)
1456         ExtSize = TypeSize;
1457     }
1458   }
1459 
1460   if (ExtSize * NumElems != 128 || NumElems < 4)
1461     return ScalarCost;
1462 
1463   // Any (aligned) i32 gather will not need to be scalarised.
1464   if (ExtSize == 32)
1465     return VectorCost;
1466   // For smaller types, we need to ensure that the gep's inputs are correctly
1467   // extended from a small enough value. Other sizes (including i64) are
1468   // scalarized for now.
1469   if (ExtSize != 8 && ExtSize != 16)
1470     return ScalarCost;
1471 
1472   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1473     Ptr = BC->getOperand(0);
1474   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1475     if (GEP->getNumOperands() != 2)
1476       return ScalarCost;
1477     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1478     // Scale needs to be correct (which is only relevant for i16s).
1479     if (Scale != 1 && Scale * 8 != ExtSize)
1480       return ScalarCost;
1481     // And we need to zext (not sext) the indexes from a small enough type.
1482     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1483       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1484         return VectorCost;
1485     }
1486     return ScalarCost;
1487   }
1488   return ScalarCost;
1489 }
1490 
1491 int ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1492                                            bool IsPairwiseForm,
1493                                            TTI::TargetCostKind CostKind) {
1494   EVT ValVT = TLI->getValueType(DL, ValTy);
1495   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1496   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
1497     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1498                                              CostKind);
1499 
1500   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1501 
1502   static const CostTblEntry CostTblAdd[]{
1503       {ISD::ADD, MVT::v16i8, 1},
1504       {ISD::ADD, MVT::v8i16, 1},
1505       {ISD::ADD, MVT::v4i32, 1},
1506   };
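       // A reduction over a single legal 128-bit vector is modelled as one
       // instruction (MVE's VADDV-style add-across-vector), scaled by the cost
       // factor and by LT.first for types that need splitting.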
1507   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1508     return Entry->Cost * ST->getMVEVectorCostFactor() * LT.first;
1509 
1510   return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1511                                            CostKind);
1512 }
1513 
1514 InstructionCost
1515 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
1516                                         Type *ResTy, VectorType *ValTy,
1517                                         TTI::TargetCostKind CostKind) {
1518   EVT ValVT = TLI->getValueType(DL, ValTy);
1519   EVT ResVT = TLI->getValueType(DL, ResTy);
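       // The accepted (ValTy, ResTy) combinations below roughly correspond to
       // MVE's across-vector reductions (VADDV/VADDLV and VMLADAV/VMLALDAV);
       // this mapping is an architectural note, not derived from this file.
       // Anything else falls back to the base implementation.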
1520   if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1521     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1522     if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) ||
1523         (LT.second == MVT::v8i16 &&
1524          ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) ||
1525         (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64))
1526       return ST->getMVEVectorCostFactor() * LT.first;
1527   }
1528 
1529   return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
1530                                             CostKind);
1531 }
1532 
1533 int ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1534                                       TTI::TargetCostKind CostKind) {
1535   switch (ICA.getID()) {
1536   case Intrinsic::get_active_lane_mask:
1537     // Currently we make a somewhat optimistic assumption that
1538     // active_lane_masks are always free. In reality one may be freely folded
1539     // into a tail-predicated loop, expanded into a VCTP or expanded into a lot
1540     // of add/icmp code. We may need to improve this in the future, but being
1541     // able to detect if it is free or not involves looking at a lot of other
1542     // code. We currently assume that the vectorizer inserted these, and knew
1543     // what it was doing in adding one.
1544     if (ST->hasMVEIntegerOps())
1545       return 0;
1546     break;
1547   case Intrinsic::sadd_sat:
1548   case Intrinsic::ssub_sat:
1549   case Intrinsic::uadd_sat:
1550   case Intrinsic::usub_sat: {
1551     if (!ST->hasMVEIntegerOps())
1552       break;
1553     // Get the return type, either directly or from ICA.ReturnType and ICA.VF.
1554     Type *VT = ICA.getReturnType();
1555     if (!VT->isVectorTy() && !ICA.getVectorFactor().isScalar())
1556       VT = VectorType::get(VT, ICA.getVectorFactor());
1557 
1558     std::pair<int, MVT> LT =
1559         TLI->getTypeLegalizationCost(DL, VT);
1560     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1561         LT.second == MVT::v16i8) {
1562       // This is a base cost of 1 for the vqadd, plus 3 extra shifts if we
1563       // need to extend the type, as it uses shr(qadd(shl, shl)).
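           // A sketch of the extended case, assuming v4i8 is promoted to v4i32
           // during type legalization: a <4 x i8> sadd.sat then has
           // LT.second == v4i32, whose scalar size (32) differs from the
           // return type's (8), so Instrs == 4.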
1564       unsigned Instrs = LT.second.getScalarSizeInBits() ==
1565                                 ICA.getReturnType()->getScalarSizeInBits()
1566                             ? 1
1567                             : 4;
1568       return LT.first * ST->getMVEVectorCostFactor() * Instrs;
1569     }
1570     break;
1571   }
1572   }
1573 
1574   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1575 }
1576 
1577 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1578   if (!F->isIntrinsic())
1579     return BaseT::isLoweredToCall(F);
1580 
1581   // Assume all Arm-specific intrinsics map to an instruction.
1582   if (F->getName().startswith("llvm.arm"))
1583     return false;
1584 
1585   switch (F->getIntrinsicID()) {
1586   default: break;
1587   case Intrinsic::powi:
1588   case Intrinsic::sin:
1589   case Intrinsic::cos:
1590   case Intrinsic::pow:
1591   case Intrinsic::log:
1592   case Intrinsic::log10:
1593   case Intrinsic::log2:
1594   case Intrinsic::exp:
1595   case Intrinsic::exp2:
1596     return true;
1597   case Intrinsic::sqrt:
1598   case Intrinsic::fabs:
1599   case Intrinsic::copysign:
1600   case Intrinsic::floor:
1601   case Intrinsic::ceil:
1602   case Intrinsic::trunc:
1603   case Intrinsic::rint:
1604   case Intrinsic::nearbyint:
1605   case Intrinsic::round:
1606   case Intrinsic::canonicalize:
1607   case Intrinsic::lround:
1608   case Intrinsic::llround:
1609   case Intrinsic::lrint:
1610   case Intrinsic::llrint:
1611     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1612       return true;
1613     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1614       return true;
1615     // Some operations can be handled by vector instructions; assume that
1616     // unsupported vectors will be expanded into supported scalar ones.
1617     // TODO: Handle scalar operations properly.
1618     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1619   case Intrinsic::masked_store:
1620   case Intrinsic::masked_load:
1621   case Intrinsic::masked_gather:
1622   case Intrinsic::masked_scatter:
1623     return !ST->hasMVEIntegerOps();
1624   case Intrinsic::sadd_with_overflow:
1625   case Intrinsic::uadd_with_overflow:
1626   case Intrinsic::ssub_with_overflow:
1627   case Intrinsic::usub_with_overflow:
1628   case Intrinsic::sadd_sat:
1629   case Intrinsic::uadd_sat:
1630   case Intrinsic::ssub_sat:
1631   case Intrinsic::usub_sat:
1632     return false;
1633   }
1634 
1635   return BaseT::isLoweredToCall(F);
1636 }
1637 
1638 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
1639   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1640   EVT VT = TLI->getValueType(DL, I.getType(), true);
1641   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1642     return true;
1643 
1644   // Check if an intrinsic will be lowered to a call and assume that any
1645   // other CallInst will generate a bl.
1646   if (auto *Call = dyn_cast<CallInst>(&I)) {
1647     if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
1648       switch(II->getIntrinsicID()) {
1649         case Intrinsic::memcpy:
1650         case Intrinsic::memset:
1651         case Intrinsic::memmove:
1652           return getNumMemOps(II) == -1;
1653         default:
1654           if (const Function *F = Call->getCalledFunction())
1655             return isLoweredToCall(F);
1656       }
1657     }
1658     return true;
1659   }
1660 
1661   // FPv5 provides conversions between integer, double-precision,
1662   // single-precision, and half-precision formats.
1663   switch (I.getOpcode()) {
1664   default:
1665     break;
1666   case Instruction::FPToSI:
1667   case Instruction::FPToUI:
1668   case Instruction::SIToFP:
1669   case Instruction::UIToFP:
1670   case Instruction::FPTrunc:
1671   case Instruction::FPExt:
1672     return !ST->hasFPARMv8Base();
1673   }
1674 
1675   // FIXME: Unfortunately the approach of checking the Operation Action does
1676   // not catch all cases of Legalization that use library calls. Our
1677   // Legalization step categorizes some transformations into library calls as
1678   // Custom, Expand or even Legal when doing type legalization. So for now
1679   // we have to special-case, for instance, the SDIV of 64-bit integers and
1680   // the use of floating-point emulation.
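       // For example, a 64-bit sdiv/srem is typically lowered to a runtime call
       // such as __aeabi_ldivmod on AEABI targets, hence the special case below.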
1681   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1682     switch (ISD) {
1683     default:
1684       break;
1685     case ISD::SDIV:
1686     case ISD::UDIV:
1687     case ISD::SREM:
1688     case ISD::UREM:
1689     case ISD::SDIVREM:
1690     case ISD::UDIVREM:
1691       return true;
1692     }
1693   }
1694 
1695   // Assume all other non-float operations are supported.
1696   if (!VT.isFloatingPoint())
1697     return false;
1698 
1699   // We'll need a library call to handle most floats when using soft-float.
1700   if (TLI->useSoftFloat()) {
1701     switch (I.getOpcode()) {
1702     default:
1703       return true;
1704     case Instruction::Alloca:
1705     case Instruction::Load:
1706     case Instruction::Store:
1707     case Instruction::Select:
1708     case Instruction::PHI:
1709       return false;
1710     }
1711   }
1712 
1713   // We'll need a libcall to perform double precision operations on a single
1714   // precision only FPU.
1715   if (I.getType()->isDoubleTy() && !ST->hasFP64())
1716     return true;
1717 
1718   // Likewise for half precision arithmetic.
1719   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1720     return true;
1721 
1722   return false;
1723 }
1724 
1725 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1726                                           AssumptionCache &AC,
1727                                           TargetLibraryInfo *LibInfo,
1728                                           HardwareLoopInfo &HWLoopInfo) {
1729   // Low-overhead branches are only supported in the 'low-overhead branch'
1730   // extension of v8.1-m.
1731   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1732     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1733     return false;
1734   }
1735 
1736   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1737     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1738     return false;
1739   }
1740 
1741   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1742   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1743     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1744     return false;
1745   }
1746 
1747   const SCEV *TripCountSCEV =
1748     SE.getAddExpr(BackedgeTakenCount,
1749                   SE.getOne(BackedgeTakenCount->getType()));
1750 
1751   // We need to store the trip count in LR, a 32-bit register.
1752   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1753     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32 bits\n");
1754     return false;
1755   }
1756 
1757   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1758   // point in generating a hardware loop if that's going to happen.
1759 
1760   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1761     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1762       switch (Call->getIntrinsicID()) {
1763       default:
1764         break;
1765       case Intrinsic::start_loop_iterations:
1766       case Intrinsic::test_set_loop_iterations:
1767       case Intrinsic::loop_decrement:
1768       case Intrinsic::loop_decrement_reg:
1769         return true;
1770       }
1771     }
1772     return false;
1773   };
1774 
1775   // Scan the instructions to see if there's any that we know will turn into a
1776   // call or if this loop is already a low-overhead loop or will become a tail
1777   // predicated loop.
1778   bool IsTailPredLoop = false;
1779   auto ScanLoop = [&](Loop *L) {
1780     for (auto *BB : L->getBlocks()) {
1781       for (auto &I : *BB) {
1782         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
1783             isa<InlineAsm>(I)) {
1784           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1785           return false;
1786         }
1787         if (auto *II = dyn_cast<IntrinsicInst>(&I))
1788           IsTailPredLoop |=
1789               II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
1790               II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
1791               II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
1792               II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
1793               II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
1794       }
1795     }
1796     return true;
1797   };
1798 
1799   // Visit inner loops.
1800   for (auto Inner : *L)
1801     if (!ScanLoop(Inner))
1802       return false;
1803 
1804   if (!ScanLoop(L))
1805     return false;
1806 
1807   // TODO: Check whether the trip count calculation is expensive. If L is the
1808   // inner loop but we know it has a low trip count, calculating that trip
1809   // count (in the parent loop) may be detrimental.
1810 
1811   LLVMContext &C = L->getHeader()->getContext();
1812   HWLoopInfo.CounterInReg = true;
1813   HWLoopInfo.IsNestingLegal = false;
1814   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
1815   HWLoopInfo.CountType = Type::getInt32Ty(C);
1816   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1817   return true;
1818 }
1819 
1820 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1821   // We don't allow icmps, and because we only look at single-block loops,
1822   // we simply count the icmps, i.e. there should only be 1 for the backedge.
1823   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1824     return false;
1825 
1826   if (isa<FCmpInst>(&I))
1827     return false;
1828 
1829   // We could allow extending/narrowing FP loads/stores, but codegen is
1830   // too inefficient so reject this for now.
1831   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1832     return false;
1833 
1834   // Extends have to be extending-loads
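       // e.g. "%w = zext i8 %x to i32" is only allowed when %x is a load with
       // no other users, so the pair can become a single extending load.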
1835   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
1836     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1837       return false;
1838 
1839   // Truncs have to be narrowing-stores
1840   if (isa<TruncInst>(&I) )
1841     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1842       return false;
1843 
1844   return true;
1845 }
1846 
1847 // To set up a tail-predicated loop, we need to know the total number of
1848 // elements processed by that loop. Thus, we need to determine the element
1849 // size and:
1850 // 1) it should be uniform for all operations in the vector loop, so we
1851 //    e.g. don't want any widening/narrowing operations.
1852 // 2) it should be smaller than i64s because we don't have vector operations
1853 //    that work on i64s.
1854 // 3) we don't want elements to be reversed or shuffled, to make sure the
1855 //    tail-predication masks/predicates the right lanes.
1856 //
1857 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1858                                  const DataLayout &DL,
1859                                  const LoopAccessInfo *LAI) {
1860   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1861 
1862   // If there are live-out values, it is probably a reduction. We can predicate
1863   // most reduction operations freely under MVE using a combination of
1864   // prefer-predicated-reduction-select and inloop reductions. We limit this to
1865   // floating point and integer reductions, but don't check for operators
1866   // specifically here. If the value ends up not being a reduction (and so the
1867   // vectorizer cannot tailfold the loop), we should fall back to standard
1868   // vectorization automatically.
1869   SmallVector< Instruction *, 8 > LiveOuts;
1870   LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
1871   bool ReductionsDisabled =
1872       EnableTailPredication == TailPredication::EnabledNoReductions ||
1873       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
1874 
1875   for (auto *I : LiveOuts) {
1876     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
1877         !I->getType()->isHalfTy()) {
1878       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
1879                            "live-out value\n");
1880       return false;
1881     }
1882     if (ReductionsDisabled) {
1883       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
1884       return false;
1885     }
1886   }
1887 
1888   // Next, check that all instructions can be tail-predicated.
1889   PredicatedScalarEvolution PSE = LAI->getPSE();
1890   SmallVector<Instruction *, 16> LoadStores;
1891   int ICmpCount = 0;
1892 
1893   for (BasicBlock *BB : L->blocks()) {
1894     for (Instruction &I : BB->instructionsWithoutDebug()) {
1895       if (isa<PHINode>(&I))
1896         continue;
1897       if (!canTailPredicateInstruction(I, ICmpCount)) {
1898         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1899         return false;
1900       }
1901 
1902       Type *T  = I.getType();
1903       if (T->isPointerTy())
1904         T = T->getPointerElementType();
1905 
1906       if (T->getScalarSizeInBits() > 32) {
1907         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1908         return false;
1909       }
1910       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1911         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1912         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1913         if (NextStride == 1) {
1914           // TODO: for now only allow consecutive strides of 1. We could support
1915           // other strides as long as they are uniform, but let's keep it simple
1916           // for now.
1917           continue;
1918         } else if (NextStride == -1 ||
1919                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
1920                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
1921           LLVM_DEBUG(dbgs()
1922                      << "Consecutive strides of 2 found, vld2/vst2 can't "
1923                         "be tail-predicated.\n");
1924           return false;
1925           // TODO: don't tail predicate if there is a reversed load?
1926         } else if (EnableMaskedGatherScatters) {
1927           // Gathers/scatters do allow loading from arbitrary strides, at
1928           // least if they are loop invariant.
1929           // TODO: Loop variant strides should in theory work, too, but
1930           // this requires further testing.
1931           const SCEV *PtrScev =
1932               replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
1933           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
1934             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1935             if (PSE.getSE()->isLoopInvariant(Step, L))
1936               continue;
1937           }
1938         }
1939         LLVM_DEBUG(dbgs() << "Bad stride found, can't "
1940                              "tail-predicate.\n");
1941         return false;
1942       }
1943     }
1944   }
1945 
1946   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1947   return true;
1948 }
1949 
1950 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1951                                              ScalarEvolution &SE,
1952                                              AssumptionCache &AC,
1953                                              TargetLibraryInfo *TLI,
1954                                              DominatorTree *DT,
1955                                              const LoopAccessInfo *LAI) {
1956   if (!EnableTailPredication) {
1957     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
1958     return false;
1959   }
1960 
1961   // Creating a predicated vector loop is the first step for generating a
1962   // tail-predicated hardware loop, for which we need the MVE masked
1963   // load/stores instructions:
1964   if (!ST->hasMVEIntegerOps())
1965     return false;
1966 
1967   // For now, restrict this to single block loops.
1968   if (L->getNumBlocks() > 1) {
1969     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1970                          "loop.\n");
1971     return false;
1972   }
1973 
1974   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
1975 
1976   HardwareLoopInfo HWLoopInfo(L);
1977   if (!HWLoopInfo.canAnalyze(*LI)) {
1978     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1979                          "analyzable.\n");
1980     return false;
1981   }
1982 
1983   // This checks if we have the low-overhead branch architecture
1984   // extension, and if we will create a hardware-loop:
1985   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1986     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1987                          "profitable.\n");
1988     return false;
1989   }
1990 
1991   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1992     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1993                          "a candidate.\n");
1994     return false;
1995   }
1996 
1997   return canTailPredicateLoop(L, LI, SE, DL, LAI);
1998 }
1999 
2000 bool ARMTTIImpl::emitGetActiveLaneMask() const {
2001   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2002     return false;
2003 
2004   // Intrinsic @llvm.get.active.lane.mask is supported.
2005   // It is used in the MVETailPredication pass, which requires the number of
2006   // elements processed by this vector loop to set up the tail-predicated
2007   // loop.
2008   return true;
2009 }
2010 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2011                                          TTI::UnrollingPreferences &UP) {
2012   // Only currently enable these preferences for M-Class cores.
2013   if (!ST->isMClass())
2014     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
2015 
2016   // Disable loop unrolling for Oz and Os.
2017   UP.OptSizeThreshold = 0;
2018   UP.PartialOptSizeThreshold = 0;
2019   if (L->getHeader()->getParent()->hasOptSize())
2020     return;
2021 
2022   // Only enable on Thumb-2 targets.
2023   if (!ST->isThumb2())
2024     return;
2025 
2026   SmallVector<BasicBlock*, 4> ExitingBlocks;
2027   L->getExitingBlocks(ExitingBlocks);
2028   LLVM_DEBUG(dbgs() << "Loop has:\n"
2029                     << "Blocks: " << L->getNumBlocks() << "\n"
2030                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2031 
2032   // Only allow one exit other than the latch. This acts as an early exit
2033   // as it mirrors the profitability calculation of the runtime unroller.
2034   if (ExitingBlocks.size() > 2)
2035     return;
2036 
2037   // Limit the CFG of the loop body for targets with a branch predictor.
2038   // Allowing 4 blocks permits if-then-else diamonds in the body.
2039   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2040     return;
2041 
2042   // Don't unroll vectorized loops, including the remainder loop
2043   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2044     return;
2045 
2046   // Scan the loop: don't unroll loops with calls as this could prevent
2047   // inlining.
2048   unsigned Cost = 0;
2049   for (auto *BB : L->getBlocks()) {
2050     for (auto &I : *BB) {
2051       // Don't unroll vectorised loops. MVE does not benefit from unrolling as
2052       // much as scalar code does.
2053       if (I.getType()->isVectorTy())
2054         return;
2055 
2056       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2057         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2058           if (!isLoweredToCall(F))
2059             continue;
2060         }
2061         return;
2062       }
2063 
2064       SmallVector<const Value*, 4> Operands(I.operand_values());
2065       Cost +=
2066         getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
2067     }
2068   }
2069 
2070   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2071 
2072   UP.Partial = true;
2073   UP.Runtime = true;
2074   UP.UpperBound = true;
2075   UP.UnrollRemainder = true;
2076   UP.DefaultUnrollRuntimeCount = 4;
2077   UP.UnrollAndJam = true;
2078   UP.UnrollAndJamInnerLoopThreshold = 60;
2079 
2080   // Force-unrolling small loops can be very useful because of the
2081   // branch-taken cost of the backedge.
2082   if (Cost < 12)
2083     UP.Force = true;
2084 }
2085 
2086 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2087                                        TTI::PeelingPreferences &PP) {
2088   BaseT::getPeelingPreferences(L, SE, PP);
2089 }
2090 
2091 bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
2092                                        TTI::ReductionFlags Flags) const {
2093   return ST->hasMVEIntegerOps();
2094 }
2095 
2096 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2097                                        TTI::ReductionFlags Flags) const {
2098   if (!ST->hasMVEIntegerOps())
2099     return false;
2100 
2101   unsigned ScalarBits = Ty->getScalarSizeInBits();
2102   switch (Opcode) {
2103   case Instruction::Add:
2104     return ScalarBits <= 64;
2105   default:
2106     return false;
2107   }
2108 }
2109 
2110 bool ARMTTIImpl::preferPredicatedReductionSelect(
2111     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2112   if (!ST->hasMVEIntegerOps())
2113     return false;
2114   return true;
2115 }
2116