xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp (revision eea7c61590ae8968b3f1f609cf0bc8633222a94f)
1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "armtti"
44 
45 static cl::opt<bool> EnableMaskedLoadStores(
46   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47   cl::desc("Enable the generation of masked loads and stores"));
48 
49 static cl::opt<bool> DisableLowOverheadLoops(
50   "disable-arm-loloops", cl::Hidden, cl::init(false),
51   cl::desc("Disable the generation of low-overhead loops"));
52 
53 static cl::opt<bool>
54     AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
55                   cl::desc("Enable the generation of WLS loops"));
56 
57 extern cl::opt<TailPredication::Mode> EnableTailPredication;
58 
59 extern cl::opt<bool> EnableMaskedGatherScatters;
60 
61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
62 
63 /// Convert a vector load intrinsic into a simple llvm load instruction.
64 /// This is beneficial when the underlying object being addressed comes
65 /// from a constant, since we get constant-folding for free.
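/// As an illustrative sketch (value names hypothetical): given IR along the
/// lines of
///   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8* %p, i32 8)
/// and a known alignment of at least 8 for %p, the call becomes
///   %b = bitcast i8* %p to <4 x i32>*
///   %v = load <4 x i32>, <4 x i32>* %b, align 8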
66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
67                                InstCombiner::BuilderTy &Builder) {
68   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
69 
70   if (!IntrAlign)
71     return nullptr;
72 
73   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
74                            ? MemAlign
75                            : IntrAlign->getLimitedValue();
76 
77   if (!isPowerOf2_32(Alignment))
78     return nullptr;
79 
80   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
81                                           PointerType::get(II.getType(), 0));
82   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
83 }
84 
85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
86                                      const Function *Callee) const {
87   const TargetMachine &TM = getTLI()->getTargetMachine();
88   const FeatureBitset &CallerBits =
89       TM.getSubtargetImpl(*Caller)->getFeatureBits();
90   const FeatureBitset &CalleeBits =
91       TM.getSubtargetImpl(*Callee)->getFeatureBits();
92 
93   // To inline a callee, all features not in the allowed list must match exactly.
94   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
95                     (CalleeBits & ~InlineFeaturesAllowed);
96   // For features in the allowed list, the callee's features must be a subset of
97   // the caller's.
98   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
99                      (CalleeBits & InlineFeaturesAllowed);
100   return MatchExact && MatchSubset;
101 }
102 
103 TTI::AddressingModeKind
104 ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
105                                        ScalarEvolution *SE) const {
106   if (ST->hasMVEIntegerOps())
107     return TTI::AMK_PostIndexed;
108 
109   if (L->getHeader()->getParent()->hasOptSize())
110     return TTI::AMK_None;
111 
112   if (ST->isMClass() && ST->isThumb2() &&
113       L->getNumBlocks() == 1)
114     return TTI::AMK_PreIndexed;
115 
116   return TTI::AMK_None;
117 }
118 
119 Optional<Instruction *>
120 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
121   using namespace PatternMatch;
122   Intrinsic::ID IID = II.getIntrinsicID();
123   switch (IID) {
124   default:
125     break;
126   case Intrinsic::arm_neon_vld1: {
127     Align MemAlign =
128         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
129                           &IC.getAssumptionCache(), &IC.getDominatorTree());
130     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
131       return IC.replaceInstUsesWith(II, V);
132     }
133     break;
134   }
135 
136   case Intrinsic::arm_neon_vld2:
137   case Intrinsic::arm_neon_vld3:
138   case Intrinsic::arm_neon_vld4:
139   case Intrinsic::arm_neon_vld2lane:
140   case Intrinsic::arm_neon_vld3lane:
141   case Intrinsic::arm_neon_vld4lane:
142   case Intrinsic::arm_neon_vst1:
143   case Intrinsic::arm_neon_vst2:
144   case Intrinsic::arm_neon_vst3:
145   case Intrinsic::arm_neon_vst4:
146   case Intrinsic::arm_neon_vst2lane:
147   case Intrinsic::arm_neon_vst3lane:
148   case Intrinsic::arm_neon_vst4lane: {
149     Align MemAlign =
150         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
151                           &IC.getAssumptionCache(), &IC.getDominatorTree());
152     unsigned AlignArg = II.getNumArgOperands() - 1;
153     Value *AlignArgOp = II.getArgOperand(AlignArg);
154     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
155     if (Align && *Align < MemAlign) {
156       return IC.replaceOperand(
157           II, AlignArg,
158           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
159                            false));
160     }
161     break;
162   }
163 
164   case Intrinsic::arm_mve_pred_i2v: {
165     Value *Arg = II.getArgOperand(0);
166     Value *ArgArg;
167     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
168                        PatternMatch::m_Value(ArgArg))) &&
169         II.getType() == ArgArg->getType()) {
170       return IC.replaceInstUsesWith(II, ArgArg);
171     }
172     Constant *XorMask;
173     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
174                              PatternMatch::m_Value(ArgArg)),
175                          PatternMatch::m_Constant(XorMask))) &&
176         II.getType() == ArgArg->getType()) {
177       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
178         if (CI->getValue().trunc(16).isAllOnesValue()) {
179           auto TrueVector = IC.Builder.CreateVectorSplat(
180               cast<FixedVectorType>(II.getType())->getNumElements(),
181               IC.Builder.getTrue());
182           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
183         }
184       }
185     }
186     KnownBits ScalarKnown(32);
187     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
188                                 ScalarKnown, 0)) {
189       return &II;
190     }
191     break;
192   }
193   case Intrinsic::arm_mve_pred_v2i: {
194     Value *Arg = II.getArgOperand(0);
195     Value *ArgArg;
196     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
197                        PatternMatch::m_Value(ArgArg)))) {
198       return IC.replaceInstUsesWith(II, ArgArg);
199     }
200     if (!II.getMetadata(LLVMContext::MD_range)) {
201       Type *IntTy32 = Type::getInt32Ty(II.getContext());
202       Metadata *M[] = {
203           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
204           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))};
205       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
206       return &II;
207     }
208     break;
209   }
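  // Illustrative round-trip fold for the two cases above (value names
  // hypothetical):
  //   %i  = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %p)
  //   %p2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %i)
  // %p2 is replaced by %p, since i2v(v2i(x)) == x (and symmetrically for
  // v2i(i2v(x))).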
210   case Intrinsic::arm_mve_vadc:
211   case Intrinsic::arm_mve_vadc_predicated: {
212     unsigned CarryOp =
213         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
214     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
215            "Bad type for intrinsic!");
216 
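    // Only bit 29 of the carry operand matters: it holds the carry flag, in
    // the same position as FPSCR.C. SimplifyDemandedBits can therefore strip
    // away any computation feeding the other bits.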
217     KnownBits CarryKnown(32);
218     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
219                                 CarryKnown)) {
220       return &II;
221     }
222     break;
223   }
224   case Intrinsic::arm_mve_vmldava: {
225     Instruction *I = cast<Instruction>(&II);
226     if (I->hasOneUse()) {
227       auto *User = cast<Instruction>(*I->user_begin());
228       Value *OpZ;
229       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
230           match(I->getOperand(3), m_Zero())) {
231         Value *OpX = I->getOperand(4);
232         Value *OpY = I->getOperand(5);
233         Type *OpTy = OpX->getType();
234 
235         IC.Builder.SetInsertPoint(User);
236         Value *V =
237             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
238                                        {I->getOperand(0), I->getOperand(1),
239                                         I->getOperand(2), OpZ, OpX, OpY});
240 
241         IC.replaceInstUsesWith(*User, V);
242         return IC.eraseInstFromFunction(*User);
243       }
244     }
245     return None;
246   }
247   }
248   return None;
249 }
250 
251 InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
252                                           TTI::TargetCostKind CostKind) {
253   assert(Ty->isIntegerTy());
254 
255   unsigned Bits = Ty->getPrimitiveSizeInBits();
256   if (Bits == 0 || Imm.getActiveBits() >= 64)
257     return 4;
258 
259   int64_t SImmVal = Imm.getSExtValue();
260   uint64_t ZImmVal = Imm.getZExtValue();
261   if (!ST->isThumb()) {
262     if ((SImmVal >= 0 && SImmVal < 65536) ||
263         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
264         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
265       return 1;
266     return ST->hasV6T2Ops() ? 2 : 3;
267   }
268   if (ST->isThumb2()) {
269     if ((SImmVal >= 0 && SImmVal < 65536) ||
270         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
271         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
272       return 1;
273     return ST->hasV6T2Ops() ? 2 : 3;
274   }
275   // Thumb1: any i8 immediate costs 1.
276   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
277     return 1;
278   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
279     return 2;
280   // Load from constantpool.
281   return 3;
282 }
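
// Illustrative examples for the A32 path above: 0x00FF0000 is a valid
// rotated 8-bit immediate, so it costs 1; an arbitrary constant such as
// 0x12345678 costs 2 with v6t2 (MOVW/MOVT) and 3 otherwise (e.g. a
// constant-pool load).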
283 
284 // Constants smaller than 256 fit in the immediate field of
285 // Thumb1 instructions, so we return a cost of zero, and 1 otherwise.
286 InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
287                                                   const APInt &Imm, Type *Ty) {
288   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
289     return 0;
290 
291   return 1;
292 }
293 
294 // Checks whether Inst is part of a min(max()) or max(min()) pattern
295 // that will match to an SSAT instruction
296 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
297   Value *LHS, *RHS;
298   ConstantInt *C;
299   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
300 
301   if (InstSPF == SPF_SMAX &&
302       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
303       C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
304 
305     auto isSSatMin = [&](Value *MinInst) {
306       if (isa<SelectInst>(MinInst)) {
307         Value *MinLHS, *MinRHS;
308         ConstantInt *MinC;
309         SelectPatternFlavor MinSPF =
310             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
311         if (MinSPF == SPF_SMIN &&
312             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
313             MinC->getValue() == ((-Imm) - 1))
314           return true;
315       }
316       return false;
317     };
318 
319     if (isSSatMin(Inst->getOperand(1)) ||
320         (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
321                                isSSatMin(*(++Inst->user_begin())))))
322       return true;
323   }
324   return false;
325 }
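
// For example (value names hypothetical), with Imm == -128 the pattern is
// smax(smin(%x, 127), -128), a signed saturate to 8 bits:
//   %c1  = icmp slt i32 %x, 127
//   %min = select i1 %c1, i32 %x, i32 127     ; smin(%x, 127), 127 == -Imm - 1
//   %c2  = icmp sgt i32 %min, -128
//   %res = select i1 %c2, i32 %min, i32 -128  ; smax(..., -128), -128 == Imm
// which codegen can select to "ssat r0, #8, r1".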
326 
327 InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
328                                               const APInt &Imm, Type *Ty,
329                                               TTI::TargetCostKind CostKind,
330                                               Instruction *Inst) {
331   // Division by a constant can be turned into multiplication, but only if we
332   // know it's constant. So it's not so much that the immediate is cheap (it's
333   // not), but that the alternative is worse.
334   // FIXME: this is probably unneeded with GlobalISel.
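  // For example, "udiv i32 %x, 10" is typically lowered to a multiply-high by
  // the magic constant 0xCCCCCCCD followed by a shift right by 3, so hoisting
  // the 10 into a register would only defeat that rewrite.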
335   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
336        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
337       Idx == 1)
338     return 0;
339 
340   // Leave any gep offsets for CodeGenPrepare, which will do a better job at
341   // splitting any large offsets.
342   if (Opcode == Instruction::GetElementPtr && Idx != 0)
343     return 0;
344 
345   if (Opcode == Instruction::And) {
346     // UXTB/UXTH
347     if (Imm == 255 || Imm == 65535)
348       return 0;
349     // Conversion to BIC is free, and means we can use ~Imm instead.
350     return std::min(getIntImmCost(Imm, Ty, CostKind),
351                     getIntImmCost(~Imm, Ty, CostKind));
352   }
353 
354   if (Opcode == Instruction::Add)
355     // Conversion to SUB is free, and means we can use -Imm instead.
356     return std::min(getIntImmCost(Imm, Ty, CostKind),
357                     getIntImmCost(-Imm, Ty, CostKind));
358 
359   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
360       Ty->getIntegerBitWidth() == 32) {
361     int64_t NegImm = -Imm.getSExtValue();
362     if (ST->isThumb2() && NegImm < 1<<12)
363       // icmp X, #-C -> cmn X, #C
364       return 0;
365     if (ST->isThumb() && NegImm < 1<<8)
366       // icmp X, #-C -> adds X, #C
367       return 0;
368   }
369 
370   // xor a, -1 can always be folded to MVN
371   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
372     return 0;
373 
374   // Ensure that negative constants of min(max()) or max(min()) patterns that
375   // match to SSAT instructions don't get hoisted.
376   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
377       Ty->getIntegerBitWidth() <= 32) {
378     if (isSSATMinMaxPattern(Inst, Imm) ||
379         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
380          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
381       return 0;
382   }
383 
384   return getIntImmCost(Imm, Ty, CostKind);
385 }
386 
387 InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
388                                            TTI::TargetCostKind CostKind,
389                                            const Instruction *I) {
390   if (CostKind == TTI::TCK_RecipThroughput &&
391       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
392     // FIXME: The vectorizer is highly sensitive to the cost of these
393     // instructions, which suggests that it may be using the costs incorrectly.
394     // But, for now, just make them free to avoid performance regressions for
395     // vector targets.
396     return 0;
397   }
398   return BaseT::getCFInstrCost(Opcode, CostKind, I);
399 }
400 
401 InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
402                                              Type *Src,
403                                              TTI::CastContextHint CCH,
404                                              TTI::TargetCostKind CostKind,
405                                              const Instruction *I) {
406   int ISD = TLI->InstructionOpcodeToISD(Opcode);
407   assert(ISD && "Invalid opcode");
408 
409   // TODO: Allow non-throughput costs that aren't binary.
410   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
411     if (CostKind != TTI::TCK_RecipThroughput)
412       return Cost == 0 ? 0 : 1;
413     return Cost;
414   };
415   auto IsLegalFPType = [this](EVT VT) {
416     EVT EltVT = VT.getScalarType();
417     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
418             (EltVT == MVT::f64 && ST->hasFP64()) ||
419             (EltVT == MVT::f16 && ST->hasFullFP16());
420   };
421 
422   EVT SrcTy = TLI->getValueType(DL, Src);
423   EVT DstTy = TLI->getValueType(DL, Dst);
424 
425   if (!SrcTy.isSimple() || !DstTy.isSimple())
426     return AdjustCost(
427         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
428 
429   // Extending masked loads and truncating masked stores are expensive because we
430   // currently don't split them. This means that we'll likely end up
431   // loading/storing each element individually (hence the high cost).
432   if ((ST->hasMVEIntegerOps() &&
433        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
434         Opcode == Instruction::SExt)) ||
435       (ST->hasMVEFloatOps() &&
436        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
437        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
438     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
439       return 2 * DstTy.getVectorNumElements() *
440              ST->getMVEVectorCostFactor(CostKind);
441 
442   // The extension of other kinds of load is free.
443   if (CCH == TTI::CastContextHint::Normal ||
444       CCH == TTI::CastContextHint::Masked) {
445     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
446         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
447         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
448         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
449         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
450         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
451         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
452         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
453         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
454         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
455         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
456         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
457         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
458     };
459     if (const auto *Entry = ConvertCostTableLookup(
460             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
461       return AdjustCost(Entry->Cost);
462 
463     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
464         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
465         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
466         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
467         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
468         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
469         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
470         // The following extend from a legal type to an illegal type, so we need
471         // to split the load. This introduces an extra load operation, but the
472         // extend is still "free".
473         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
474         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
475         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
476         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
477         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
478         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
479     };
480     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
481       if (const auto *Entry =
482               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
483                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
484         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
485     }
486 
487     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
488         // FPExtends are similar but also require the VCVT instructions.
489         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
490         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
491     };
492     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
493       if (const auto *Entry =
494               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
495                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
496         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
497     }
498 
499     // The truncate of a store is free. This is the mirror of extends above.
500     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
501         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
502         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
503         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
504         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
505         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
506         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
507         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
508     };
509     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
510       if (const auto *Entry =
511               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
512                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
513         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
514     }
515 
516     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
517         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
518         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
519     };
520     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
521       if (const auto *Entry =
522               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
523                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
524         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
525     }
526   }
527 
528   // NEON vector operations that can extend their inputs.
529   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
530       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
531     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
532       // vaddl
533       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
534       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
535       // vsubl
536       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
537       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
538       // vmull
539       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
540       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
541       // vshll
542       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
543       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
544     };
545 
546     auto *User = cast<Instruction>(*I->user_begin());
547     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
548     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
549                                              DstTy.getSimpleVT(),
550                                              SrcTy.getSimpleVT())) {
551       return AdjustCost(Entry->Cost);
552     }
553   }
554 
555   // Single to/from double precision conversions.
556   if (Src->isVectorTy() && ST->hasNEON() &&
557       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
558         DstTy.getScalarType() == MVT::f32) ||
559        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
560         DstTy.getScalarType() == MVT::f64))) {
561     static const CostTblEntry NEONFltDblTbl[] = {
562         // Vector fptrunc/fpext conversions.
563         {ISD::FP_ROUND, MVT::v2f64, 2},
564         {ISD::FP_EXTEND, MVT::v2f32, 2},
565         {ISD::FP_EXTEND, MVT::v4f32, 4}};
566 
567     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
568     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
569       return AdjustCost(LT.first * Entry->Cost);
570   }
571 
572   // Some arithmetic, load and store operations have specific instructions
573   // to cast up/down their types automatically at no extra cost.
574   // TODO: Get these tables to know at least what the related operations are.
575   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
576     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
577     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
578     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
579     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
580     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
581     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
582 
583     // The number of vmovl instructions for the extension.
584     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
585     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
586     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
587     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
588     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
589     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
590     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
591     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
592     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
593     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
594     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
595     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
596     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
597     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
598     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
599     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
600     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
601     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
602 
603     // Operations that we legalize using splitting.
604     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
605     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
606 
607     // Vector float <-> i32 conversions.
608     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
609     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
610 
611     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
612     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
613     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
614     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
615     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
616     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
617     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
618     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
619     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
620     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
621     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
622     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
623     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
624     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
625     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
626     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
627     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
628     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
629     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
630     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
631 
632     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
633     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
634     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
635     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
636     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
637     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
638 
639     // Vector double <-> i32 conversions.
640     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
641     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
642 
643     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
644     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
645     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
646     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
647     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
648     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
649 
650     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
651     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
652     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
653     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
654     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
655     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
656   };
657 
658   if (SrcTy.isVector() && ST->hasNEON()) {
659     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
660                                                    DstTy.getSimpleVT(),
661                                                    SrcTy.getSimpleVT()))
662       return AdjustCost(Entry->Cost);
663   }
664 
665   // Scalar float to integer conversions.
666   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
667     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
668     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
669     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
670     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
671     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
672     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
673     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
674     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
675     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
676     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
677     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
678     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
679     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
680     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
681     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
682     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
683     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
684     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
685     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
686     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
687   };
688   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
689     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
690                                                    DstTy.getSimpleVT(),
691                                                    SrcTy.getSimpleVT()))
692       return AdjustCost(Entry->Cost);
693   }
694 
695   // Scalar integer to float conversions.
696   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
697     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
698     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
699     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
700     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
701     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
702     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
703     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
704     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
705     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
706     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
707     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
708     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
709     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
710     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
711     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
712     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
713     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
714     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
715     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
716     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
717   };
718 
719   if (SrcTy.isInteger() && ST->hasNEON()) {
720     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
721                                                    ISD, DstTy.getSimpleVT(),
722                                                    SrcTy.getSimpleVT()))
723       return AdjustCost(Entry->Cost);
724   }
725 
726 // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
727 // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
728 // are linearised, so they take more.
729   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
730     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
731     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
732     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
733     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
734     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
735     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
736     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
737     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
738     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
739     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
740     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
741     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
742   };
743 
744   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
745     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
746                                                    ISD, DstTy.getSimpleVT(),
747                                                    SrcTy.getSimpleVT()))
748       return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
749   }
750 
751   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
752     // As a general rule, fp converts that were not matched above are scalarized
753     // and cost 1 vcvt for each lane, so long as the instruction is available.
754     // If not, it will become a series of function calls.
755     const InstructionCost CallCost =
756         getCallInstrCost(nullptr, Dst, {Src}, CostKind);
757     int Lanes = 1;
758     if (SrcTy.isFixedLengthVector())
759       Lanes = SrcTy.getVectorNumElements();
760 
761     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
762       return Lanes;
763     else
764       return Lanes * CallCost;
765   }
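  // For example, an fpext from <4 x float> to <4 x double> has no vector
  // instruction here: with scalar f32/f64 support it costs 4 (one vcvt per
  // lane), and without hardware FP it costs 4 * the cost of a libcall.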
766 
767   if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
768       SrcTy.isFixedLengthVector()) {
769     // Treat a truncate with a larger-than-legal source (128 bits for MVE) as
770     // expensive: 2 instructions per lane.
771     if ((SrcTy.getScalarType() == MVT::i8 ||
772          SrcTy.getScalarType() == MVT::i16 ||
773          SrcTy.getScalarType() == MVT::i32) &&
774         SrcTy.getSizeInBits() > 128 &&
775         SrcTy.getSizeInBits() > DstTy.getSizeInBits())
776       return SrcTy.getVectorNumElements() * 2;
777   }
778 
779   // Scalar integer conversion costs.
780   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
781     // i16 -> i64 requires two dependent operations.
782     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
783 
784     // Truncates on i64 are assumed to be free.
785     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
786     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
787     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
788     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
789   };
790 
791   if (SrcTy.isInteger()) {
792     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
793                                                    DstTy.getSimpleVT(),
794                                                    SrcTy.getSimpleVT()))
795       return AdjustCost(Entry->Cost);
796   }
797 
798   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
799                      ? ST->getMVEVectorCostFactor(CostKind)
800                      : 1;
801   return AdjustCost(
802       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
803 }
804 
805 InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
806                                                unsigned Index) {
807   // Penalize inserting into a D-subregister. We end up with a three times
808   // lower estimated throughput on Swift.
809   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
810       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
811     return 3;
812 
813   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
814                         Opcode == Instruction::ExtractElement)) {
815     // Cross-class copies are expensive on many microarchitectures,
816     // so assume they are expensive by default.
817     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
818       return 3;
819 
820     // Even if it's not a cross class copy, this likely leads to mixing
821     // of NEON and VFP code and should be therefore penalized.
822     if (ValTy->isVectorTy() &&
823         ValTy->getScalarSizeInBits() <= 32)
824       return std::max<InstructionCost>(
825           BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
826   }
827 
828   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
829                                  Opcode == Instruction::ExtractElement)) {
830     // Integer cross-lane moves are more expensive than float, which can
831     // sometimes just be vmovs. Integer moves involve being passed through GPR
832     // registers, causing more of a delay.
833     std::pair<InstructionCost, MVT> LT =
834         getTLI()->getTypeLegalizationCost(DL, ValTy->getScalarType());
835     return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
836   }
837 
838   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
839 }
840 
841 InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
842                                                Type *CondTy,
843                                                CmpInst::Predicate VecPred,
844                                                TTI::TargetCostKind CostKind,
845                                                const Instruction *I) {
846   int ISD = TLI->InstructionOpcodeToISD(Opcode);
847 
848   // Thumb scalar code size cost for select.
849   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
850       ST->isThumb() && !ValTy->isVectorTy()) {
851     // Assume expensive structs.
852     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
853       return TTI::TCC_Expensive;
854 
855     // Select costs can vary because they:
856     // - may require one or more conditional mov (including an IT),
857     // - can't operate directly on immediates,
858     // - require live flags, which we can't copy around easily.
859     InstructionCost Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
860 
861     // Possible IT instruction for Thumb2, or more for Thumb1.
862     ++Cost;
863 
864     // i1 values may need rematerialising by using mov immediates and/or
865     // flag setting instructions.
866     if (ValTy->isIntegerTy(1))
867       ++Cost;
868 
869     return Cost;
870   }
871 
872   // If this is a vector min/max/abs, use the cost of that intrinsic directly
873   // instead. Hopefully when min/max intrinsics are more prevalent this code
874   // will not be needed.
875   const Instruction *Sel = I;
876   if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
877       Sel->hasOneUse())
878     Sel = cast<Instruction>(Sel->user_back());
879   if (Sel && ValTy->isVectorTy() &&
880       (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
881     const Value *LHS, *RHS;
882     SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
883     unsigned IID = 0;
884     switch (SPF) {
885     case SPF_ABS:
886       IID = Intrinsic::abs;
887       break;
888     case SPF_SMIN:
889       IID = Intrinsic::smin;
890       break;
891     case SPF_SMAX:
892       IID = Intrinsic::smax;
893       break;
894     case SPF_UMIN:
895       IID = Intrinsic::umin;
896       break;
897     case SPF_UMAX:
898       IID = Intrinsic::umax;
899       break;
900     case SPF_FMINNUM:
901       IID = Intrinsic::minnum;
902       break;
903     case SPF_FMAXNUM:
904       IID = Intrinsic::maxnum;
905       break;
906     default:
907       break;
908     }
909     if (IID) {
910       // The ICmp is free; the select gets the cost of the min/max/etc.
911       if (Sel != I)
912         return 0;
913       IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
914       return getIntrinsicInstrCost(CostAttrs, CostKind);
915     }
916   }
917 
918   // On NEON a vector select gets lowered to vbsl.
919   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
920     // Lowering of some vector selects is currently far from perfect.
921     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
922       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
923       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
924       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
925     };
926 
927     EVT SelCondTy = TLI->getValueType(DL, CondTy);
928     EVT SelValTy = TLI->getValueType(DL, ValTy);
929     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
930       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
931                                                      SelCondTy.getSimpleVT(),
932                                                      SelValTy.getSimpleVT()))
933         return Entry->Cost;
934     }
935 
936     std::pair<InstructionCost, MVT> LT =
937         TLI->getTypeLegalizationCost(DL, ValTy);
938     return LT.first;
939   }
940 
941   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
942       (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
943       cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
944     FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
945     FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
946     if (!VecCondTy)
947       VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));
948 
949     // If we don't have mve.fp, any fp operations will need to be scalarized.
950     if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
951       // One scalarization insert, one scalarization extract and the cost of the
952       // fcmps.
953       return BaseT::getScalarizationOverhead(VecValTy, false, true) +
954              BaseT::getScalarizationOverhead(VecCondTy, true, false) +
955              VecValTy->getNumElements() *
956                  getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
957                                     VecCondTy->getScalarType(), VecPred, CostKind,
958                                     I);
959     }
960 
961     std::pair<InstructionCost, MVT> LT =
962         TLI->getTypeLegalizationCost(DL, ValTy);
963     int BaseCost = ST->getMVEVectorCostFactor(CostKind);
964     // There are two types - the input that specifies the type of the compare
965     // and the output vXi1 type. Because we don't know how the output will be
966     // split, we may need an expensive shuffle to get two in sync. This has the
967     // effect of making larger than legal compares (v8i32 for example)
968     // expensive.
969     if (LT.second.getVectorNumElements() > 2) {
970       if (LT.first > 1)
971         return LT.first * BaseCost +
972                BaseT::getScalarizationOverhead(VecCondTy, true, false);
973       return BaseCost;
974     }
975   }
976 
977   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
978   // for "multiple beats" potentially needed by MVE instructions.
979   int BaseCost = 1;
980   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
981     BaseCost = ST->getMVEVectorCostFactor(CostKind);
982 
983   return BaseCost *
984          BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
985 }
986 
987 InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
988                                                       ScalarEvolution *SE,
989                                                       const SCEV *Ptr) {
990   // Address computations in vectorized code with non-consecutive addresses will
991   // likely result in more instructions compared to scalar code where the
992   // computation can more often be merged into the index mode. The resulting
993   // extra micro-ops can significantly decrease throughput.
994   unsigned NumVectorInstToHideOverhead = 10;
995   int MaxMergeDistance = 64;
996 
997   if (ST->hasNEON()) {
998     if (Ty->isVectorTy() && SE &&
999         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1000       return NumVectorInstToHideOverhead;
1001 
1002     // In many cases the address computation is not merged into the instruction
1003     // addressing mode.
1004     return 1;
1005   }
1006   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1007 }
1008 
1009 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
1010   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1011     // If a VCTP is part of a chain, it's already profitable and shouldn't be
1012     // optimized, else LSR may block tail-predication.
1013     switch (II->getIntrinsicID()) {
1014     case Intrinsic::arm_mve_vctp8:
1015     case Intrinsic::arm_mve_vctp16:
1016     case Intrinsic::arm_mve_vctp32:
1017     case Intrinsic::arm_mve_vctp64:
1018       return true;
1019     default:
1020       break;
1021     }
1022   }
1023   return false;
1024 }
1025 
1026 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
1027   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
1028     return false;
1029 
1030   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
1031     // Don't support v2i1 yet.
1032     if (VecTy->getNumElements() == 2)
1033       return false;
1034 
1035     // We don't support extending fp types.
1036     unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
1037     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
1038       return false;
1039   }
1040 
1041   unsigned EltWidth = DataTy->getScalarSizeInBits();
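  // For example, masked loads of <4 x i32> need an alignment of at least 4,
  // <8 x i16> at least 2, and <16 x i8> are always legal.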
1042   return (EltWidth == 32 && Alignment >= 4) ||
1043          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
1044 }
1045 
1046 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
1047   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
1048     return false;
1049 
1050   // This method is called in 2 places:
1051   //  - from the vectorizer with a scalar type, in which case we need to get
1052   //  this as good as we can with the limited info we have (and rely on the cost
1053   //  model for the rest).
1054   //  - from the masked intrinsic lowering pass with the actual vector type.
1055   // For MVE, we have a custom lowering pass that will already have custom
1056   // legalised any gathers that we can to MVE intrinsics, and want to expand all
1057   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
1058   // are here, we know we want to expand.
1059   if (isa<VectorType>(Ty))
1060     return false;
1061 
1062   unsigned EltWidth = Ty->getScalarSizeInBits();
1063   return ((EltWidth == 32 && Alignment >= 4) ||
1064           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
1065 }
1066 
1067 /// Given a memcpy/memset/memmove instruction, return the number of memory
1068 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
1069 /// call is used.
1070 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
1071   MemOp MOp;
1072   unsigned DstAddrSpace = ~0u;
1073   unsigned SrcAddrSpace = ~0u;
1074   const Function *F = I->getParent()->getParent();
1075 
1076   if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
1077     ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
1078     // If 'size' is not a constant, a library call will be generated.
1079     if (!C)
1080       return -1;
1081 
1082     const unsigned Size = C->getValue().getZExtValue();
1083     const Align DstAlign = *MC->getDestAlign();
1084     const Align SrcAlign = *MC->getSourceAlign();
1085 
1086     MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
1087                       /*IsVolatile*/ false);
1088     DstAddrSpace = MC->getDestAddressSpace();
1089     SrcAddrSpace = MC->getSourceAddressSpace();
1090   }
1091   else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
1092     ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
1093     // If 'size' is not a constant, a library call will be generated.
1094     if (!C)
1095       return -1;
1096 
1097     const unsigned Size = C->getValue().getZExtValue();
1098     const Align DstAlign = *MS->getDestAlign();
1099 
1100     MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1101                      /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1102     DstAddrSpace = MS->getDestAddressSpace();
1103   }
1104   else
1105     llvm_unreachable("Expected a memcpy/move or memset!");
1106 
1107   unsigned Limit, Factor = 2;
1108   switch(I->getIntrinsicID()) {
1109     case Intrinsic::memcpy:
1110       Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1111       break;
1112     case Intrinsic::memmove:
1113       Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1114       break;
1115     case Intrinsic::memset:
1116       Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1117       Factor = 1;
1118       break;
1119     default:
1120       llvm_unreachable("Expected a memcpy/move or memset!");
1121   }
1122 
1123   // MemOps will be populated with a list of data types that need to be
1124   // loaded and stored. That's why we multiply the number of elements by 2 to
1125   // get the cost for this memcpy.
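  // Illustrative example: a 16-byte memcpy with 4-byte alignment may be
  // lowered as four i32 loads and four i32 stores, in which case
  // findOptimalMemOpLowering returns MemOps = {i32, i32, i32, i32} and we
  // report 4 * 2 = 8 operations.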
1126   std::vector<EVT> MemOps;
1127   if (getTLI()->findOptimalMemOpLowering(
1128           MemOps, Limit, MOp, DstAddrSpace,
1129           SrcAddrSpace, F->getAttributes()))
1130     return MemOps.size() * Factor;
1131 
1132   // If we can't find an optimal memop lowering, return the default cost
1133   return -1;
1134 }
1135 
1136 InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1137   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1138 
1139   // To model the cost of a library call, we assume 1 for the call, and
1140   // 3 for the argument setup.
1141   if (NumOps == -1)
1142     return 4;
1143   return NumOps;
1144 }
1145 
1146 InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1147                                            VectorType *Tp, ArrayRef<int> Mask,
1148                                            int Index, VectorType *SubTp) {
1149   Kind = improveShuffleKindFromMask(Kind, Mask);
1150   if (ST->hasNEON()) {
1151     if (Kind == TTI::SK_Broadcast) {
1152       static const CostTblEntry NEONDupTbl[] = {
1153           // VDUP handles these cases.
1154           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1155           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1156           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1157           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1158           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1159           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1160 
1161           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1162           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1163           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1164           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1165 
1166       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1167       if (const auto *Entry =
1168               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1169         return LT.first * Entry->Cost;
1170     }
1171     if (Kind == TTI::SK_Reverse) {
1172       static const CostTblEntry NEONShuffleTbl[] = {
1173           // Reverse shuffle costs one instruction if we are shuffling within a
1174           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1175           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1176           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1177           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1178           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1179           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1180           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1181 
1182           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1183           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1184           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1185           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1186 
1187       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1188       if (const auto *Entry =
1189               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1190         return LT.first * Entry->Cost;
1191     }
1192     if (Kind == TTI::SK_Select) {
1193       static const CostTblEntry NEONSelShuffleTbl[] = {
1194           // Select shuffle cost table for ARM. Cost is the number of
1195           // instructions required to create the shuffled vector.
1197 
1198           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1199           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1200           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1201           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1202 
1203           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1204           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1205           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1206 
1207           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1208 
1209           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1210 
1211       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1212       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1213                                               ISD::VECTOR_SHUFFLE, LT.second))
1214         return LT.first * Entry->Cost;
1215     }
1216   }
1217   if (ST->hasMVEIntegerOps()) {
1218     if (Kind == TTI::SK_Broadcast) {
1219       static const CostTblEntry MVEDupTbl[] = {
1220           // VDUP handles these cases.
1221           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1222           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1223           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1224           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1225           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1226 
1227       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1228       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1229                                               LT.second))
1230         return LT.first * Entry->Cost *
1231                ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1232     }
1233 
1234     if (!Mask.empty()) {
1235       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1236       if (Mask.size() <= LT.second.getVectorNumElements() &&
1237           (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
1238            isVREVMask(Mask, LT.second, 64)))
1239         return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
1240     }
1241   }
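  // For example, on v8i16 the mask <1,0,3,2,5,4,7,6> reverses the 16-bit
  // lanes within each 32-bit chunk, which is exactly vrev32.16, so it is
  // costed above as a single (beat-adjusted) MVE instruction.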
1242 
1243   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1244                      ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1245                      : 1;
1246   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
1247 }
1248 
1249 InstructionCost ARMTTIImpl::getArithmeticInstrCost(
1250     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1251     TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
1252     TTI::OperandValueProperties Opd1PropInfo,
1253     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1254     const Instruction *CxtI) {
1255   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1256   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1257     // Make operations on i1 relatively expensive as this often involves
1258     // combining predicates. AND and XOR should be easier to handle with IT
1259     // blocks.
1260     switch (ISDOpcode) {
1261     default:
1262       break;
1263     case ISD::AND:
1264     case ISD::XOR:
1265       return 2;
1266     case ISD::OR:
1267       return 3;
1268     }
1269   }
1270 
1271   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1272 
1273   if (ST->hasNEON()) {
1274     const unsigned FunctionCallDivCost = 20;
1275     const unsigned ReciprocalDivCost = 10;
1276     static const CostTblEntry CostTbl[] = {
1277       // Division.
1278       // These costs are somewhat random. Choose a cost of 20 to indicate that
1279       // vectorizing division (added function call) is going to be very expensive.
1280       // Double registers types.
1281       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1282       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1283       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1284       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1285       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1286       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1287       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1288       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1289       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1290       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1291       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1292       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1293       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1294       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1295       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1296       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1297       // Quad register types.
1298       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1299       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1300       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1301       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1302       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1303       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1304       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1305       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1306       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1307       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1308       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1309       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1310       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1311       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1312       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1313       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1315     };
1316 
1317     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1318       return LT.first * Entry->Cost;
1319 
1320     InstructionCost Cost = BaseT::getArithmeticInstrCost(
1321         Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
1322 
1323     // This is somewhat of a hack. The problem that we are facing is that SROA
1324     // creates a sequence of shift, and, or instructions to construct values.
1325     // These sequences are recognized by the ISel and have zero-cost. Not so for
1326     // the vectorized code. Because we have support for v2i64 but not i64 those
1327     // sequences look particularly beneficial to vectorize.
1328     // To work around this we increase the cost of v2i64 operations to make them
1329     // seem less beneficial.
1330     if (LT.second == MVT::v2i64 &&
1331         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1332       Cost += 4;
1333 
1334     return Cost;
1335   }
1336 
1337   // If this operation is a shift on arm/thumb2, it might well be folded into
1338   // the following instruction, hence having a cost of 0.
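       // For example, a shift feeding an add can use the shifted-operand form
       // "add r0, r1, r2, lsl #2", making the shift itself effectively free.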
1339   auto LooksLikeAFreeShift = [&]() {
1340     if (ST->isThumb1Only() || Ty->isVectorTy())
1341       return false;
1342 
1343     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1344       return false;
1345     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1346       return false;
1347 
1348     // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1349     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1350     case Instruction::Add:
1351     case Instruction::Sub:
1352     case Instruction::And:
1353     case Instruction::Xor:
1354     case Instruction::Or:
1355     case Instruction::ICmp:
1356       return true;
1357     default:
1358       return false;
1359     }
1360   };
1361   if (LooksLikeAFreeShift())
1362     return 0;
1363 
1364   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1365   // for "multiple beats" potentially needed by MVE instructions.
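       // (An MVE implementation may execute a 128-bit vector operation over 1,
       // 2 or 4 beats depending on the core.)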
1366   int BaseCost = 1;
1367   if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1368     BaseCost = ST->getMVEVectorCostFactor(CostKind);
1369 
1370   // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
1371   // without treating floats as more expensive than scalars or increasing the
1372   // costs for custom operations. The result is also multiplied by the
1373   // MVEVectorCostFactor where appropriate.
1374   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1375     return LT.first * BaseCost;
1376 
1377   // Else this is expand, assume that we need to scalarize this op.
1378   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1379     unsigned Num = VTy->getNumElements();
1380     InstructionCost Cost =
1381         getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
1382     // Return the cost of multiple scalar invocation plus the cost of
1383     // inserting and extracting the values.
1384     SmallVector<Type *> Tys(Args.size(), Ty);
1385     return BaseT::getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
1386   }
1387 
1388   return BaseCost;
1389 }
1390 
1391 InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1392                                             MaybeAlign Alignment,
1393                                             unsigned AddressSpace,
1394                                             TTI::TargetCostKind CostKind,
1395                                             const Instruction *I) {
1396   // TODO: Handle other cost kinds.
1397   if (CostKind != TTI::TCK_RecipThroughput)
1398     return 1;
1399 
1400   // Type legalization can't handle structs
1401   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1402     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1403                                   CostKind);
1404 
1405   if (ST->hasNEON() && Src->isVectorTy() &&
1406       (Alignment && *Alignment != Align(16)) &&
1407       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1408     // Unaligned loads/stores are extremely inefficient.
1409     // We need 4 uops for vst1/vld1 vs 1 uop for vldr/vstr.
1410     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1411     return LT.first * 4;
1412   }
1413 
1414   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1415   // Same for stores.
1416   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1417       ((Opcode == Instruction::Load && I->hasOneUse() &&
1418         isa<FPExtInst>(*I->user_begin())) ||
1419        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1420     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1421     Type *DstTy =
1422         Opcode == Instruction::Load
1423             ? (*I->user_begin())->getType()
1424             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1425     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1426         DstTy->getScalarType()->isFloatTy())
1427       return ST->getMVEVectorCostFactor(CostKind);
1428   }
1429 
1430   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1431                      ? ST->getMVEVectorCostFactor(CostKind)
1432                      : 1;
1433   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1434                                            CostKind, I);
1435 }
1436 
1437 InstructionCost
1438 ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1439                                   unsigned AddressSpace,
1440                                   TTI::TargetCostKind CostKind) {
1441   if (ST->hasMVEIntegerOps()) {
1442     if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1443       return ST->getMVEVectorCostFactor(CostKind);
1444     if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1445       return ST->getMVEVectorCostFactor(CostKind);
1446   }
1447   if (!isa<FixedVectorType>(Src))
1448     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1449                                         CostKind);
1450   // Scalar cost, which is currently very high due to the inefficiency of the
1451   // generated code.
1452   return cast<FixedVectorType>(Src)->getNumElements() * 8;
1453 }
1454 
1455 InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
1456     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1457     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1458     bool UseMaskForCond, bool UseMaskForGaps) {
1459   assert(Factor >= 2 && "Invalid interleave factor");
1460   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1461 
1462   // vldN/vstN don't support vector types with i64/f64 elements.
1463   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1464 
1465   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1466       !UseMaskForCond && !UseMaskForGaps) {
1467     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1468     auto *SubVecTy =
1469         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1470 
1471     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1472     // Accesses having vector types that are a multiple of 128 bits can be
1473     // matched to more than one vldN/vstN instruction.
1474     int BaseCost =
1475         ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1476     if (NumElts % Factor == 0 &&
1477         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1478       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1479 
1480     // Some smaller than legal interleaved patterns are cheap as we can make
1481     // use of the vmovn or vrev patterns to interleave a standard load. This is
1482     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1483     // promoted differently). The cost of 2 here is then a load and vrev or
1484     // vmovn.
1485     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1486         VecTy->isIntOrIntVectorTy() &&
1487         DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
1488       return 2 * BaseCost;
1489   }
1490 
1491   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1492                                            Alignment, AddressSpace, CostKind,
1493                                            UseMaskForCond, UseMaskForGaps);
1494 }
1495 
1496 InstructionCost ARMTTIImpl::getGatherScatterOpCost(
1497     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1498     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1499   using namespace PatternMatch;
1500   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1501     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1502                                          Alignment, CostKind, I);
1503 
1504   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1505   auto *VTy = cast<FixedVectorType>(DataTy);
1506 
1507   // TODO: Splitting, once we do that.
1508 
1509   unsigned NumElems = VTy->getNumElements();
1510   unsigned EltSize = VTy->getScalarSizeInBits();
1511   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1512 
1513   // For now, it is assumed that for the MVE gather instructions the loads are
1514   // all effectively serialised. This means the cost is the scalar cost
1515   // multiplied by the number of elements being loaded. This is possibly very
1516   // conservative, but even so we still end up vectorising loops because the
1517   // cost per iteration for many loops is lower than for scalar loops.
1518   InstructionCost VectorCost =
1519       NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1520   // The scalarization cost should be a lot higher. We use the number of vector
1521   // elements plus the scalarization overhead.
1522   InstructionCost ScalarCost =
1523       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, true, false) +
1524       BaseT::getScalarizationOverhead(VTy, false, true);
1525 
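       // Vectors of sub-byte elements or under-aligned accesses cannot use the
       // gather/scatter instructions and have to be scalarised.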
1526   if (EltSize < 8 || Alignment < EltSize / 8)
1527     return ScalarCost;
1528 
1529   unsigned ExtSize = EltSize;
1530   // Check whether there's a single user that asks for an extended type
1531   if (I != nullptr) {
1532     // Depending on the caller of this function, a gather instruction will
1533     // either have opcode Instruction::Load or be a call to the masked_gather
1534     // intrinsic.
1535     if ((I->getOpcode() == Instruction::Load ||
1536          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1537         I->hasOneUse()) {
1538       const User *Us = *I->users().begin();
1539       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1540         // Only allow valid type combinations.
1541         unsigned TypeSize =
1542             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1543         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1544              (TypeSize == 16 && EltSize == 8)) &&
1545             TypeSize * NumElems == 128) {
1546           ExtSize = TypeSize;
1547         }
1548       }
1549     }
1550     // Check whether the input data needs to be truncated
1551     TruncInst *T;
1552     if ((I->getOpcode() == Instruction::Store ||
1553          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1554         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1555       // Only allow valid type combinations
1556       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1557       if (((EltSize == 16 && TypeSize == 32) ||
1558            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1559           TypeSize * NumElems == 128)
1560         ExtSize = TypeSize;
1561     }
1562   }
1563 
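       // MVE gathers/scatters operate on a full 128-bit vector of at least 4
       // (possibly extended) elements; anything else is scalarised.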
1564   if (ExtSize * NumElems != 128 || NumElems < 4)
1565     return ScalarCost;
1566 
1567   // Any (aligned) i32 gather will not need to be scalarised.
1568   if (ExtSize == 32)
1569     return VectorCost;
1570   // For smaller types, we need to ensure that the gep's inputs are correctly
1571   // extended from a small enough value. Other sizes (including i64) are
1572   // scalarized for now.
1573   if (ExtSize != 8 && ExtSize != 16)
1574     return ScalarCost;
1575 
1576   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1577     Ptr = BC->getOperand(0);
1578   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1579     if (GEP->getNumOperands() != 2)
1580       return ScalarCost;
1581     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1582     // Scale needs to be correct (which is only relevant for i16s).
1583     if (Scale != 1 && Scale * 8 != ExtSize)
1584       return ScalarCost;
1585     // And we need to zext (not sext) the indexes from a small enough type.
1586     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1587       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1588         return VectorCost;
1589     }
1590     return ScalarCost;
1591   }
1592   return ScalarCost;
1593 }
1594 
1595 InstructionCost
1596 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1597                                        Optional<FastMathFlags> FMF,
1598                                        TTI::TargetCostKind CostKind) {
1599   if (TTI::requiresOrderedReduction(FMF))
1600     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1601 
1602   EVT ValVT = TLI->getValueType(DL, ValTy);
1603   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1604   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
1605     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1606 
1607   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1608 
1609   static const CostTblEntry CostTblAdd[]{
1610       {ISD::ADD, MVT::v16i8, 1},
1611       {ISD::ADD, MVT::v8i16, 1},
1612       {ISD::ADD, MVT::v4i32, 1},
1613   };
1614   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1615     return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1616 
1617   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1618 }
1619 
1620 InstructionCost
1621 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
1622                                         Type *ResTy, VectorType *ValTy,
1623                                         TTI::TargetCostKind CostKind) {
1624   EVT ValVT = TLI->getValueType(DL, ValTy);
1625   EVT ResVT = TLI->getValueType(DL, ResTy);
1626   if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1627     std::pair<InstructionCost, MVT> LT =
1628         TLI->getTypeLegalizationCost(DL, ValTy);
1629     if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) ||
1630         (LT.second == MVT::v8i16 &&
1631          ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) ||
1632         (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64))
1633       return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1634   }
1635 
1636   return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
1637                                             CostKind);
1638 }
1639 
1640 InstructionCost
1641 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1642                                   TTI::TargetCostKind CostKind) {
1643   switch (ICA.getID()) {
1644   case Intrinsic::get_active_lane_mask:
1645     // Currently we make a somewhat optimistic assumption that
1646     // active_lane_masks are always free. In reality one may be freely folded
1647     // into a tail predicated loop, expanded into a VCTP or expanded into a lot
1648     // of add/icmp code. We may need to improve this in the future, but being
1649     // able to detect if it is free or not involves looking at a lot of other
1650     // code. We currently assume that the vectorizer inserted these, and knew
1651     // what it was doing in adding one.
1652     if (ST->hasMVEIntegerOps())
1653       return 0;
1654     break;
1655   case Intrinsic::sadd_sat:
1656   case Intrinsic::ssub_sat:
1657   case Intrinsic::uadd_sat:
1658   case Intrinsic::usub_sat: {
1659     if (!ST->hasMVEIntegerOps())
1660       break;
1661     Type *VT = ICA.getReturnType();
1662 
1663     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1664     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1665         LT.second == MVT::v16i8) {
1666       // This is a base cost of 1 for the vqadd, plus 3 extract shifts if we
1667       // need to extend the type, as it uses shr(qadd(shl, shl)).
1668       unsigned Instrs =
1669           LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
1670       return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1671     }
1672     break;
1673   }
1674   case Intrinsic::abs:
1675   case Intrinsic::smin:
1676   case Intrinsic::smax:
1677   case Intrinsic::umin:
1678   case Intrinsic::umax: {
1679     if (!ST->hasMVEIntegerOps())
1680       break;
1681     Type *VT = ICA.getReturnType();
1682 
1683     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1684     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1685         LT.second == MVT::v16i8)
1686       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1687     break;
1688   }
1689   case Intrinsic::minnum:
1690   case Intrinsic::maxnum: {
1691     if (!ST->hasMVEFloatOps())
1692       break;
1693     Type *VT = ICA.getReturnType();
1694     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1695     if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
1696       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1697     break;
1698   }
1699   }
1700 
1701   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1702 }
1703 
1704 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1705   if (!F->isIntrinsic())
1706     return BaseT::isLoweredToCall(F);
1707 
1708   // Assume all Arm-specific intrinsics map to an instruction.
1709   if (F->getName().startswith("llvm.arm"))
1710     return false;
1711 
1712   switch (F->getIntrinsicID()) {
1713   default: break;
1714   case Intrinsic::powi:
1715   case Intrinsic::sin:
1716   case Intrinsic::cos:
1717   case Intrinsic::pow:
1718   case Intrinsic::log:
1719   case Intrinsic::log10:
1720   case Intrinsic::log2:
1721   case Intrinsic::exp:
1722   case Intrinsic::exp2:
1723     return true;
1724   case Intrinsic::sqrt:
1725   case Intrinsic::fabs:
1726   case Intrinsic::copysign:
1727   case Intrinsic::floor:
1728   case Intrinsic::ceil:
1729   case Intrinsic::trunc:
1730   case Intrinsic::rint:
1731   case Intrinsic::nearbyint:
1732   case Intrinsic::round:
1733   case Intrinsic::canonicalize:
1734   case Intrinsic::lround:
1735   case Intrinsic::llround:
1736   case Intrinsic::lrint:
1737   case Intrinsic::llrint:
1738     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1739       return true;
1740     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1741       return true;
1742     // Some operations can be handled by vector instructions; assume
1743     // unsupported vectors will be expanded into supported scalar ones.
1744     // TODO Handle scalar operations properly.
1745     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1746   case Intrinsic::masked_store:
1747   case Intrinsic::masked_load:
1748   case Intrinsic::masked_gather:
1749   case Intrinsic::masked_scatter:
1750     return !ST->hasMVEIntegerOps();
1751   case Intrinsic::sadd_with_overflow:
1752   case Intrinsic::uadd_with_overflow:
1753   case Intrinsic::ssub_with_overflow:
1754   case Intrinsic::usub_with_overflow:
1755   case Intrinsic::sadd_sat:
1756   case Intrinsic::uadd_sat:
1757   case Intrinsic::ssub_sat:
1758   case Intrinsic::usub_sat:
1759     return false;
1760   }
1761 
1762   return BaseT::isLoweredToCall(F);
1763 }
1764 
1765 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
1766   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1767   EVT VT = TLI->getValueType(DL, I.getType(), true);
1768   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1769     return true;
1770 
1771   // Check if an intrinsic will be lowered to a call and assume that any
1772   // other CallInst will generate a bl.
1773   if (auto *Call = dyn_cast<CallInst>(&I)) {
1774     if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
1775       switch (II->getIntrinsicID()) {
1776         case Intrinsic::memcpy:
1777         case Intrinsic::memset:
1778         case Intrinsic::memmove:
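               // getNumMemOps returns -1 when the memory operation cannot be
               // expanded inline and will therefore be lowered to a library call.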
1779           return getNumMemOps(II) == -1;
1780         default:
1781           if (const Function *F = Call->getCalledFunction())
1782             return isLoweredToCall(F);
1783       }
1784     }
1785     return true;
1786   }
1787 
1788   // FPv5 provides conversions between integer, double-precision,
1789   // single-precision, and half-precision formats.
1790   switch (I.getOpcode()) {
1791   default:
1792     break;
1793   case Instruction::FPToSI:
1794   case Instruction::FPToUI:
1795   case Instruction::SIToFP:
1796   case Instruction::UIToFP:
1797   case Instruction::FPTrunc:
1798   case Instruction::FPExt:
1799     return !ST->hasFPARMv8Base();
1800   }
1801 
1802   // FIXME: Unfortunately the approach of checking the Operation Action does
1803   // not catch all cases of Legalization that use library calls. Our
1804   // Legalization step categorizes some transformations into library calls as
1805   // Custom, Expand or even Legal when doing type legalization. So for now
1806   // we have to special-case, for instance, the SDIV of 64-bit integers and the
1807   // use of floating point emulation.
1808   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1809     switch (ISD) {
1810     default:
1811       break;
1812     case ISD::SDIV:
1813     case ISD::UDIV:
1814     case ISD::SREM:
1815     case ISD::UREM:
1816     case ISD::SDIVREM:
1817     case ISD::UDIVREM:
1818       return true;
1819     }
1820   }
1821 
1822   // Assume all other non-float operations are supported.
1823   if (!VT.isFloatingPoint())
1824     return false;
1825 
1826   // We'll need a library call to handle most floats when using soft-float.
1827   if (TLI->useSoftFloat()) {
1828     switch (I.getOpcode()) {
1829     default:
1830       return true;
1831     case Instruction::Alloca:
1832     case Instruction::Load:
1833     case Instruction::Store:
1834     case Instruction::Select:
1835     case Instruction::PHI:
1836       return false;
1837     }
1838   }
1839 
1840   // We'll need a libcall to perform double precision operations on a single
1841   // precision only FPU.
1842   if (I.getType()->isDoubleTy() && !ST->hasFP64())
1843     return true;
1844 
1845   // Likewise for half precision arithmetic.
1846   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1847     return true;
1848 
1849   return false;
1850 }
1851 
1852 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1853                                           AssumptionCache &AC,
1854                                           TargetLibraryInfo *LibInfo,
1855                                           HardwareLoopInfo &HWLoopInfo) {
1856   // Low-overhead branches are only supported in the 'low-overhead branch'
1857   // extension of v8.1-m.
1858   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1859     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1860     return false;
1861   }
1862 
1863   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1864     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1865     return false;
1866   }
1867 
1868   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1869   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1870     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1871     return false;
1872   }
1873 
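       // The trip count is the backedge-taken count plus one.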
1874   const SCEV *TripCountSCEV =
1875     SE.getAddExpr(BackedgeTakenCount,
1876                   SE.getOne(BackedgeTakenCount->getType()));
1877 
1878   // We need to store the trip count in LR, a 32-bit register.
1879   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1880     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1881     return false;
1882   }
1883 
1884   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1885   // point in generating a hardware loop if that's going to happen.
1886 
1887   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1888     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1889       switch (Call->getIntrinsicID()) {
1890       default:
1891         break;
1892       case Intrinsic::start_loop_iterations:
1893       case Intrinsic::test_start_loop_iterations:
1894       case Intrinsic::loop_decrement:
1895       case Intrinsic::loop_decrement_reg:
1896         return true;
1897       }
1898     }
1899     return false;
1900   };
1901 
1902   // Scan the instructions to see if there's any that we know will turn into a
1903   // call or if this loop is already a low-overhead loop or will become a tail
1904   // predicated loop.
1905   bool IsTailPredLoop = false;
1906   auto ScanLoop = [&](Loop *L) {
1907     for (auto *BB : L->getBlocks()) {
1908       for (auto &I : *BB) {
1909         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
1910             isa<InlineAsm>(I)) {
1911           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1912           return false;
1913         }
1914         if (auto *II = dyn_cast<IntrinsicInst>(&I))
1915           IsTailPredLoop |=
1916               II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
1917               II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
1918               II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
1919               II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
1920               II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
1921       }
1922     }
1923     return true;
1924   };
1925 
1926   // Visit inner loops.
1927   for (auto Inner : *L)
1928     if (!ScanLoop(Inner))
1929       return false;
1930 
1931   if (!ScanLoop(L))
1932     return false;
1933 
1934   // TODO: Check whether the trip count calculation is expensive. If L is the
1935   // inner loop but we know it has a low trip count, calculating that trip
1936   // count (in the parent loop) may be detrimental.
1937 
1938   LLVMContext &C = L->getHeader()->getContext();
1939   HWLoopInfo.CounterInReg = true;
1940   HWLoopInfo.IsNestingLegal = false;
1941   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
1942   HWLoopInfo.CountType = Type::getInt32Ty(C);
1943   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1944   return true;
1945 }
1946 
1947 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1948   // We don't allow extra icmps: because we only look at single block loops,
1949   // we simply count the icmps, i.e. there should only be 1 for the backedge.
1950   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1951     return false;
1952 
1953   if (isa<FCmpInst>(&I))
1954     return false;
1955 
1956   // We could allow extending/narrowing FP loads/stores, but codegen is
1957   // too inefficient so reject this for now.
1958   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1959     return false;
1960 
1961   // Extends have to be extending-loads
1962   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
1963     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1964       return false;
1965 
1966   // Truncs have to be narrowing-stores
1967   if (isa<TruncInst>(&I))
1968     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1969       return false;
1970 
1971   return true;
1972 }
1973 
1974 // To set up a tail-predicated loop, we need to know the total number of
1975 // elements processed by that loop. Thus, we need to determine the element
1976 // size and:
1977 // 1) it should be uniform for all operations in the vector loop, so we
1978 //    e.g. don't want any widening/narrowing operations.
1979 // 2) it should be smaller than i64s because we don't have vector operations
1980 //    that work on i64s.
1981 // 3) we don't want elements to be reversed or shuffled, to make sure the
1982 //    tail-predication masks/predicates the right lanes.
1983 //
1984 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1985                                  const DataLayout &DL,
1986                                  const LoopAccessInfo *LAI) {
1987   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1988 
1989   // If there are live-out values, it is probably a reduction. We can predicate
1990   // most reduction operations freely under MVE using a combination of
1991   // prefer-predicated-reduction-select and inloop reductions. We limit this to
1992   // floating point and integer reductions, but don't check for operators
1993   // specifically here. If the value ends up not being a reduction (and so the
1994   // vectorizer cannot tailfold the loop), we should fall back to standard
1995   // vectorization automatically.
1996   SmallVector<Instruction *, 8> LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
1998   bool ReductionsDisabled =
1999       EnableTailPredication == TailPredication::EnabledNoReductions ||
2000       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
2001 
2002   for (auto *I : LiveOuts) {
2003     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
2004         !I->getType()->isHalfTy()) {
2005       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
2006                            "live-out value\n");
2007       return false;
2008     }
2009     if (ReductionsDisabled) {
2010       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
2011       return false;
2012     }
2013   }
2014 
2015   // Next, check that all instructions can be tail-predicated.
2016   PredicatedScalarEvolution PSE = LAI->getPSE();
2017   SmallVector<Instruction *, 16> LoadStores;
2018   int ICmpCount = 0;
2019 
2020   for (BasicBlock *BB : L->blocks()) {
2021     for (Instruction &I : BB->instructionsWithoutDebug()) {
2022       if (isa<PHINode>(&I))
2023         continue;
2024       if (!canTailPredicateInstruction(I, ICmpCount)) {
2025         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
2026         return false;
2027       }
2028 
2029       Type *T = I.getType();
2030       if (T->isPointerTy())
2031         T = T->getPointerElementType();
2032 
2033       if (T->getScalarSizeInBits() > 32) {
2034         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
2035         return false;
2036       }
2037       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
2038         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
2039         int64_t NextStride = getPtrStride(PSE, Ptr, L);
2040         if (NextStride == 1) {
2041           // TODO: for now only allow consecutive strides of 1. We could support
2042           // other strides as long as it is uniform, but let's keep it simple
2043           // for now.
2044           continue;
2045         } else if (NextStride == -1 ||
2046                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
2047                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
2048           LLVM_DEBUG(dbgs()
2049                      << "Reversed or interleaved stride found, vldN/vstN "
2050                         "can't be tail-predicated.\n");
2051           return false;
2052           // TODO: don't tail predicate if there is a reversed load?
2053         } else if (EnableMaskedGatherScatters) {
2054           // Gather/scatters do allow loading from arbitrary strides, at
2055           // least if they are loop invariant.
2056           // TODO: Loop variant strides should in theory work, too, but
2057           // this requires further testing.
2058           const SCEV *PtrScev =
2059               replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
2060           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
2061             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
2062             if (PSE.getSE()->isLoopInvariant(Step, L))
2063               continue;
2064           }
2065         }
2066         LLVM_DEBUG(dbgs() << "Bad stride found, can't "
2067                              "tail-predicate.\n");
2068         return false;
2069       }
2070     }
2071   }
2072 
2073   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
2074   return true;
2075 }
2076 
2077 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
2078                                              ScalarEvolution &SE,
2079                                              AssumptionCache &AC,
2080                                              TargetLibraryInfo *TLI,
2081                                              DominatorTree *DT,
2082                                              const LoopAccessInfo *LAI) {
2083   if (!EnableTailPredication) {
2084     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
2085     return false;
2086   }
2087 
2088   // Creating a predicated vector loop is the first step for generating a
2089   // tail-predicated hardware loop, for which we need the MVE masked
2090   // load/store instructions:
2091   if (!ST->hasMVEIntegerOps())
2092     return false;
2093 
2094   // For now, restrict this to single block loops.
2095   if (L->getNumBlocks() > 1) {
2096     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
2097                          "loop.\n");
2098     return false;
2099   }
2100 
2101   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
2102 
2103   HardwareLoopInfo HWLoopInfo(L);
2104   if (!HWLoopInfo.canAnalyze(*LI)) {
2105     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2106                          "analyzable.\n");
2107     return false;
2108   }
2109 
2110   // This checks if we have the low-overhead branch architecture
2111   // extension, and if we will create a hardware-loop:
2112   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
2113     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2114                          "profitable.\n");
2115     return false;
2116   }
2117 
2118   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
2119     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2120                          "a candidate.\n");
2121     return false;
2122   }
2123 
2124   return canTailPredicateLoop(L, LI, SE, DL, LAI);
2125 }
2126 
2127 bool ARMTTIImpl::emitGetActiveLaneMask() const {
2128   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2129     return false;
2130 
2131   // Intrinsic @llvm.get.active.lane.mask is supported.
2132   // It is used in the MVETailPredication pass, which requires the number of
2133   // elements processed by this vector loop to setup the tail-predicated
2134   // loop.
2135   return true;
2136 }
2137 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2138                                          TTI::UnrollingPreferences &UP) {
2139   // Enable upper-bound unrolling universally, not dependent on the conditions
2140   // below.
2141   UP.UpperBound = true;
2142 
2143   // Only currently enable these preferences for M-Class cores.
2144   if (!ST->isMClass())
2145     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
2146 
2147   // Disable loop unrolling for Oz and Os.
2148   UP.OptSizeThreshold = 0;
2149   UP.PartialOptSizeThreshold = 0;
2150   if (L->getHeader()->getParent()->hasOptSize())
2151     return;
2152 
2153   SmallVector<BasicBlock*, 4> ExitingBlocks;
2154   L->getExitingBlocks(ExitingBlocks);
2155   LLVM_DEBUG(dbgs() << "Loop has:\n"
2156                     << "Blocks: " << L->getNumBlocks() << "\n"
2157                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2158 
2159   // Only allow one exit other than the latch. This acts as an early exit
2160   // as it mirrors the profitability calculation of the runtime unroller.
2161   if (ExitingBlocks.size() > 2)
2162     return;
2163 
2164   // Limit the CFG of the loop body for targets with a branch predictor.
2165   // Allowing 4 blocks permits if-then-else diamonds in the body.
2166   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2167     return;
2168 
2169   // Don't unroll vectorized loops, including the remainder loop
2170   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2171     return;
2172 
2173   // Scan the loop: don't unroll loops with calls as this could prevent
2174   // inlining.
2175   InstructionCost Cost = 0;
2176   for (auto *BB : L->getBlocks()) {
2177     for (auto &I : *BB) {
2178       // Don't unroll vectorised loops. MVE does not benefit from it as much as
2179       // scalar code.
2180       if (I.getType()->isVectorTy())
2181         return;
2182 
2183       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2184         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2185           if (!isLoweredToCall(F))
2186             continue;
2187         }
2188         return;
2189       }
2190 
2191       SmallVector<const Value*, 4> Operands(I.operand_values());
2192       Cost +=
2193         getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
2194     }
2195   }
2196 
2197   // On v6m cores, there are very few registers available. We can easily end up
2198   // spilling and reloading more registers in an unrolled loop. Look at the
2199   // number of LCSSA phis as a rough measure of how many registers will need to
2200   // be live out of the loop, reducing the default unroll count if more than 1
2201   // value is needed. In the long run, all of this should be learnt by a
2202   // machine.
2203   unsigned UnrollCount = 4;
2204   if (ST->isThumb1Only()) {
2205     unsigned ExitingValues = 0;
2206     SmallVector<BasicBlock *, 4> ExitBlocks;
2207     L->getExitBlocks(ExitBlocks);
2208     for (auto *Exit : ExitBlocks) {
2209       // Count the number of LCSSA phis. Exclude values coming from GEP's as
2210       // only the last is expected to be needed for address operands.
2211       unsigned LiveOuts = count_if(Exit->phis(), [](auto &PH) {
2212         return PH.getNumOperands() != 1 ||
2213                !isa<GetElementPtrInst>(PH.getOperand(0));
2214       });
2215       ExitingValues = ExitingValues < LiveOuts ? LiveOuts : ExitingValues;
2216     }
2217     if (ExitingValues)
2218       UnrollCount /= ExitingValues;
2219     if (UnrollCount <= 1)
2220       return;
2221   }
2222 
2223   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2224   LLVM_DEBUG(dbgs() << "Default Runtime Unroll Count: " << UnrollCount << "\n");
2225 
2226   UP.Partial = true;
2227   UP.Runtime = true;
2228   UP.UnrollRemainder = true;
2229   UP.DefaultUnrollRuntimeCount = UnrollCount;
2230   UP.UnrollAndJam = true;
2231   UP.UnrollAndJamInnerLoopThreshold = 60;
2232 
2233   // Force-unrolling small loops can be very useful because of the branch-taken
2234   // cost of the backedge.
2235   if (Cost < 12)
2236     UP.Force = true;
2237 }
2238 
2239 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2240                                        TTI::PeelingPreferences &PP) {
2241   BaseT::getPeelingPreferences(L, SE, PP);
2242 }
2243 
2244 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2245                                        TTI::ReductionFlags Flags) const {
2246   if (!ST->hasMVEIntegerOps())
2247     return false;
2248 
2249   unsigned ScalarBits = Ty->getScalarSizeInBits();
2250   switch (Opcode) {
2251   case Instruction::Add:
2252     return ScalarBits <= 64;
2253   default:
2254     return false;
2255   }
2256 }
2257 
2258 bool ARMTTIImpl::preferPredicatedReductionSelect(
2259     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2260   return ST->hasMVEIntegerOps();
2263 }
2264