//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> EnableMaskedLoadStores(
    "enable-arm-maskedldst", cl::Hidden, cl::init(true),
    cl::desc("Enable the generation of masked loads and stores"));

static cl::opt<bool> DisableLowOverheadLoops(
    "disable-arm-loloops", cl::Hidden, cl::init(false),
    cl::desc("Disable the generation of low-overhead loops"));

static cl::opt<bool>
    AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
                  cl::desc("Enable the generation of WLS loops"));

extern cl::opt<TailPredication::Mode> EnableTailPredication;

extern cl::opt<bool> EnableMaskedGatherScatters;

extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;

/// Convert a vector load intrinsic into a simple llvm load instruction.
/// This is beneficial when the underlying object being addressed comes
/// from a constant, since we get constant-folding for free.
static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
                               InstCombiner::BuilderTy &Builder) {
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));

  if (!IntrAlign)
    return nullptr;

  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
                           ? MemAlign
                           : IntrAlign->getLimitedValue();

  if (!isPowerOf2_32(Alignment))
    return nullptr;

  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::get(II.getType(), 0));
  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
}

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the allowed list must match
  // exactly.
  bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
                    (CalleeBits & ~InlineFeaturesAllowed);
  // For features in the allowed list, the callee's features must be a subset
  // of the callers'.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
                     (CalleeBits & InlineFeaturesAllowed);
  return MatchExact && MatchSubset;
}

TTI::AddressingModeKind
ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
                                       ScalarEvolution *SE) const {
  if (ST->hasMVEIntegerOps())
    return TTI::AMK_PostIndexed;

  if (L->getHeader()->getParent()->hasOptSize())
    return TTI::AMK_None;

  if (ST->isMClass() && ST->isThumb2() &&
      L->getNumBlocks() == 1)
    return TTI::AMK_PreIndexed;

  return TTI::AMK_None;
}

Optional<Instruction *>
ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  using namespace PatternMatch;
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::arm_neon_vld1: {
    Align MemAlign =
        getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
                          &IC.getAssumptionCache(), &IC.getDominatorTree());
    if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
      return IC.replaceInstUsesWith(II, V);
    }
    break;
  }

  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Align MemAlign =
        getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
                          &IC.getAssumptionCache(), &IC.getDominatorTree());
    unsigned AlignArg = II.arg_size() - 1;
    Value *AlignArgOp = II.getArgOperand(AlignArg);
    MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
    if (Align && *Align < MemAlign) {
      return IC.replaceOperand(
          II, AlignArg,
          ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
                           false));
    }
    break;
  }

  case Intrinsic::arm_mve_pred_i2v: {
    Value *Arg = II.getArgOperand(0);
    Value *ArgArg;
    if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
                       PatternMatch::m_Value(ArgArg))) &&
        II.getType() == ArgArg->getType()) {
      return IC.replaceInstUsesWith(II, ArgArg);
    }
    Constant *XorMask;
    if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
                             PatternMatch::m_Value(ArgArg)),
                         PatternMatch::m_Constant(XorMask))) &&
        II.getType() == ArgArg->getType()) {
      if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
        if (CI->getValue().trunc(16).isAllOnes()) {
          auto TrueVector = IC.Builder.CreateVectorSplat(
              cast<FixedVectorType>(II.getType())->getNumElements(),
              IC.Builder.getTrue());
          return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
        }
      }
    }
    KnownBits ScalarKnown(32);
    if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
                                ScalarKnown, 0)) {
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_pred_v2i: {
    Value *Arg = II.getArgOperand(0);
    Value *ArgArg;
    if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
                       PatternMatch::m_Value(ArgArg)))) {
      return IC.replaceInstUsesWith(II, ArgArg);
    }
    if (!II.getMetadata(LLVMContext::MD_range)) {
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      Metadata *M[] = {
          ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
          ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))};
      II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_vadc:
  case Intrinsic::arm_mve_vadc_predicated: {
    unsigned CarryOp =
        (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
    assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
           "Bad type for intrinsic!");

    KnownBits CarryKnown(32);
    if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
                                CarryKnown)) {
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_vmldava: {
    Instruction *I = cast<Instruction>(&II);
    if (I->hasOneUse()) {
      auto *User = cast<Instruction>(*I->user_begin());
      Value *OpZ;
      if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
          match(I->getOperand(3), m_Zero())) {
        Value *OpX = I->getOperand(4);
        Value *OpY = I->getOperand(5);
        Type *OpTy = OpX->getType();

        IC.Builder.SetInsertPoint(User);
        Value *V =
            IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
                                       {I->getOperand(0), I->getOperand(1),
                                        I->getOperand(2), OpZ, OpX, OpY});

        IC.replaceInstUsesWith(*User, V);
        return IC.eraseInstFromFunction(*User);
      }
    }
    return None;
  }
  }
  return None;
}

Optional<Value *> ARMTTIImpl::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
    APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {

  // Compute the demanded bits for a narrowing MVE intrinsic. The TopOpc is the
  // opcode specifying a Top/Bottom instruction, which can change between
  // instructions.
  auto SimplifyNarrowInstrTopBottom = [&](unsigned TopOpc) {
    unsigned NumElts = cast<FixedVectorType>(II.getType())->getNumElements();
    unsigned IsTop = cast<ConstantInt>(II.getOperand(TopOpc))->getZExtValue();

    // Only the odd/even lanes of operand 0 will be demanded, depending on
    // whether this is a top/bottom instruction.
    APInt DemandedElts =
        APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
                                       : APInt::getHighBitsSet(2, 1));
    SimplifyAndSetOp(&II, 0, OrigDemandedElts & DemandedElts, UndefElts);
    // The other lanes will be defined from the inserted elements.
    UndefElts &= APInt::getSplat(NumElts, !IsTop ?
                                              APInt::getLowBitsSet(2, 1)
                                            : APInt::getHighBitsSet(2, 1));
    return None;
  };

  switch (II.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::arm_mve_vcvt_narrow:
    SimplifyNarrowInstrTopBottom(2);
    break;
  case Intrinsic::arm_mve_vqmovn:
    SimplifyNarrowInstrTopBottom(4);
    break;
  case Intrinsic::arm_mve_vshrn:
    SimplifyNarrowInstrTopBottom(7);
    break;
  }

  return None;
}

InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1, any i8 imm cost 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

// Checks whether Inst is part of a min(max()) or max(min()) pattern
// that will match to an SSAT instruction. Returns the instruction being
// saturated, or null if no saturation pattern was found.
static Value *isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
  Value *LHS, *RHS;
  ConstantInt *C;
  SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;

  if (InstSPF == SPF_SMAX &&
      PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
      C->getValue() == Imm && Imm.isNegative() && Imm.isNegatedPowerOf2()) {

    auto isSSatMin = [&](Value *MinInst) {
      if (isa<SelectInst>(MinInst)) {
        Value *MinLHS, *MinRHS;
        ConstantInt *MinC;
        SelectPatternFlavor MinSPF =
            matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
        if (MinSPF == SPF_SMIN &&
            PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
            MinC->getValue() == ((-Imm) - 1))
          return true;
      }
      return false;
    };

    if (isSSatMin(Inst->getOperand(1)))
      return cast<Instruction>(Inst->getOperand(1))->getOperand(1);
    if (Inst->hasNUses(2) &&
        (isSSatMin(*Inst->user_begin()) || isSSatMin(*(++Inst->user_begin()))))
      return Inst->getOperand(1);
  }
  return nullptr;
}

// Look for a FP Saturation pattern, where the instruction can be simplified to
// a fptosi.sat. max(min(fptosi)). The constant in this case is always free.
static bool isFPSatMinMaxPattern(Instruction *Inst, const APInt &Imm) {
  if (Imm.getBitWidth() != 64 ||
      Imm != APInt::getHighBitsSet(64, 33)) // -2147483648
    return false;
  Value *FP = isSSATMinMaxPattern(Inst, Imm);
  if (!FP && isa<ICmpInst>(Inst) && Inst->hasOneUse())
    FP = isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm);
  if (!FP)
    return false;
  return isa<FPToSIInst>(FP);
}

InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  // Leave any gep offsets for CodeGenPrepare, which will do a better job at
  // splitting any large offsets.
  if (Opcode == Instruction::GetElementPtr && Idx != 0)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(~Imm, Ty, CostKind));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(-Imm, Ty, CostKind));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1 << 12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1 << 8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN.
  if (Opcode == Instruction::Xor && Imm.isAllOnes())
    return 0;

  // Ensure negative constants of min(max()) or max(min()) patterns that
  // match to SSAT instructions don't get hoisted.
  if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
      Ty->getIntegerBitWidth() <= 32) {
    if (isSSATMinMaxPattern(Inst, Imm) ||
        (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
         isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
      return 0;
  }

  if (Inst && ST->hasVFP2Base() && isFPSatMinMaxPattern(Inst, Imm))
    return 0;

  // We can convert <= -1 to < 0, which is generally quite cheap.
  if (Inst && Opcode == Instruction::ICmp && Idx == 1 && Imm.isAllOnesValue()) {
    ICmpInst::Predicate Pred = cast<ICmpInst>(Inst)->getPredicate();
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE)
      return std::min(getIntImmCost(Imm, Ty, CostKind),
                      getIntImmCost(Imm + 1, Ty, CostKind));
  }

  return getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind == TTI::TCK_RecipThroughput &&
      (ST->hasNEON() || ST->hasMVEIntegerOps())) {
    // FIXME: The vectorizer is highly sensitive to the cost of these
    // instructions, which suggests that it may be using the costs incorrectly.
    // But, for now, just make them free to avoid performance regressions for
    // vector targets.
    return 0;
  }
  return BaseT::getCFInstrCost(Opcode, CostKind, I);
}

InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };
  auto IsLegalFPType = [this](EVT VT) {
    EVT EltVT = VT.getScalarType();
    return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
           (EltVT == MVT::f64 && ST->hasFP64()) ||
           (EltVT == MVT::f16 && ST->hasFullFP16());
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  // Extending masked loads/truncating masked stores are expensive because we
  // currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
  if ((ST->hasMVEIntegerOps() &&
       (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
        Opcode == Instruction::SExt)) ||
      (ST->hasMVEFloatOps() &&
       (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
       IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
    if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
      return 2 * DstTy.getVectorNumElements() *
             ST->getMVEVectorCostFactor(CostKind);

  // The extend of other kinds of load is free.
  if (CCH == TTI::CastContextHint::Normal ||
      CCH == TTI::CastContextHint::Masked) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);

    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        // The following extend from a legal type to an illegal type, so need
        // to split the load. This introduces an extra load operation, but the
        // extend is still "free".
        {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
        {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
        {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
        {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
        {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
        {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }

    static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
        // FPExtends are similar but also require the VCVT instructions.
        {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
        {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
    };
    if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }

    // The truncate of a store is free. This is the mirror of extends above.
    static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
        {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
        {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
        {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
        {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
        {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
        {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
        {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
                                     SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }

    static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
        {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
        {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
    };
    if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
                                     SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }
  }

  // NEON vector operations that can extend their inputs.
  if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
      I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
    static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
        // vaddl
        { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
        { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 },
        // vsubl
        { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
        { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 },
        // vmull
        { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
        { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 },
        // vshll
        { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
        { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 },
    };

    auto *User = cast<Instruction>(*I->user_begin());
    int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
    if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
                                             DstTy.getSimpleVT(),
                                             SrcTy.getSimpleVT())) {
      return AdjustCost(Entry->Cost);
    }
  }

  // Single to/from double precision conversions.
  if (Src->isVectorTy() && ST->hasNEON() &&
      ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
        DstTy.getScalarType() == MVT::f32) ||
       (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
        DstTy.getScalarType() == MVT::f64))) {
    static const CostTblEntry NEONFltDblTbl[] = {
        // Vector fptrunc/fpext conversions.
        {ISD::FP_ROUND, MVT::v2f64, 2},
        {ISD::FP_EXTEND, MVT::v2f32, 2},
        {ISD::FP_EXTEND, MVT::v4f32, 4}};

    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return AdjustCost(LT.first * Entry->Cost);
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant, sexts
  // are linearised so take more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
  }

  if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
    // As a general rule, fp converts that were not matched above are
    // scalarized and cost 1 vcvt for each lane, so long as the instruction is
    // available. If not it will become a series of function calls.
    const InstructionCost CallCost =
        getCallInstrCost(nullptr, Dst, {Src}, CostKind);
    int Lanes = 1;
    if (SrcTy.isFixedLengthVector())
      Lanes = SrcTy.getVectorNumElements();

    if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
      return Lanes;
    else
      return Lanes * CallCost;
  }

  if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
      SrcTy.isFixedLengthVector()) {
    // Treat a truncate with a larger than legal source (128 bits for MVE) as
    // expensive, 2 instructions per lane.
    if ((SrcTy.getScalarType() == MVT::i8 ||
         SrcTy.getScalarType() == MVT::i16 ||
         SrcTy.getScalarType() == MVT::i32) &&
        SrcTy.getSizeInBits() > 128 &&
        SrcTy.getSizeInBits() > DstTy.getSizeInBits())
      return SrcTy.getVectorNumElements() * 2;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor(CostKind)
                     : 1;
  return AdjustCost(
      BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                               unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max<InstructionCost>(
          BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // Integer cross-lane moves are more expensive than float, which can
    // sometimes just be vmovs. Integer ones involve being passed to GPR
    // registers, causing more of a delay.
    std::pair<InstructionCost, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, ValTy->getScalarType());
    return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Thumb scalar code size cost for select.
  if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
      ST->isThumb() && !ValTy->isVectorTy()) {
    // Assume expensive structs.
    if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
      return TTI::TCC_Expensive;

    // Select costs can vary because they:
    // - may require one or more conditional mov (including an IT),
    // - can't operate directly on immediates,
    // - require live flags, which we can't copy around easily.
    InstructionCost Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;

    // Possible IT instruction for Thumb2, or more for Thumb1.
    ++Cost;

    // i1 values may need rematerialising by using mov immediates and/or
    // flag setting instructions.
    if (ValTy->isIntegerTy(1))
      ++Cost;

    return Cost;
  }

  // If this is a vector min/max/abs, use the cost of that intrinsic directly
  // instead. Hopefully when min/max intrinsics are more prevalent this code
  // will not be needed.
  const Instruction *Sel = I;
  if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
      Sel->hasOneUse())
    Sel = cast<Instruction>(Sel->user_back());
  if (Sel && ValTy->isVectorTy() &&
      (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
    unsigned IID = 0;
    switch (SPF) {
    case SPF_ABS:
      IID = Intrinsic::abs;
      break;
    case SPF_SMIN:
      IID = Intrinsic::smin;
      break;
    case SPF_SMAX:
      IID = Intrinsic::smax;
      break;
    case SPF_UMIN:
      IID = Intrinsic::umin;
      break;
    case SPF_UMAX:
      IID = Intrinsic::umax;
      break;
    case SPF_FMINNUM:
      IID = Intrinsic::minnum;
      break;
    case SPF_FMAXNUM:
      IID = Intrinsic::maxnum;
      break;
    default:
      break;
    }
    if (IID) {
      // The ICmp is free, the select gets the cost of the min/max/etc.
      if (Sel != I)
        return 0;
      IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
      return getIntrinsicInstrCost(CostAttrs, CostKind);
    }
  }

  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
      (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
      cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
    FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
    FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
    if (!VecCondTy)
      VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));

    // If we don't have mve.fp, any fp operations will need to be scalarized.
    if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
      // One scalarization insert, one scalarization extract and the cost of
      // the fcmps.
      return BaseT::getScalarizationOverhead(VecValTy, false, true) +
             BaseT::getScalarizationOverhead(VecCondTy, true, false) +
             VecValTy->getNumElements() *
                 getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                    VecCondTy->getScalarType(), VecPred,
                                    CostKind, I);
    }

    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, ValTy);
    int BaseCost = ST->getMVEVectorCostFactor(CostKind);
    // There are two types - the input that specifies the type of the compare
    // and the output vXi1 type. Because we don't know how the output will be
    // split, we may need an expensive shuffle to get two in sync. This has the
    // effect of making larger than legal compares (v8i32 for example)
    // expensive.
    if (LT.second.getVectorNumElements() > 2) {
      if (LT.first > 1)
        return LT.first * BaseCost +
               BaseT::getScalarizationOverhead(VecCondTy, true, false);
      return BaseCost;
    }
  }

  // Default to cheap (throughput/size of 1 instruction) but adjust throughput
  // for "multiple beats" potentially needed by MVE instructions.
  int BaseCost = 1;
  if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
    BaseCost = ST->getMVEVectorCostFactor(CostKind);

  return BaseCost *
         BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
                                                      ScalarEvolution *SE,
                                                      const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (ST->hasNEON()) {
    if (Ty->isVectorTy() && SE &&
        !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
      return NumVectorInstToHideOverhead;

    // In many cases the address computation is not merged into the instruction
    // addressing mode.
    return 1;
  }
  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // If a VCTP is part of a chain, it's already profitable and shouldn't be
    // optimized, else LSR may block tail-predication.
    switch (II->getIntrinsicID()) {
    case Intrinsic::arm_mve_vctp8:
    case Intrinsic::arm_mve_vctp16:
    case Intrinsic::arm_mve_vctp32:
    case Intrinsic::arm_mve_vctp64:
      return true;
    default:
      break;
    }
  }
  return false;
}

bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
    return false;

  if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
    // Don't support v2i1 yet.
    if (VecTy->getNumElements() == 2)
      return false;

    // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
    if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
      return false;
  }

  unsigned EltWidth = DataTy->getScalarSizeInBits();
  return (EltWidth == 32 && Alignment >= 4) ||
         (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
}

bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
  if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
    return false;

  unsigned EltWidth = Ty->getScalarSizeInBits();
  return ((EltWidth == 32 && Alignment >= 4) ||
          (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
}

/// Given a memcpy/memset/memmove instruction, return the number of memory
/// operations performed, via querying findOptimalMemOpLowering. Returns -1 if
/// a call is used.
int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
  MemOp MOp;
  unsigned DstAddrSpace = ~0u;
  unsigned SrcAddrSpace = ~0u;
  const Function *F = I->getParent()->getParent();

  if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
    ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
    // If 'size' is not a constant, a library call will be generated.
    if (!C)
      return -1;

    const unsigned Size = C->getValue().getZExtValue();
    const Align DstAlign = *MC->getDestAlign();
    const Align SrcAlign = *MC->getSourceAlign();

    MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
                      /*IsVolatile*/ false);
    DstAddrSpace = MC->getDestAddressSpace();
    SrcAddrSpace = MC->getSourceAddressSpace();
  } else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
    ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
    // If 'size' is not a constant, a library call will be generated.
    if (!C)
      return -1;

    const unsigned Size = C->getValue().getZExtValue();
    const Align DstAlign = *MS->getDestAlign();

    MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
                     /*IsZeroMemset*/ false, /*IsVolatile*/ false);
    DstAddrSpace = MS->getDestAddressSpace();
  } else
    llvm_unreachable("Expected a memcpy/move or memset!");

  unsigned Limit, Factor = 2;
  switch (I->getIntrinsicID()) {
  case Intrinsic::memcpy:
    Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
    break;
  case Intrinsic::memmove:
    Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
    break;
  case Intrinsic::memset:
    Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
    Factor = 1;
    break;
  default:
    llvm_unreachable("Expected a memcpy/move or memset!");
  }

  // MemOps will be populated with a list of data types that need to be loaded
  // and stored. That's why we multiply the number of elements by 2 to get the
  // cost for this memcpy.
  std::vector<EVT> MemOps;
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, MOp, DstAddrSpace,
          SrcAddrSpace, F->getAttributes()))
    return MemOps.size() * Factor;

  // If we can't find an optimal memop lowering, return the default cost.
  return -1;
}

InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  int NumOps = getNumMemOps(cast<IntrinsicInst>(I));

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  if (NumOps == -1)
    return 4;
  return NumOps;
}

InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *Tp, ArrayRef<int> Mask,
                                           int Index, VectorType *SubTp) {
  Kind = improveShuffleKindFromMask(Kind, Mask);
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffles cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost *
               ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
    }

    if (!Mask.empty()) {
      std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (LT.second.isVector() &&
          Mask.size() <= LT.second.getVectorNumElements() &&
          (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
           isVREVMask(Mask, LT.second, 64)))
        return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
    }
  }

  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}

InstructionCost ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
    // Make operations on i1 relatively expensive as this often involves
    // combining predicates. AND and XOR should be easier to handle with IT
    // blocks.
    switch (ISDOpcode) {
    default:
      break;
    case ISD::AND:
    case ISD::XOR:
      return 2;
    case ISD::OR:
      return 3;
    }
  }

  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->hasNEON()) {
    const unsigned FunctionCallDivCost = 20;
    const unsigned ReciprocalDivCost = 10;
    static const CostTblEntry CostTbl[] = {
      // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very
      // expensive.
      // Double register types.
      { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
      { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
      { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
      { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
      { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
      // Quad register types.
      { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
      // Multiplication.
    };

    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    InstructionCost Cost = BaseT::getArithmeticInstrCost(
        Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so
    // for the vectorized code. Because we have support for v2i64 but not i64
    // those sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make
    // them seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  // If this operation is a shift on arm/thumb2, it might well be folded into
  // the following instruction, hence having a cost of 0.
  auto LooksLikeAFreeShift = [&]() {
    if (ST->isThumb1Only() || Ty->isVectorTy())
      return false;

    if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
      return false;
    if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
      return false;

    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB.
    switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Xor:
    case Instruction::Or:
    case Instruction::ICmp:
      return true;
    default:
      return false;
    }
  };
  if (LooksLikeAFreeShift())
    return 0;

  // Default to cheap (throughput/size of 1 instruction) but adjust throughput
  // for "multiple beats" potentially needed by MVE instructions.
  int BaseCost = 1;
  if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
    BaseCost = ST->getMVEVectorCostFactor(CostKind);

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The results
  // are also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
    unsigned Num = VTy->getNumElements();
    InstructionCost Cost =
        getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    SmallVector<Type *> Tys(Args.size(), Ty);
    return BaseT::getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
  }

  return BaseCost;
}

InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Type legalization can't handle structs.
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  if (ST->hasNEON() && Src->isVectorTy() &&
      (Alignment && *Alignment != Align(16)) &&
      cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    return LT.first * 4;
  }

  // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
  // Same for stores.
  if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
      ((Opcode == Instruction::Load && I->hasOneUse() &&
        isa<FPExtInst>(*I->user_begin())) ||
       (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
    FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
    Type *DstTy =
        Opcode == Instruction::Load
            ?
(*I->user_begin())->getType() 1482 : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType(); 1483 if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() && 1484 DstTy->getScalarType()->isFloatTy()) 1485 return ST->getMVEVectorCostFactor(CostKind); 1486 } 1487 1488 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy() 1489 ? ST->getMVEVectorCostFactor(CostKind) 1490 : 1; 1491 return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 1492 CostKind, I); 1493 } 1494 1495 InstructionCost 1496 ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, 1497 unsigned AddressSpace, 1498 TTI::TargetCostKind CostKind) { 1499 if (ST->hasMVEIntegerOps()) { 1500 if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment)) 1501 return ST->getMVEVectorCostFactor(CostKind); 1502 if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment)) 1503 return ST->getMVEVectorCostFactor(CostKind); 1504 } 1505 if (!isa<FixedVectorType>(Src)) 1506 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 1507 CostKind); 1508 // Scalar cost, which is currently very high due to the efficiency of the 1509 // generated code. 1510 return cast<FixedVectorType>(Src)->getNumElements() * 8; 1511 } 1512 1513 InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost( 1514 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 1515 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 1516 bool UseMaskForCond, bool UseMaskForGaps) { 1517 assert(Factor >= 2 && "Invalid interleave factor"); 1518 assert(isa<VectorType>(VecTy) && "Expect a vector type"); 1519 1520 // vldN/vstN doesn't support vector types of i64/f64 element. 1521 bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64; 1522 1523 if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits && 1524 !UseMaskForCond && !UseMaskForGaps) { 1525 unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements(); 1526 auto *SubVecTy = 1527 FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor); 1528 1529 // vldN/vstN only support legal vector types of size 64 or 128 in bits. 1530 // Accesses having vector types that are a multiple of 128 bits can be 1531 // matched to more than one vldN/vstN instruction. 1532 int BaseCost = 1533 ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1; 1534 if (NumElts % Factor == 0 && 1535 TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL)) 1536 return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL); 1537 1538 // Some smaller than legal interleaved patterns are cheap as we can make 1539 // use of the vmovn or vrev patterns to interleave a standard load. This is 1540 // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is 1541 // promoted differently). The cost of 2 here is then a load and vrev or 1542 // vmovn. 
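    // As a rough sketch, a factor-2 deinterleave of a v8i8 load such as
    //   %wide = load <8 x i8>, <8 x i8>* %ptr
    //   %even = shufflevector <8 x i8> %wide, <8 x i8> poison,
    //                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    // can typically be matched to one load plus a vrev/vmovn style
    // rearrangement rather than a dedicated vld2, which is what the cost of 2
    // below is modelling.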
1543 if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 && 1544 VecTy->isIntOrIntVectorTy() && 1545 DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64) 1546 return 2 * BaseCost; 1547 } 1548 1549 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 1550 Alignment, AddressSpace, CostKind, 1551 UseMaskForCond, UseMaskForGaps); 1552 } 1553 1554 InstructionCost ARMTTIImpl::getGatherScatterOpCost( 1555 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, 1556 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { 1557 using namespace PatternMatch; 1558 if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters) 1559 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, 1560 Alignment, CostKind, I); 1561 1562 assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!"); 1563 auto *VTy = cast<FixedVectorType>(DataTy); 1564 1565 // TODO: Splitting, once we do that. 1566 1567 unsigned NumElems = VTy->getNumElements(); 1568 unsigned EltSize = VTy->getScalarSizeInBits(); 1569 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy); 1570 1571 // For now, it is assumed that for the MVE gather instructions the loads are 1572 // all effectively serialised. This means the cost is the scalar cost 1573 // multiplied by the number of elements being loaded. This is possibly very 1574 // conservative, but even so we still end up vectorising loops because the 1575 // cost per iteration for many loops is lower than for scalar loops. 1576 InstructionCost VectorCost = 1577 NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind); 1578 // The scalarization cost should be a lot higher. We use the number of vector 1579 // elements plus the scalarization overhead. 
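  // Rough worked example (assuming the usual throughput cost factor of 2): a
  // legal v4i32 gather would cost 4 (elements) * 1 (legalization) * 2 = 8,
  // whereas the scalarised estimate below is 4 plus the insert/extract
  // overhead.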
1580   InstructionCost ScalarCost =
1581       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, true, false) +
1582       BaseT::getScalarizationOverhead(VTy, false, true);
1583
1584   if (EltSize < 8 || Alignment < EltSize / 8)
1585     return ScalarCost;
1586
1587   unsigned ExtSize = EltSize;
1588   // Check whether there's a single user that asks for an extended type.
1589   if (I != nullptr) {
1590     // Depending on the caller of this function, a gather instruction will
1591     // either have opcode Instruction::Load or be a call to the masked_gather
1592     // intrinsic.
1593     if ((I->getOpcode() == Instruction::Load ||
1594          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1595         I->hasOneUse()) {
1596       const User *Us = *I->users().begin();
1597       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1598         // Only allow valid type combinations.
1599         unsigned TypeSize =
1600             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1601         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1602              (TypeSize == 16 && EltSize == 8)) &&
1603             TypeSize * NumElems == 128) {
1604           ExtSize = TypeSize;
1605         }
1606       }
1607     }
1608     // Check whether the input data needs to be truncated.
1609     TruncInst *T;
1610     if ((I->getOpcode() == Instruction::Store ||
1611          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1612         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1613       // Only allow valid type combinations.
1614       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1615       if (((EltSize == 16 && TypeSize == 32) ||
1616            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1617           TypeSize * NumElems == 128)
1618         ExtSize = TypeSize;
1619     }
1620   }
1621
1622   if (ExtSize * NumElems != 128 || NumElems < 4)
1623     return ScalarCost;
1624
1625   // Any (aligned) i32 gather will not need to be scalarised.
1626   if (ExtSize == 32)
1627     return VectorCost;
1628   // For smaller types, we need to ensure that the gep's inputs are correctly
1629   // extended from a small enough value. Other sizes (including i64) are
1630   // scalarized for now.
1631   if (ExtSize != 8 && ExtSize != 16)
1632     return ScalarCost;
1633
1634   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1635     Ptr = BC->getOperand(0);
1636   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1637     if (GEP->getNumOperands() != 2)
1638       return ScalarCost;
1639     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1640     // Scale needs to be correct (which is only relevant for i16s).
1641     if (Scale != 1 && Scale * 8 != ExtSize)
1642       return ScalarCost;
1643     // And we need to zext (not sext) the indexes from a small enough type.
1644 if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) { 1645 if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize) 1646 return VectorCost; 1647 } 1648 return ScalarCost; 1649 } 1650 return ScalarCost; 1651 } 1652 1653 InstructionCost 1654 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 1655 Optional<FastMathFlags> FMF, 1656 TTI::TargetCostKind CostKind) { 1657 if (TTI::requiresOrderedReduction(FMF)) 1658 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 1659 1660 EVT ValVT = TLI->getValueType(DL, ValTy); 1661 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1662 if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD) 1663 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 1664 1665 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1666 1667 static const CostTblEntry CostTblAdd[]{ 1668 {ISD::ADD, MVT::v16i8, 1}, 1669 {ISD::ADD, MVT::v8i16, 1}, 1670 {ISD::ADD, MVT::v4i32, 1}, 1671 }; 1672 if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second)) 1673 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first; 1674 1675 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 1676 } 1677 1678 InstructionCost 1679 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned, 1680 Type *ResTy, VectorType *ValTy, 1681 TTI::TargetCostKind CostKind) { 1682 EVT ValVT = TLI->getValueType(DL, ValTy); 1683 EVT ResVT = TLI->getValueType(DL, ResTy); 1684 1685 if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) { 1686 std::pair<InstructionCost, MVT> LT = 1687 TLI->getTypeLegalizationCost(DL, ValTy); 1688 1689 // The legal cases are: 1690 // VADDV u/s 8/16/32 1691 // VMLAV u/s 8/16/32 1692 // VADDLV u/s 32 1693 // VMLALV u/s 16/32 1694 // Codegen currently cannot always handle larger than legal vectors very 1695 // well, especially for predicated reductions where the mask needs to be 1696 // split, so restrict to 128bit or smaller input types. 1697 unsigned RevVTSize = ResVT.getSizeInBits(); 1698 if (ValVT.getSizeInBits() <= 128 && 1699 ((LT.second == MVT::v16i8 && RevVTSize <= 32) || 1700 (LT.second == MVT::v8i16 && RevVTSize <= (IsMLA ? 64u : 32u)) || 1701 (LT.second == MVT::v4i32 && RevVTSize <= 64))) 1702 return ST->getMVEVectorCostFactor(CostKind) * LT.first; 1703 } 1704 1705 return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy, 1706 CostKind); 1707 } 1708 1709 InstructionCost 1710 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 1711 TTI::TargetCostKind CostKind) { 1712 switch (ICA.getID()) { 1713 case Intrinsic::get_active_lane_mask: 1714 // Currently we make a somewhat optimistic assumption that 1715 // active_lane_mask's are always free. In reality it may be freely folded 1716 // into a tail predicated loop, expanded into a VCPT or expanded into a lot 1717 // of add/icmp code. We may need to improve this in the future, but being 1718 // able to detect if it is free or not involves looking at a lot of other 1719 // code. We currently assume that the vectorizer inserted these, and knew 1720 // what it was doing in adding one. 
1721     if (ST->hasMVEIntegerOps())
1722       return 0;
1723     break;
1724   case Intrinsic::sadd_sat:
1725   case Intrinsic::ssub_sat:
1726   case Intrinsic::uadd_sat:
1727   case Intrinsic::usub_sat: {
1728     if (!ST->hasMVEIntegerOps())
1729       break;
1730     Type *VT = ICA.getReturnType();
1731
1732     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1733     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1734         LT.second == MVT::v16i8) {
1735       // This is a base cost of 1 for the vqadd, plus 3 extra shifts if we
1736       // need to extend the type, as it uses shr(qadd(shl, shl)).
1737       unsigned Instrs =
1738           LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
1739       return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1740     }
1741     break;
1742   }
1743   case Intrinsic::abs:
1744   case Intrinsic::smin:
1745   case Intrinsic::smax:
1746   case Intrinsic::umin:
1747   case Intrinsic::umax: {
1748     if (!ST->hasMVEIntegerOps())
1749       break;
1750     Type *VT = ICA.getReturnType();
1751
1752     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1753     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1754         LT.second == MVT::v16i8)
1755       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1756     break;
1757   }
1758   case Intrinsic::minnum:
1759   case Intrinsic::maxnum: {
1760     if (!ST->hasMVEFloatOps())
1761       break;
1762     Type *VT = ICA.getReturnType();
1763     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1764     if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
1765       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1766     break;
1767   }
1768   }
1769
1770   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1771 }
1772
1773 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1774   if (!F->isIntrinsic())
1775     return BaseT::isLoweredToCall(F);
1776
1777   // Assume all Arm-specific intrinsics map to an instruction.
1778   if (F->getName().startswith("llvm.arm"))
1779     return false;
1780
1781   switch (F->getIntrinsicID()) {
1782   default: break;
1783   case Intrinsic::powi:
1784   case Intrinsic::sin:
1785   case Intrinsic::cos:
1786   case Intrinsic::pow:
1787   case Intrinsic::log:
1788   case Intrinsic::log10:
1789   case Intrinsic::log2:
1790   case Intrinsic::exp:
1791   case Intrinsic::exp2:
1792     return true;
1793   case Intrinsic::sqrt:
1794   case Intrinsic::fabs:
1795   case Intrinsic::copysign:
1796   case Intrinsic::floor:
1797   case Intrinsic::ceil:
1798   case Intrinsic::trunc:
1799   case Intrinsic::rint:
1800   case Intrinsic::nearbyint:
1801   case Intrinsic::round:
1802   case Intrinsic::canonicalize:
1803   case Intrinsic::lround:
1804   case Intrinsic::llround:
1805   case Intrinsic::lrint:
1806   case Intrinsic::llrint:
1807     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1808       return true;
1809     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1810       return true;
1811     // Some of these operations can be handled by vector instructions; assume
1812     // that unsupported vectors will be expanded into supported scalar ones.
1813     // TODO: Handle scalar operations properly.
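    // That is, without at least an FPARMv8 or VFP2 base FPU there is no
    // hardware support for these operations, so assume they are lowered to
    // library calls.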
1814 return !ST->hasFPARMv8Base() && !ST->hasVFP2Base(); 1815 case Intrinsic::masked_store: 1816 case Intrinsic::masked_load: 1817 case Intrinsic::masked_gather: 1818 case Intrinsic::masked_scatter: 1819 return !ST->hasMVEIntegerOps(); 1820 case Intrinsic::sadd_with_overflow: 1821 case Intrinsic::uadd_with_overflow: 1822 case Intrinsic::ssub_with_overflow: 1823 case Intrinsic::usub_with_overflow: 1824 case Intrinsic::sadd_sat: 1825 case Intrinsic::uadd_sat: 1826 case Intrinsic::ssub_sat: 1827 case Intrinsic::usub_sat: 1828 return false; 1829 } 1830 1831 return BaseT::isLoweredToCall(F); 1832 } 1833 1834 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) { 1835 unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode()); 1836 EVT VT = TLI->getValueType(DL, I.getType(), true); 1837 if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall) 1838 return true; 1839 1840 // Check if an intrinsic will be lowered to a call and assume that any 1841 // other CallInst will generate a bl. 1842 if (auto *Call = dyn_cast<CallInst>(&I)) { 1843 if (auto *II = dyn_cast<IntrinsicInst>(Call)) { 1844 switch(II->getIntrinsicID()) { 1845 case Intrinsic::memcpy: 1846 case Intrinsic::memset: 1847 case Intrinsic::memmove: 1848 return getNumMemOps(II) == -1; 1849 default: 1850 if (const Function *F = Call->getCalledFunction()) 1851 return isLoweredToCall(F); 1852 } 1853 } 1854 return true; 1855 } 1856 1857 // FPv5 provides conversions between integer, double-precision, 1858 // single-precision, and half-precision formats. 1859 switch (I.getOpcode()) { 1860 default: 1861 break; 1862 case Instruction::FPToSI: 1863 case Instruction::FPToUI: 1864 case Instruction::SIToFP: 1865 case Instruction::UIToFP: 1866 case Instruction::FPTrunc: 1867 case Instruction::FPExt: 1868 return !ST->hasFPARMv8Base(); 1869 } 1870 1871 // FIXME: Unfortunately the approach of checking the Operation Action does 1872 // not catch all cases of Legalization that use library calls. Our 1873 // Legalization step categorizes some transformations into library calls as 1874 // Custom, Expand or even Legal when doing type legalization. So for now 1875 // we have to special case for instance the SDIV of 64bit integers and the 1876 // use of floating point emulation. 1877 if (VT.isInteger() && VT.getSizeInBits() >= 64) { 1878 switch (ISD) { 1879 default: 1880 break; 1881 case ISD::SDIV: 1882 case ISD::UDIV: 1883 case ISD::SREM: 1884 case ISD::UREM: 1885 case ISD::SDIVREM: 1886 case ISD::UDIVREM: 1887 return true; 1888 } 1889 } 1890 1891 // Assume all other non-float operations are supported. 1892 if (!VT.isFloatingPoint()) 1893 return false; 1894 1895 // We'll need a library call to handle most floats when using soft. 1896 if (TLI->useSoftFloat()) { 1897 switch (I.getOpcode()) { 1898 default: 1899 return true; 1900 case Instruction::Alloca: 1901 case Instruction::Load: 1902 case Instruction::Store: 1903 case Instruction::Select: 1904 case Instruction::PHI: 1905 return false; 1906 } 1907 } 1908 1909 // We'll need a libcall to perform double precision operations on a single 1910 // precision only FPU. 1911 if (I.getType()->isDoubleTy() && !ST->hasFP64()) 1912 return true; 1913 1914 // Likewise for half precision arithmetic. 
1915 if (I.getType()->isHalfTy() && !ST->hasFullFP16()) 1916 return true; 1917 1918 return false; 1919 } 1920 1921 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, 1922 AssumptionCache &AC, 1923 TargetLibraryInfo *LibInfo, 1924 HardwareLoopInfo &HWLoopInfo) { 1925 // Low-overhead branches are only supported in the 'low-overhead branch' 1926 // extension of v8.1-m. 1927 if (!ST->hasLOB() || DisableLowOverheadLoops) { 1928 LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n"); 1929 return false; 1930 } 1931 1932 if (!SE.hasLoopInvariantBackedgeTakenCount(L)) { 1933 LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n"); 1934 return false; 1935 } 1936 1937 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 1938 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) { 1939 LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n"); 1940 return false; 1941 } 1942 1943 const SCEV *TripCountSCEV = 1944 SE.getAddExpr(BackedgeTakenCount, 1945 SE.getOne(BackedgeTakenCount->getType())); 1946 1947 // We need to store the trip count in LR, a 32-bit register. 1948 if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) { 1949 LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n"); 1950 return false; 1951 } 1952 1953 // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little 1954 // point in generating a hardware loop if that's going to happen. 1955 1956 auto IsHardwareLoopIntrinsic = [](Instruction &I) { 1957 if (auto *Call = dyn_cast<IntrinsicInst>(&I)) { 1958 switch (Call->getIntrinsicID()) { 1959 default: 1960 break; 1961 case Intrinsic::start_loop_iterations: 1962 case Intrinsic::test_start_loop_iterations: 1963 case Intrinsic::loop_decrement: 1964 case Intrinsic::loop_decrement_reg: 1965 return true; 1966 } 1967 } 1968 return false; 1969 }; 1970 1971 // Scan the instructions to see if there's any that we know will turn into a 1972 // call or if this loop is already a low-overhead loop or will become a tail 1973 // predicated loop. 1974 bool IsTailPredLoop = false; 1975 auto ScanLoop = [&](Loop *L) { 1976 for (auto *BB : L->getBlocks()) { 1977 for (auto &I : *BB) { 1978 if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) || 1979 isa<InlineAsm>(I)) { 1980 LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n"); 1981 return false; 1982 } 1983 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 1984 IsTailPredLoop |= 1985 II->getIntrinsicID() == Intrinsic::get_active_lane_mask || 1986 II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 || 1987 II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 || 1988 II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 || 1989 II->getIntrinsicID() == Intrinsic::arm_mve_vctp64; 1990 } 1991 } 1992 return true; 1993 }; 1994 1995 // Visit inner loops. 1996 for (auto Inner : *L) 1997 if (!ScanLoop(Inner)) 1998 return false; 1999 2000 if (!ScanLoop(L)) 2001 return false; 2002 2003 // TODO: Check whether the trip count calculation is expensive. If L is the 2004 // inner loop but we know it has a low trip count, calculating that trip 2005 // count (in the parent loop) may be detrimental. 
2006
2007   LLVMContext &C = L->getHeader()->getContext();
2008   HWLoopInfo.CounterInReg = true;
2009   HWLoopInfo.IsNestingLegal = false;
2010   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
2011   HWLoopInfo.CountType = Type::getInt32Ty(C);
2012   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
2013   return true;
2014 }
2015
2016 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
2017   // We don't allow icmps, and because we only look at single-block loops,
2018   // we simply count the icmps, i.e. there should only be 1 for the backedge.
2019   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
2020     return false;
2021   // FIXME: This is a workaround for poor cost modelling. Min/Max intrinsics are
2022   // not currently canonical, but soon will be. Code without them uses icmp, and
2023   // so is not tail-predicated, as per the condition above. In order to get the
2024   // same performance we treat min and max the same as an icmp for tail-predication
2025   // purposes for the moment (we often rely on non-tail-predicated and higher VFs to
2026   // pick more optimal instructions like VQDMULH; they need to be recognized
2027   // directly by the vectorizer).
2028   if (auto *II = dyn_cast<IntrinsicInst>(&I))
2029     if ((II->getIntrinsicID() == Intrinsic::smin ||
2030          II->getIntrinsicID() == Intrinsic::smax ||
2031          II->getIntrinsicID() == Intrinsic::umin ||
2032          II->getIntrinsicID() == Intrinsic::umax) &&
2033         ++ICmpCount > 1)
2034       return false;
2035
2036   if (isa<FCmpInst>(&I))
2037     return false;
2038
2039   // We could allow extending/narrowing FP loads/stores, but codegen is
2040   // too inefficient so reject this for now.
2041   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
2042     return false;
2043
2044   // Extends have to be extending loads.
2045   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
2046     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
2047       return false;
2048
2049   // Truncs have to be narrowing stores.
2050   if (isa<TruncInst>(&I))
2051     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
2052       return false;
2053
2054   return true;
2055 }
2056
2057 // To set up a tail-predicated loop, we need to know the total number of
2058 // elements processed by that loop. Thus, we need to determine the element
2059 // size and:
2060 // 1) it should be uniform for all operations in the vector loop, so we
2061 //    e.g. don't want any widening/narrowing operations.
2062 // 2) it should be smaller than i64s because we don't have vector operations
2063 //    that work on i64s.
2064 // 3) we don't want elements to be reversed or shuffled, to make sure the
2065 //    tail-predication masks/predicates the right lanes.
2066 //
2067 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
2068                                  const DataLayout &DL,
2069                                  const LoopAccessInfo *LAI) {
2070   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
2071
2072   // If there are live-out values, it is probably a reduction. We can predicate
2073   // most reduction operations freely under MVE using a combination of
2074   // prefer-predicated-reduction-select and inloop reductions. We limit this to
2075   // floating point and integer reductions, but don't check for operators
2076   // specifically here. If the value ends up not being a reduction (and so the
2077   // vectorizer cannot tailfold the loop), we should fall back to standard
2078   // vectorization automatically.
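  // For instance, a simple integer sum reduction such as
  //   for (i = 0; i < n; i++) Sum += A[i];
  // leaves Sum live-out of the loop but can still be tail-predicated, as the
  // accumulation can be done with predicated selects or an in-loop
  // (VADDV-style) reduction.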
2079 SmallVector< Instruction *, 8 > LiveOuts; 2080 LiveOuts = llvm::findDefsUsedOutsideOfLoop(L); 2081 bool ReductionsDisabled = 2082 EnableTailPredication == TailPredication::EnabledNoReductions || 2083 EnableTailPredication == TailPredication::ForceEnabledNoReductions; 2084 2085 for (auto *I : LiveOuts) { 2086 if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() && 2087 !I->getType()->isHalfTy()) { 2088 LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float " 2089 "live-out value\n"); 2090 return false; 2091 } 2092 if (ReductionsDisabled) { 2093 LLVM_DEBUG(dbgs() << "Reductions not enabled\n"); 2094 return false; 2095 } 2096 } 2097 2098 // Next, check that all instructions can be tail-predicated. 2099 PredicatedScalarEvolution PSE = LAI->getPSE(); 2100 SmallVector<Instruction *, 16> LoadStores; 2101 int ICmpCount = 0; 2102 2103 for (BasicBlock *BB : L->blocks()) { 2104 for (Instruction &I : BB->instructionsWithoutDebug()) { 2105 if (isa<PHINode>(&I)) 2106 continue; 2107 if (!canTailPredicateInstruction(I, ICmpCount)) { 2108 LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump()); 2109 return false; 2110 } 2111 2112 Type *T = I.getType(); 2113 if (T->getScalarSizeInBits() > 32) { 2114 LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump()); 2115 return false; 2116 } 2117 if (isa<StoreInst>(I) || isa<LoadInst>(I)) { 2118 Value *Ptr = getLoadStorePointerOperand(&I); 2119 Type *AccessTy = getLoadStoreType(&I); 2120 int64_t NextStride = getPtrStride(PSE, AccessTy, Ptr, L); 2121 if (NextStride == 1) { 2122 // TODO: for now only allow consecutive strides of 1. We could support 2123 // other strides as long as it is uniform, but let's keep it simple 2124 // for now. 2125 continue; 2126 } else if (NextStride == -1 || 2127 (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) || 2128 (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) { 2129 LLVM_DEBUG(dbgs() 2130 << "Consecutive strides of 2 found, vld2/vstr2 can't " 2131 "be tail-predicated\n."); 2132 return false; 2133 // TODO: don't tail predicate if there is a reversed load? 2134 } else if (EnableMaskedGatherScatters) { 2135 // Gather/scatters do allow loading from arbitrary strides, at 2136 // least if they are loop invariant. 2137 // TODO: Loop variant strides should in theory work, too, but 2138 // this requires further testing. 2139 const SCEV *PtrScev = PSE.getSE()->getSCEV(Ptr); 2140 if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) { 2141 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 2142 if (PSE.getSE()->isLoopInvariant(Step, L)) 2143 continue; 2144 } 2145 } 2146 LLVM_DEBUG(dbgs() << "Bad stride found, can't " 2147 "tail-predicate\n."); 2148 return false; 2149 } 2150 } 2151 } 2152 2153 LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n"); 2154 return true; 2155 } 2156 2157 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, 2158 ScalarEvolution &SE, 2159 AssumptionCache &AC, 2160 TargetLibraryInfo *TLI, 2161 DominatorTree *DT, 2162 const LoopAccessInfo *LAI) { 2163 if (!EnableTailPredication) { 2164 LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n"); 2165 return false; 2166 } 2167 2168 // Creating a predicated vector loop is the first step for generating a 2169 // tail-predicated hardware loop, for which we need the MVE masked 2170 // load/stores instructions: 2171 if (!ST->hasMVEIntegerOps()) 2172 return false; 2173 2174 // For now, restrict this to single block loops. 
2175   if (L->getNumBlocks() > 1) {
2176     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
2177                          "loop.\n");
2178     return false;
2179   }
2180
2181   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
2182
2183   HardwareLoopInfo HWLoopInfo(L);
2184   if (!HWLoopInfo.canAnalyze(*LI)) {
2185     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2186                          "analyzable.\n");
2187     return false;
2188   }
2189
2190   // This checks if we have the low-overhead branch architecture
2191   // extension, and if we will create a hardware loop:
2192   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
2193     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2194                          "profitable.\n");
2195     return false;
2196   }
2197
2198   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
2199     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2200                          "a candidate.\n");
2201     return false;
2202   }
2203
2204   return canTailPredicateLoop(L, LI, SE, DL, LAI);
2205 }
2206
2207 bool ARMTTIImpl::emitGetActiveLaneMask() const {
2208   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2209     return false;
2210
2211   // Intrinsic @llvm.get.active.lane.mask is supported.
2212   // It is used in the MVETailPredication pass, which requires the number of
2213   // elements processed by this vector loop to set up the tail-predicated
2214   // loop.
2215   return true;
2216 }
2217 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2218                                          TTI::UnrollingPreferences &UP,
2219                                          OptimizationRemarkEmitter *ORE) {
2220   // Enable upper-bound unrolling universally; it is not dependent upon the
2221   // conditions below.
2222   UP.UpperBound = true;
2223
2224   // Currently only enable these preferences for M-Class cores.
2225   if (!ST->isMClass())
2226     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
2227
2228   // Disable loop unrolling for Oz and Os.
2229   UP.OptSizeThreshold = 0;
2230   UP.PartialOptSizeThreshold = 0;
2231   if (L->getHeader()->getParent()->hasOptSize())
2232     return;
2233
2234   SmallVector<BasicBlock*, 4> ExitingBlocks;
2235   L->getExitingBlocks(ExitingBlocks);
2236   LLVM_DEBUG(dbgs() << "Loop has:\n"
2237                     << "Blocks: " << L->getNumBlocks() << "\n"
2238                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2239
2240   // Allow at most one exit other than the latch. This acts as an early exit
2241   // as it mirrors the profitability calculation of the runtime unroller.
2242   if (ExitingBlocks.size() > 2)
2243     return;
2244
2245   // Limit the CFG of the loop body for targets with a branch predictor.
2246   // Allowing 4 blocks permits if-then-else diamonds in the body.
2247   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2248     return;
2249
2250   // Don't unroll vectorized loops, including the remainder loop.
2251   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2252     return;
2253
2254   // Scan the loop: don't unroll loops with calls as this could prevent
2255   // inlining.
2256   InstructionCost Cost = 0;
2257   for (auto *BB : L->getBlocks()) {
2258     for (auto &I : *BB) {
2259       // Don't unroll vectorised loops. MVE does not benefit from it as much as
2260       // scalar code.
2261 if (I.getType()->isVectorTy()) 2262 return; 2263 2264 if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 2265 if (const Function *F = cast<CallBase>(I).getCalledFunction()) { 2266 if (!isLoweredToCall(F)) 2267 continue; 2268 } 2269 return; 2270 } 2271 2272 SmallVector<const Value*, 4> Operands(I.operand_values()); 2273 Cost += 2274 getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency); 2275 } 2276 } 2277 2278 // On v6m cores, there are very few registers available. We can easily end up 2279 // spilling and reloading more registers in an unrolled loop. Look at the 2280 // number of LCSSA phis as a rough measure of how many registers will need to 2281 // be live out of the loop, reducing the default unroll count if more than 1 2282 // value is needed. In the long run, all of this should be being learnt by a 2283 // machine. 2284 unsigned UnrollCount = 4; 2285 if (ST->isThumb1Only()) { 2286 unsigned ExitingValues = 0; 2287 SmallVector<BasicBlock *, 4> ExitBlocks; 2288 L->getExitBlocks(ExitBlocks); 2289 for (auto *Exit : ExitBlocks) { 2290 // Count the number of LCSSA phis. Exclude values coming from GEP's as 2291 // only the last is expected to be needed for address operands. 2292 unsigned LiveOuts = count_if(Exit->phis(), [](auto &PH) { 2293 return PH.getNumOperands() != 1 || 2294 !isa<GetElementPtrInst>(PH.getOperand(0)); 2295 }); 2296 ExitingValues = ExitingValues < LiveOuts ? LiveOuts : ExitingValues; 2297 } 2298 if (ExitingValues) 2299 UnrollCount /= ExitingValues; 2300 if (UnrollCount <= 1) 2301 return; 2302 } 2303 2304 LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n"); 2305 LLVM_DEBUG(dbgs() << "Default Runtime Unroll Count: " << UnrollCount << "\n"); 2306 2307 UP.Partial = true; 2308 UP.Runtime = true; 2309 UP.UnrollRemainder = true; 2310 UP.DefaultUnrollRuntimeCount = UnrollCount; 2311 UP.UnrollAndJam = true; 2312 UP.UnrollAndJamInnerLoopThreshold = 60; 2313 2314 // Force unrolling small loops can be very useful because of the branch 2315 // taken cost of the backedge. 2316 if (Cost < 12) 2317 UP.Force = true; 2318 } 2319 2320 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, 2321 TTI::PeelingPreferences &PP) { 2322 BaseT::getPeelingPreferences(L, SE, PP); 2323 } 2324 2325 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty, 2326 TTI::ReductionFlags Flags) const { 2327 if (!ST->hasMVEIntegerOps()) 2328 return false; 2329 2330 unsigned ScalarBits = Ty->getScalarSizeInBits(); 2331 switch (Opcode) { 2332 case Instruction::Add: 2333 return ScalarBits <= 64; 2334 default: 2335 return false; 2336 } 2337 } 2338 2339 bool ARMTTIImpl::preferPredicatedReductionSelect( 2340 unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const { 2341 if (!ST->hasMVEIntegerOps()) 2342 return false; 2343 return true; 2344 } 2345