//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "AArch64PerfectShuffle.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
                                           cl::Hidden);

static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
                                            cl::init(10), cl::Hidden);

static cl::opt<unsigned>
    SVETailFoldInsnThreshold("sve-tail-folding-insn-threshold", cl::init(15),
                             cl::Hidden);

static cl::opt<unsigned>
    NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10),
                               cl::Hidden);

static cl::opt<unsigned> CallPenaltyChangeSM(
    "call-penalty-sm-change", cl::init(5), cl::Hidden,
    cl::desc(
        "Penalty of calling a function that requires a change to PSTATE.SM"));

static cl::opt<unsigned> InlineCallPenaltyChangeSM(
    "inline-call-penalty-sm-change", cl::init(10), cl::Hidden,
    cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"));

static cl::opt<bool> EnableOrLikeSelectOpt("enable-aarch64-or-like-select",
                                           cl::init(true), cl::Hidden);

namespace {
class TailFoldingOption {
  // These bitfields will only ever be set to something non-zero in operator=,
  // when setting the -sve-tail-folding option. This option should always be of
  // the form (default|simple|all|disable)[+(Flag1|Flag2|etc)], where here
  // InitialBits is one of (disabled|all|simple). EnableBits represents
  // additional flags we're enabling, and DisableBits for those flags we're
  // disabling. The default flag is tracked in the variable NeedsDefault, since
  // at the time of setting the option we may not know what the default value
  // for the CPU is.
  TailFoldingOpts InitialBits = TailFoldingOpts::Disabled;
  TailFoldingOpts EnableBits = TailFoldingOpts::Disabled;
  TailFoldingOpts DisableBits = TailFoldingOpts::Disabled;

  // This value needs to be initialised to true in case the user does not
  // explicitly set the -sve-tail-folding option.
  bool NeedsDefault = true;

  void setInitialBits(TailFoldingOpts Bits) { InitialBits = Bits; }

  void setNeedsDefault(bool V) { NeedsDefault = V; }

  void setEnableBit(TailFoldingOpts Bit) {
    EnableBits |= Bit;
    DisableBits &= ~Bit;
  }

  void setDisableBit(TailFoldingOpts Bit) {
    EnableBits &= ~Bit;
    DisableBits |= Bit;
  }

  TailFoldingOpts getBits(TailFoldingOpts DefaultBits) const {
    TailFoldingOpts Bits = TailFoldingOpts::Disabled;

    assert((InitialBits == TailFoldingOpts::Disabled || !NeedsDefault) &&
           "Initial bits should only include one of "
           "(disabled|all|simple|default)");
    Bits = NeedsDefault ? DefaultBits : InitialBits;
    Bits |= EnableBits;
    Bits &= ~DisableBits;

    return Bits;
  }

  void reportError(std::string Opt) {
    errs() << "invalid argument '" << Opt
           << "' to -sve-tail-folding=; the option should be of the form\n"
              "  (disabled|all|default|simple)[+(reductions|recurrences"
              "|reverse|noreductions|norecurrences|noreverse)]\n";
    report_fatal_error("Unrecognised tail-folding option");
  }

public:

  void operator=(const std::string &Val) {
    // If the user explicitly sets -sve-tail-folding= then treat as an error.
    if (Val.empty()) {
      reportError("");
      return;
    }

    // Since the user is explicitly setting the option we don't automatically
    // need the default unless they require it.
    setNeedsDefault(false);

    SmallVector<StringRef, 4> TailFoldTypes;
    StringRef(Val).split(TailFoldTypes, '+', -1, false);

    unsigned StartIdx = 1;
    if (TailFoldTypes[0] == "disabled")
      setInitialBits(TailFoldingOpts::Disabled);
    else if (TailFoldTypes[0] == "all")
      setInitialBits(TailFoldingOpts::All);
    else if (TailFoldTypes[0] == "default")
      setNeedsDefault(true);
    else if (TailFoldTypes[0] == "simple")
      setInitialBits(TailFoldingOpts::Simple);
    else {
      StartIdx = 0;
      setInitialBits(TailFoldingOpts::Disabled);
    }

    for (unsigned I = StartIdx; I < TailFoldTypes.size(); I++) {
      if (TailFoldTypes[I] == "reductions")
        setEnableBit(TailFoldingOpts::Reductions);
      else if (TailFoldTypes[I] == "recurrences")
        setEnableBit(TailFoldingOpts::Recurrences);
      else if (TailFoldTypes[I] == "reverse")
        setEnableBit(TailFoldingOpts::Reverse);
      else if (TailFoldTypes[I] == "noreductions")
        setDisableBit(TailFoldingOpts::Reductions);
      else if (TailFoldTypes[I] == "norecurrences")
        setDisableBit(TailFoldingOpts::Recurrences);
      else if (TailFoldTypes[I] == "noreverse")
        setDisableBit(TailFoldingOpts::Reverse);
      else
        reportError(Val);
    }
  }

  bool satisfies(TailFoldingOpts DefaultBits, TailFoldingOpts Required) const {
    return (getBits(DefaultBits) & Required) == Required;
  }
};
} // namespace

TailFoldingOption TailFoldingOptionLoc;

cl::opt<TailFoldingOption, true, cl::parser<std::string>> SVETailFolding(
    "sve-tail-folding",
    cl::desc(
        "Control the use of vectorisation using tail-folding for SVE where the"
        " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:"
        "\ndisabled      (Initial) No loop types will vectorize using "
        "tail-folding"
        "\ndefault       (Initial) Uses the default tail-folding settings for "
        "the target CPU"
        "\nall           (Initial) All legal loop types will vectorize using "
        "tail-folding"
        "\nsimple        (Initial) Use tail-folding for simple loops (not "
        "reductions or recurrences)"
        "\nreductions    Use tail-folding for loops containing reductions"
        "\nnoreductions  Inverse of above"
        "\nrecurrences   Use tail-folding for loops containing fixed order "
        "recurrences"
        "\nnorecurrences Inverse of above"
        "\nreverse       Use tail-folding for loops requiring reversed "
        "predicates"
        "\nnoreverse     Inverse of above"),
    cl::location(TailFoldingOptionLoc));

// Experimental option that will only be fully functional when the
// code-generator is changed to use SVE instead of NEON for all fixed-width
// operations.
static cl::opt<bool> EnableFixedwidthAutovecInStreamingMode(
    "enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden);

// Experimental option that will only be fully functional when the cost-model
// and code-generator have been changed to avoid using scalable vector
// instructions that are not legal in streaming SVE mode.
static cl::opt<bool> EnableScalableAutovecInStreamingMode(
    "enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden);

static bool isSMEABIRoutineCall(const CallInst &CI) {
  const auto *F = CI.getCalledFunction();
  return F && StringSwitch<bool>(F->getName())
                  .Case("__arm_sme_state", true)
                  .Case("__arm_tpidr2_save", true)
                  .Case("__arm_tpidr2_restore", true)
                  .Case("__arm_za_disable", true)
                  .Default(false);
}

/// Returns true if the function has explicit operations that can only be
/// lowered using incompatible instructions for the selected mode. This also
/// returns true if the function F may use or modify ZA state.
static bool hasPossibleIncompatibleOps(const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      // Be conservative for now and assume that any call to inline asm or to
      // intrinsics could result in non-streaming ops (e.g. calls to
      // @llvm.aarch64.* or @llvm.gather/scatter intrinsics). We can assume that
      // all native LLVM instructions can be lowered to compatible instructions.
      if (isa<CallInst>(I) && !I.isDebugOrPseudoInst() &&
          (cast<CallInst>(I).isInlineAsm() || isa<IntrinsicInst>(I) ||
           isSMEABIRoutineCall(cast<CallInst>(I))))
        return true;
    }
  }
  return false;
}

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  SMEAttrs CallerAttrs(*Caller);
  SMEAttrs CalleeAttrs(*Callee);
  if (CalleeAttrs.hasNewZABody())
    return false;

  if (CallerAttrs.requiresLazySave(CalleeAttrs) ||
      (CallerAttrs.requiresSMChange(CalleeAttrs) &&
       (!CallerAttrs.hasStreamingInterfaceOrBody() ||
        !CalleeAttrs.hasStreamingBody()))) {
    if (hasPossibleIncompatibleOps(Callee))
      return false;
  }

  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
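  // (CallerBits & CalleeBits) == CalleeBits holds exactly when every feature
  // bit required by the callee is also set for the caller, i.e. the callee's
  // features are a subset of the caller's.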
  return (CallerBits & CalleeBits) == CalleeBits;
}

bool AArch64TTIImpl::areTypesABICompatible(
    const Function *Caller, const Function *Callee,
    const ArrayRef<Type *> &Types) const {
  if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
    return false;

  // We need to ensure that argument promotion does not attempt to promote
  // pointers to fixed-length vector types larger than 128 bits like
  // <8 x float> (and pointers to aggregate types which have such fixed-length
  // vector type members) into the values of the pointees. Such vector types
  // are used for SVE VLS but there is no ABI for SVE VLS arguments and the
  // backend cannot lower such value arguments. The 128-bit fixed-length SVE
  // types can be safely treated as 128-bit NEON types and they cannot be
  // distinguished in IR.
  if (ST->useSVEForFixedLengthVectors() && llvm::any_of(Types, [](Type *Ty) {
        auto FVTy = dyn_cast<FixedVectorType>(Ty);
        return FVTy &&
               FVTy->getScalarSizeInBits() * FVTy->getNumElements() > 128;
      }))
    return false;

  return true;
}

unsigned
AArch64TTIImpl::getInlineCallPenalty(const Function *F, const CallBase &Call,
                                     unsigned DefaultCallPenalty) const {
  // This function calculates a penalty for executing Call in F.
  //
  // There are two ways this function can be called:
  // (1) F:
  //     call from F -> G (the call here is Call)
  //
  // For (1), Call.getCaller() == F, so it will always return a high cost if
  // a streaming-mode change is required (thus promoting the need to inline the
  // function)
  //
  // (2) F:
  //     call from F -> G (the call here is not Call)
  //     G:
  //     call from G -> H (the call here is Call)
  //
  // For (2), if after inlining the body of G into F the call to H requires a
  // streaming-mode change, and the call to G from F would also require a
  // streaming-mode change, then there is benefit to doing the streaming-mode
  // change only once and avoiding the inlining of G into F.
  SMEAttrs FAttrs(*F);
  SMEAttrs CalleeAttrs(Call);
  if (FAttrs.requiresSMChange(CalleeAttrs)) {
    if (F == Call.getCaller()) // (1)
      return CallPenaltyChangeSM * DefaultCallPenalty;
    if (FAttrs.requiresSMChange(SMEAttrs(*Call.getCaller()))) // (2)
      return InlineCallPenaltyChangeSM * DefaultCallPenalty;
  }

  return DefaultCallPenalty;
}

bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  assert(K != TargetTransformInfo::RGK_Scalar);
  return (K == TargetTransformInfo::RGK_FixedWidthVector &&
          ST->isNeonAvailable());
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
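/// Constants wider than 64 bits are sign-extended to a multiple of 64 bits,
/// split into 64-bit chunks, and each chunk is costed independently via the
/// 64-bit overload above; at least one instruction is always required.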
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty,
                                                  TTI::TargetCostKind CostKind,
                                                  Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost
AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  // Most (all?) AArch64 intrinsics do not support folding immediates into the
  // selected instruction, so we compute the materialization cost for the
  // immediate directly.
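  // NOTE: the range check below relies on the aarch64_* intrinsics forming a
  // contiguous block of the Intrinsic::ID enumeration, with aarch64_addg and
  // aarch64_udiv as its first and last members.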
  if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
    return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_gc_statepoint:
    if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  auto *RetTy = ICA.getReturnType();
  switch (ICA.getID()) {
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax: {
    static const auto ValidMinMaxTys = {MVT::v8i8,   MVT::v16i8,  MVT::v4i16,
                                        MVT::v8i16,  MVT::v2i32,  MVT::v4i32,
                                        MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32,
                                        MVT::nxv2i64};
    auto LT = getTypeLegalizationCost(RetTy);
    // v2i64 types get converted to cmp+bif hence the cost of 2
    if (LT.second == MVT::v2i64)
      return LT.first * 2;
    if (any_of(ValidMinMaxTys, [&](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                     MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                     MVT::v2i64};
    auto LT = getTypeLegalizationCost(RetTy);
    // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
    // need to extend the type, as it uses shr(qadd(shl, shl)).
    unsigned Instrs =
        LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
    if (any_of(ValidSatTys, [&](MVT M) { return M == LT.second; }))
      return LT.first * Instrs;
    break;
  }
  case Intrinsic::abs: {
    static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                     MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                     MVT::v2i64};
    auto LT = getTypeLegalizationCost(RetTy);
    if (any_of(ValidAbsTys, [&](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  case Intrinsic::bswap: {
    static const auto ValidAbsTys = {MVT::v4i16, MVT::v8i16, MVT::v2i32,
                                     MVT::v4i32, MVT::v2i64};
    auto LT = getTypeLegalizationCost(RetTy);
    if (any_of(ValidAbsTys, [&](MVT M) { return M == LT.second; }) &&
        LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits())
      return LT.first;
    break;
  }
  case Intrinsic::experimental_stepvector: {
    InstructionCost Cost = 1; // Cost of the `index' instruction
    auto LT = getTypeLegalizationCost(RetTy);
    // Legalisation of illegal vectors involves an `index' instruction plus
    // (LT.first - 1) vector adds.
    if (LT.first > 1) {
      Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
      InstructionCost AddCost =
          getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
      Cost += AddCost * (LT.first - 1);
    }
    return Cost;
  }
  case Intrinsic::bitreverse: {
    static const CostTblEntry BitreverseTbl[] = {
        {Intrinsic::bitreverse, MVT::i32, 1},
        {Intrinsic::bitreverse, MVT::i64, 1},
        {Intrinsic::bitreverse, MVT::v8i8, 1},
        {Intrinsic::bitreverse, MVT::v16i8, 1},
        {Intrinsic::bitreverse, MVT::v4i16, 2},
        {Intrinsic::bitreverse, MVT::v8i16, 2},
        {Intrinsic::bitreverse, MVT::v2i32, 2},
        {Intrinsic::bitreverse, MVT::v4i32, 2},
        {Intrinsic::bitreverse, MVT::v1i64, 2},
        {Intrinsic::bitreverse, MVT::v2i64, 2},
    };
    const auto LegalisationCost = getTypeLegalizationCost(RetTy);
    const auto *Entry =
        CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
    if (Entry) {
      // The cost model uses the legal type (i32) that i8 and i16 are promoted
      // to, plus 1 so that we match the actual lowering cost.
      if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
          TLI->getValueType(DL, RetTy, true) == MVT::i16)
        return LegalisationCost.first * Entry->Cost + 1;

      return LegalisationCost.first * Entry->Cost;
    }
    break;
  }
  case Intrinsic::ctpop: {
    if (!ST->hasNEON()) {
      // 32-bit or 64-bit ctpop without NEON is 12 instructions.
      return getTypeLegalizationCost(RetTy).first * 12;
    }
    static const CostTblEntry CtpopCostTbl[] = {
        {ISD::CTPOP, MVT::v2i64, 4},
        {ISD::CTPOP, MVT::v4i32, 3},
        {ISD::CTPOP, MVT::v8i16, 2},
        {ISD::CTPOP, MVT::v16i8, 1},
        {ISD::CTPOP, MVT::i64,   4},
        {ISD::CTPOP, MVT::v2i32, 3},
        {ISD::CTPOP, MVT::v4i16, 2},
        {ISD::CTPOP, MVT::v8i8,  1},
        {ISD::CTPOP, MVT::i32,   5},
    };
    auto LT = getTypeLegalizationCost(RetTy);
    MVT MTy = LT.second;
    if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
      // Extra cost of +1 when illegal vector types are legalized by promoting
      // the integer type.
      int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
                                            RetTy->getScalarSizeInBits()
                          ? 1
                          : 0;
      return LT.first * Entry->Cost + ExtraCost;
    }
    break;
  }
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    static const CostTblEntry WithOverflowCostTbl[] = {
        {Intrinsic::sadd_with_overflow, MVT::i8, 3},
        {Intrinsic::uadd_with_overflow, MVT::i8, 3},
        {Intrinsic::sadd_with_overflow, MVT::i16, 3},
        {Intrinsic::uadd_with_overflow, MVT::i16, 3},
        {Intrinsic::sadd_with_overflow, MVT::i32, 1},
        {Intrinsic::uadd_with_overflow, MVT::i32, 1},
        {Intrinsic::sadd_with_overflow, MVT::i64, 1},
        {Intrinsic::uadd_with_overflow, MVT::i64, 1},
        {Intrinsic::ssub_with_overflow, MVT::i8, 3},
        {Intrinsic::usub_with_overflow, MVT::i8, 3},
        {Intrinsic::ssub_with_overflow, MVT::i16, 3},
        {Intrinsic::usub_with_overflow, MVT::i16, 3},
        {Intrinsic::ssub_with_overflow, MVT::i32, 1},
        {Intrinsic::usub_with_overflow, MVT::i32, 1},
        {Intrinsic::ssub_with_overflow, MVT::i64, 1},
        {Intrinsic::usub_with_overflow, MVT::i64, 1},
        {Intrinsic::smul_with_overflow, MVT::i8, 5},
        {Intrinsic::umul_with_overflow, MVT::i8, 4},
        {Intrinsic::smul_with_overflow, MVT::i16, 5},
        {Intrinsic::umul_with_overflow, MVT::i16, 4},
        {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
        {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
        {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
        {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
    };
    EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
    if (MTy.isSimple())
      if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
                                              MTy.getSimpleVT()))
        return Entry->Cost;
    break;
  }
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat: {
    if (ICA.getArgTypes().empty())
      break;
    bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
    auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
    EVT MTy = TLI->getValueType(DL, RetTy);
    // Check for the legal types, which are where the size of the input and the
    // output are the same, or we are using cvt f64->i32 or f32->i64.
    if ((LT.second == MVT::f32 || LT.second == MVT::f64 ||
         LT.second == MVT::v2f32 || LT.second == MVT::v4f32 ||
         LT.second == MVT::v2f64) &&
        (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits() ||
         (LT.second == MVT::f64 && MTy == MVT::i32) ||
         (LT.second == MVT::f32 && MTy == MVT::i64)))
      return LT.first;
    // Similarly for fp16 sizes
    if (ST->hasFullFP16() &&
        ((LT.second == MVT::f16 && MTy == MVT::i32) ||
         ((LT.second == MVT::v4f16 || LT.second == MVT::v8f16) &&
          (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits()))))
      return LT.first;

    // Otherwise we use a legal convert followed by a min+max
    if ((LT.second.getScalarType() == MVT::f32 ||
         LT.second.getScalarType() == MVT::f64 ||
         (ST->hasFullFP16() && LT.second.getScalarType() == MVT::f16)) &&
        LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
      Type *LegalTy =
          Type::getIntNTy(RetTy->getContext(), LT.second.getScalarSizeInBits());
      if (LT.second.isVector())
        LegalTy = VectorType::get(LegalTy, LT.second.getVectorElementCount());
      InstructionCost Cost = 1;
      IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin
                                              : Intrinsic::umin,
                                     LegalTy, {LegalTy, LegalTy});
      Cost += getIntrinsicInstrCost(Attrs1, CostKind);
      IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax
                                              : Intrinsic::umax,
                                     LegalTy, {LegalTy, LegalTy});
      Cost += getIntrinsicInstrCost(Attrs2, CostKind);
      return LT.first * Cost;
    }
    break;
  }
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    if (ICA.getArgs().empty())
      break;

    // TODO: Add handling for fshl where third argument is not a constant.
    const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(ICA.getArgs()[2]);
    if (!OpInfoZ.isConstant())
      break;

    const auto LegalisationCost = getTypeLegalizationCost(RetTy);
    if (OpInfoZ.isUniform()) {
      // FIXME: The costs could be lower if the codegen is better.
      static const CostTblEntry FshlTbl[] = {
          {Intrinsic::fshl, MVT::v4i32, 3}, // ushr + shl + orr
          {Intrinsic::fshl, MVT::v2i64, 3}, {Intrinsic::fshl, MVT::v16i8, 4},
          {Intrinsic::fshl, MVT::v8i16, 4}, {Intrinsic::fshl, MVT::v2i32, 3},
          {Intrinsic::fshl, MVT::v8i8, 4},  {Intrinsic::fshl, MVT::v4i16, 4}};
      // Costs for both fshl & fshr are the same, so just pass Intrinsic::fshl
      // to avoid having to duplicate the costs.
      const auto *Entry =
          CostTableLookup(FshlTbl, Intrinsic::fshl, LegalisationCost.second);
      if (Entry)
        return LegalisationCost.first * Entry->Cost;
    }

    auto TyL = getTypeLegalizationCost(RetTy);
    if (!RetTy->isIntegerTy())
      break;

    // Estimate cost manually, as types like i8 and i16 will get promoted to
    // i32 and CostTableLookup will ignore the extra conversion cost.
    bool HigherCost = (RetTy->getScalarSizeInBits() != 32 &&
                       RetTy->getScalarSizeInBits() < 64) ||
                      (RetTy->getScalarSizeInBits() % 64 != 0);
    unsigned ExtraCost = HigherCost ? 1 : 0;
    if (RetTy->getScalarSizeInBits() == 32 ||
        RetTy->getScalarSizeInBits() == 64)
      ExtraCost = 0; // fshl/fshr for i32 and i64 can be lowered to a single
                     // extr instruction.
    else if (HigherCost)
      ExtraCost = 1;
    else
      break;
    return TyL.first + ExtraCost;
  }
  default:
    break;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

/// Removes redundant reinterpret (convert.to/from.svbool) casts in the
/// presence of control flow, i.e. when the operand of the conversion is a PHI
/// node whose incoming values are all compatible reinterprets.
static std::optional<Instruction *> processPhiNode(InstCombiner &IC,
                                                   IntrinsicInst &II) {
  SmallVector<Instruction *, 32> Worklist;
  auto RequiredType = II.getType();

  auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
  assert(PN && "Expected Phi Node!");

  // Don't create a new Phi unless we can remove the old one.
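  // hasOneUse() means the reinterpret II is the phi's only user, so the old
  // phi becomes dead once II is replaced.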
  if (!PN->hasOneUse())
    return std::nullopt;

  for (Value *IncValPhi : PN->incoming_values()) {
    auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
    if (!Reinterpret ||
        Reinterpret->getIntrinsicID() !=
            Intrinsic::aarch64_sve_convert_to_svbool ||
        RequiredType != Reinterpret->getArgOperand(0)->getType())
      return std::nullopt;
  }

  // Create the new Phi
  IC.Builder.SetInsertPoint(PN);
  PHINode *NPN = IC.Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
  Worklist.push_back(PN);

  for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
    auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
    NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
    Worklist.push_back(Reinterpret);
  }

  // Cleanup Phi Node and reinterprets
  return IC.replaceInstUsesWith(II, NPN);
}

// (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _)))
// => (binop (pred) (from_svbool _) (from_svbool _))
//
// The above transformation eliminates a `to_svbool` in the predicate
// operand of bitwise operation `binop` by narrowing the vector width of
// the operation. For example, it would convert a `<vscale x 16 x i1>
// and` into a `<vscale x 4 x i1> and`. This is profitable because
// to_svbool must zero the new lanes during widening, whereas
// from_svbool is free.
static std::optional<Instruction *>
tryCombineFromSVBoolBinOp(InstCombiner &IC, IntrinsicInst &II) {
  auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0));
  if (!BinOp)
    return std::nullopt;

  auto IntrinsicID = BinOp->getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_and_z:
  case Intrinsic::aarch64_sve_bic_z:
  case Intrinsic::aarch64_sve_eor_z:
  case Intrinsic::aarch64_sve_nand_z:
  case Intrinsic::aarch64_sve_nor_z:
  case Intrinsic::aarch64_sve_orn_z:
  case Intrinsic::aarch64_sve_orr_z:
    break;
  default:
    return std::nullopt;
  }

  auto BinOpPred = BinOp->getOperand(0);
  auto BinOpOp1 = BinOp->getOperand(1);
  auto BinOpOp2 = BinOp->getOperand(2);

  auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred);
  if (!PredIntr ||
      PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool)
    return std::nullopt;

  auto PredOp = PredIntr->getOperand(0);
  auto PredOpTy = cast<VectorType>(PredOp->getType());
  if (PredOpTy != II.getType())
    return std::nullopt;

  SmallVector<Value *> NarrowedBinOpArgs = {PredOp};
  auto NarrowBinOpOp1 = IC.Builder.CreateIntrinsic(
      Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1});
  NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
  if (BinOpOp1 == BinOpOp2)
    NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
  else
    NarrowedBinOpArgs.push_back(IC.Builder.CreateIntrinsic(
        Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2}));

  auto NarrowedBinOp =
      IC.Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs);
  return IC.replaceInstUsesWith(II, NarrowedBinOp);
}

static std::optional<Instruction *>
instCombineConvertFromSVBool(InstCombiner &IC, IntrinsicInst &II) {
  // If the reinterpret instruction operand is a PHI Node
  if (isa<PHINode>(II.getArgOperand(0)))
    return processPhiNode(IC, II);

  if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II))
    return BinOpCombine;

  // Ignore converts to/from svcount_t.
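  // svcount_t is represented as a TargetExtType rather than a scalable vector
  // type, so the lane-count reasoning in the chain walk below does not apply.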
  if (isa<TargetExtType>(II.getArgOperand(0)->getType()) ||
      isa<TargetExtType>(II.getType()))
    return std::nullopt;

  SmallVector<Instruction *, 32> CandidatesForRemoval;
  Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;

  const auto *IVTy = cast<VectorType>(II.getType());

  // Walk the chain of conversions.
  while (Cursor) {
    // If the type of the cursor has fewer lanes than the final result, zeroing
    // must take place, which breaks the equivalence chain.
    const auto *CursorVTy = cast<VectorType>(Cursor->getType());
    if (CursorVTy->getElementCount().getKnownMinValue() <
        IVTy->getElementCount().getKnownMinValue())
      break;

    // If the cursor has the same type as I, it is a viable replacement.
    if (Cursor->getType() == IVTy)
      EarliestReplacement = Cursor;

    auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);

    // If this is not an SVE conversion intrinsic, this is the end of the chain.
    if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
                                  Intrinsic::aarch64_sve_convert_to_svbool ||
                              IntrinsicCursor->getIntrinsicID() ==
                                  Intrinsic::aarch64_sve_convert_from_svbool))
      break;

    CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
    Cursor = IntrinsicCursor->getOperand(0);
  }

  // If no viable replacement in the conversion chain was found, there is
  // nothing to do.
  if (!EarliestReplacement)
    return std::nullopt;

  return IC.replaceInstUsesWith(II, EarliestReplacement);
}

static bool isAllActivePredicate(Value *Pred) {
  // Look through convert.from.svbool(convert.to.svbool(...)) chain.
  Value *UncastedPred;
  if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
                      m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
                          m_Value(UncastedPred)))))
    // If the predicate has the same or fewer lanes than the uncasted
    // predicate then we know the casting has no effect.
    if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
        cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
      Pred = UncastedPred;

  return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
                         m_ConstantInt<AArch64SVEPredPattern::all>()));
}

static std::optional<Instruction *> instCombineSVESel(InstCombiner &IC,
                                                      IntrinsicInst &II) {
  // svsel(ptrue, x, y) => x
  auto *OpPredicate = II.getOperand(0);
  if (isAllActivePredicate(OpPredicate))
    return IC.replaceInstUsesWith(II, II.getOperand(1));

  auto Select =
      IC.Builder.CreateSelect(OpPredicate, II.getOperand(1), II.getOperand(2));
  return IC.replaceInstUsesWith(II, Select);
}

static std::optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
                                                      IntrinsicInst &II) {
  IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
  if (!Pg)
    return std::nullopt;

  if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return std::nullopt;

  const auto PTruePattern =
      cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
  if (PTruePattern != AArch64SVEPredPattern::vl1)
    return std::nullopt;

  // The intrinsic is inserting into lane zero so use an insert instead.
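  // A ptrue with the vl1 pattern activates only the first lane, so the
  // predicated dup writes lane 0 alone and is equivalent to an insertelement.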
  auto *IdxTy = Type::getInt64Ty(II.getContext());
  auto *Insert = InsertElementInst::Create(
      II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
  Insert->insertBefore(&II);
  Insert->takeName(&II);

  return IC.replaceInstUsesWith(II, Insert);
}

static std::optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
                                                       IntrinsicInst &II) {
  // Replace DupX with a regular IR splat.
  auto *RetTy = cast<ScalableVectorType>(II.getType());
  Value *Splat = IC.Builder.CreateVectorSplat(RetTy->getElementCount(),
                                              II.getArgOperand(0));
  Splat->takeName(&II);
  return IC.replaceInstUsesWith(II, Splat);
}

static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
                                                        IntrinsicInst &II) {
  LLVMContext &Ctx = II.getContext();

  // Check that the predicate is all active
  auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
  if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return std::nullopt;

  const auto PTruePattern =
      cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
  if (PTruePattern != AArch64SVEPredPattern::all)
    return std::nullopt;

  // Check that we have a compare of zero..
  auto *SplatValue =
      dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
  if (!SplatValue || !SplatValue->isZero())
    return std::nullopt;

  // ..against a dupq
  auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
  if (!DupQLane ||
      DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
    return std::nullopt;

  // Where the dupq is a lane 0 replicate of a vector insert
  if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
    return std::nullopt;

  auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
  if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert)
    return std::nullopt;

  // Where the vector insert is a fixed constant vector insert into undef at
  // index zero
  if (!isa<UndefValue>(VecIns->getArgOperand(0)))
    return std::nullopt;

  if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
    return std::nullopt;

  auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
  if (!ConstVec)
    return std::nullopt;

  auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
  auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
  if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
    return std::nullopt;

  unsigned NumElts = VecTy->getNumElements();
  unsigned PredicateBits = 0;

  // Expand intrinsic operands to a 16-bit byte level predicate
  for (unsigned I = 0; I < NumElts; ++I) {
    auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
    if (!Arg)
      return std::nullopt;
    if (!Arg->isZero())
      PredicateBits |= 1 << (I * (16 / NumElts));
  }

  // If all bits are zero bail early with an empty predicate
  if (PredicateBits == 0) {
    auto *PFalse = Constant::getNullValue(II.getType());
    PFalse->takeName(&II);
    return IC.replaceInstUsesWith(II, PFalse);
  }

  // Calculate largest predicate type used (where byte predicate is largest)
  unsigned Mask = 8;
  for (unsigned I = 0; I < 16; ++I)
    if ((PredicateBits & (1 << I)) != 0)
      Mask |= (I % 8);

  unsigned PredSize = Mask & -Mask;
  auto *PredType = ScalableVectorType::get(
      Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));

  // Ensure all relevant bits are set
  for (unsigned I = 0; I < 16; I += PredSize)
    if ((PredicateBits & (1 << I)) == 0)
      return std::nullopt;

  auto *PTruePat =
      ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
  auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
                                           {PredType}, {PTruePat});
  auto *ConvertToSVBool = IC.Builder.CreateIntrinsic(
      Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
  auto *ConvertFromSVBool =
      IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
                                 {II.getType()}, {ConvertToSVBool});

  ConvertFromSVBool->takeName(&II);
  return IC.replaceInstUsesWith(II, ConvertFromSVBool);
}

static std::optional<Instruction *> instCombineSVELast(InstCombiner &IC,
                                                       IntrinsicInst &II) {
  Value *Pg = II.getArgOperand(0);
  Value *Vec = II.getArgOperand(1);
  auto IntrinsicID = II.getIntrinsicID();
  bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;

  // lastX(splat(X)) --> X
  if (auto *SplatVal = getSplatValue(Vec))
    return IC.replaceInstUsesWith(II, SplatVal);

  // If x and/or y is a splat value then:
  // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
  Value *LHS, *RHS;
  if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
    if (isSplatValue(LHS) || isSplatValue(RHS)) {
      auto *OldBinOp = cast<BinaryOperator>(Vec);
      auto OpC = OldBinOp->getOpcode();
      auto *NewLHS =
          IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
      auto *NewRHS =
          IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
      auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
          OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II);
      return IC.replaceInstUsesWith(II, NewBinOp);
    }
  }

  auto *C = dyn_cast<Constant>(Pg);
  if (IsAfter && C && C->isNullValue()) {
    // The intrinsic is extracting lane 0 so use an extract instead.
    auto *IdxTy = Type::getInt64Ty(II.getContext());
    auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
    Extract->insertBefore(&II);
    Extract->takeName(&II);
    return IC.replaceInstUsesWith(II, Extract);
  }

  auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
  if (!IntrPG)
    return std::nullopt;

  if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return std::nullopt;

  const auto PTruePattern =
      cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();

  // Can the intrinsic's predicate be converted to a known constant index?
  unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
  if (!MinNumElts)
    return std::nullopt;

  unsigned Idx = MinNumElts - 1;
  // Increment the index if extracting the element after the last active
  // predicate element.
  if (IsAfter)
    ++Idx;

  // Ignore extracts whose index is larger than the known minimum vector
  // length. NOTE: This is an artificial constraint where we prefer to
  // maintain what the user asked for until an alternative is proven faster.
  auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
  if (Idx >= PgVTy->getMinNumElements())
    return std::nullopt;

  // The intrinsic is extracting a fixed lane so use an extract instead.
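  // For example, lastb(ptrue vl4, v) becomes extractelement(v, 3) and
  // lasta(ptrue vl4, v) becomes extractelement(v, 4).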
  auto *IdxTy = Type::getInt64Ty(II.getContext());
  auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
  Extract->insertBefore(&II);
  Extract->takeName(&II);
  return IC.replaceInstUsesWith(II, Extract);
}

static std::optional<Instruction *> instCombineSVECondLast(InstCombiner &IC,
                                                           IntrinsicInst &II) {
  // The SIMD&FP variant of CLAST[AB] is significantly faster than the scalar
  // integer variant across a variety of micro-architectures. Replace scalar
  // integer CLAST[AB] intrinsic with optimal SIMD&FP variant. A simple
  // bitcast-to-fp + clast[ab] + bitcast-to-int will cost a cycle or two more
  // depending on the micro-architecture, but has been observed as generally
  // being faster, particularly when the CLAST[AB] op is a loop-carried
  // dependency.
  Value *Pg = II.getArgOperand(0);
  Value *Fallback = II.getArgOperand(1);
  Value *Vec = II.getArgOperand(2);
  Type *Ty = II.getType();

  if (!Ty->isIntegerTy())
    return std::nullopt;

  Type *FPTy;
  switch (cast<IntegerType>(Ty)->getBitWidth()) {
  default:
    return std::nullopt;
  case 16:
    FPTy = IC.Builder.getHalfTy();
    break;
  case 32:
    FPTy = IC.Builder.getFloatTy();
    break;
  case 64:
    FPTy = IC.Builder.getDoubleTy();
    break;
  }

  Value *FPFallBack = IC.Builder.CreateBitCast(Fallback, FPTy);
  auto *FPVTy = VectorType::get(
      FPTy, cast<VectorType>(Vec->getType())->getElementCount());
  Value *FPVec = IC.Builder.CreateBitCast(Vec, FPVTy);
  auto *FPII = IC.Builder.CreateIntrinsic(
      II.getIntrinsicID(), {FPVec->getType()}, {Pg, FPFallBack, FPVec});
  Value *FPIItoInt = IC.Builder.CreateBitCast(FPII, II.getType());
  return IC.replaceInstUsesWith(II, FPIItoInt);
}

static std::optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
                                                     IntrinsicInst &II) {
  LLVMContext &Ctx = II.getContext();
  // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
  // can work with RDFFR_PP for ptest elimination.
  auto *AllPat =
      ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
  auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
                                           {II.getType()}, {AllPat});
  auto *RDFFR =
      IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
  RDFFR->takeName(&II);
  return IC.replaceInstUsesWith(II, RDFFR);
}

static std::optional<Instruction *>
instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
  const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();

  if (Pattern == AArch64SVEPredPattern::all) {
    Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
    auto *VScale = IC.Builder.CreateVScale(StepVal);
    VScale->takeName(&II);
    return IC.replaceInstUsesWith(II, VScale);
  }

  unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);

  return MinNumElts && NumElts >= MinNumElts
             ? std::optional<Instruction *>(IC.replaceInstUsesWith(
                   II, ConstantInt::get(II.getType(), MinNumElts)))
             : std::nullopt;
}

static std::optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
                                                        IntrinsicInst &II) {
  Value *PgVal = II.getArgOperand(0);
  Value *OpVal = II.getArgOperand(1);

  // PTEST_<FIRST|LAST>(X, X) is equivalent to PTEST_ANY(X, X).
  // Later optimizations prefer this form.
  if (PgVal == OpVal &&
      (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_first ||
       II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_last)) {
    Value *Ops[] = {PgVal, OpVal};
    Type *Tys[] = {PgVal->getType()};

    auto *PTest =
        IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptest_any, Tys, Ops);
    PTest->takeName(&II);

    return IC.replaceInstUsesWith(II, PTest);
  }

  IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(PgVal);
  IntrinsicInst *Op = dyn_cast<IntrinsicInst>(OpVal);

  if (!Pg || !Op)
    return std::nullopt;

  Intrinsic::ID OpIID = Op->getIntrinsicID();

  if (Pg->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
      OpIID == Intrinsic::aarch64_sve_convert_to_svbool &&
      Pg->getArgOperand(0)->getType() == Op->getArgOperand(0)->getType()) {
    Value *Ops[] = {Pg->getArgOperand(0), Op->getArgOperand(0)};
    Type *Tys[] = {Pg->getArgOperand(0)->getType()};

    auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);

    PTest->takeName(&II);
    return IC.replaceInstUsesWith(II, PTest);
  }

  // Transform PTEST_ANY(X=OP(PG,...), X) -> PTEST_ANY(PG, X).
  // Later optimizations may rewrite sequence to use the flag-setting variant
  // of instruction X to remove PTEST.
  if ((Pg == Op) && (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_any) &&
      ((OpIID == Intrinsic::aarch64_sve_brka_z) ||
       (OpIID == Intrinsic::aarch64_sve_brkb_z) ||
       (OpIID == Intrinsic::aarch64_sve_brkpa_z) ||
       (OpIID == Intrinsic::aarch64_sve_brkpb_z) ||
       (OpIID == Intrinsic::aarch64_sve_rdffr_z) ||
       (OpIID == Intrinsic::aarch64_sve_and_z) ||
       (OpIID == Intrinsic::aarch64_sve_bic_z) ||
       (OpIID == Intrinsic::aarch64_sve_eor_z) ||
       (OpIID == Intrinsic::aarch64_sve_nand_z) ||
       (OpIID == Intrinsic::aarch64_sve_nor_z) ||
       (OpIID == Intrinsic::aarch64_sve_orn_z) ||
       (OpIID == Intrinsic::aarch64_sve_orr_z))) {
    Value *Ops[] = {Pg->getArgOperand(0), Pg};
    Type *Tys[] = {Pg->getType()};

    auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
    PTest->takeName(&II);

    return IC.replaceInstUsesWith(II, PTest);
  }

  return std::nullopt;
}

template <Intrinsic::ID MulOpc, typename Intrinsic::ID FuseOpc>
static std::optional<Instruction *>
instCombineSVEVectorFuseMulAddSub(InstCombiner &IC, IntrinsicInst &II,
                                  bool MergeIntoAddendOp) {
  Value *P = II.getOperand(0);
  Value *MulOp0, *MulOp1, *AddendOp, *Mul;
  if (MergeIntoAddendOp) {
    AddendOp = II.getOperand(1);
    Mul = II.getOperand(2);
  } else {
    AddendOp = II.getOperand(2);
    Mul = II.getOperand(1);
  }

  if (!match(Mul, m_Intrinsic<MulOpc>(m_Specific(P), m_Value(MulOp0),
                                      m_Value(MulOp1))))
    return std::nullopt;

  if (!Mul->hasOneUse())
    return std::nullopt;

  Instruction *FMFSource = nullptr;
  if (II.getType()->isFPOrFPVectorTy()) {
    llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
    // Stop the combine when the flags on the inputs differ in case dropping
    // flags would lead to us missing out on more beneficial optimizations.
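    // The fused call below reuses II's flags (FMFSource = &II), so they must
    // match the mul's flags exactly and must permit contraction.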
    if (FAddFlags != cast<CallInst>(Mul)->getFastMathFlags())
      return std::nullopt;
    if (!FAddFlags.allowContract())
      return std::nullopt;
    FMFSource = &II;
  }

  CallInst *Res;
  if (MergeIntoAddendOp)
    Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
                                     {P, AddendOp, MulOp0, MulOp1}, FMFSource);
  else
    Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
                                     {P, MulOp0, MulOp1, AddendOp}, FMFSource);

  return IC.replaceInstUsesWith(II, Res);
}

static std::optional<Instruction *>
instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
  Value *Pred = II.getOperand(0);
  Value *PtrOp = II.getOperand(1);
  Type *VecTy = II.getType();

  if (isAllActivePredicate(Pred)) {
    LoadInst *Load = IC.Builder.CreateLoad(VecTy, PtrOp);
    Load->copyMetadata(II);
    return IC.replaceInstUsesWith(II, Load);
  }

  CallInst *MaskedLoad =
      IC.Builder.CreateMaskedLoad(VecTy, PtrOp, PtrOp->getPointerAlignment(DL),
                                  Pred, ConstantAggregateZero::get(VecTy));
  MaskedLoad->copyMetadata(II);
  return IC.replaceInstUsesWith(II, MaskedLoad);
}

static std::optional<Instruction *>
instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
  Value *VecOp = II.getOperand(0);
  Value *Pred = II.getOperand(1);
  Value *PtrOp = II.getOperand(2);

  if (isAllActivePredicate(Pred)) {
    StoreInst *Store = IC.Builder.CreateStore(VecOp, PtrOp);
    Store->copyMetadata(II);
    return IC.eraseInstFromFunction(II);
  }

  CallInst *MaskedStore = IC.Builder.CreateMaskedStore(
      VecOp, PtrOp, PtrOp->getPointerAlignment(DL), Pred);
  MaskedStore->copyMetadata(II);
  return IC.eraseInstFromFunction(II);
}

static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::aarch64_sve_fmul_u:
    return Instruction::BinaryOps::FMul;
  case Intrinsic::aarch64_sve_fadd_u:
    return Instruction::BinaryOps::FAdd;
  case Intrinsic::aarch64_sve_fsub_u:
    return Instruction::BinaryOps::FSub;
  default:
    return Instruction::BinaryOpsEnd;
  }
}

static std::optional<Instruction *>
instCombineSVEVectorBinOp(InstCombiner &IC, IntrinsicInst &II) {
  // Bail due to missing support for ISD::STRICT_ scalable vector operations.
  if (II.isStrictFP())
    return std::nullopt;

  auto *OpPredicate = II.getOperand(0);
  auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
  if (BinOpCode == Instruction::BinaryOpsEnd ||
      !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
                              m_ConstantInt<AArch64SVEPredPattern::all>())))
    return std::nullopt;
  IRBuilderBase::FastMathFlagGuard FMFGuard(IC.Builder);
  IC.Builder.setFastMathFlags(II.getFastMathFlags());
  auto BinOp =
      IC.Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2));
  return IC.replaceInstUsesWith(II, BinOp);
}

// Canonicalise operations that take an all active predicate (e.g. sve.add ->
// sve.add_u).
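// The _u ("undef") variants leave inactive lanes undefined. With an all-active
// predicate the two forms are semantically identical, and the _u form gives
// instruction selection more freedom.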
static std::optional<Instruction *> instCombineSVEAllActive(IntrinsicInst &II,
                                                            Intrinsic::ID IID) {
  auto *OpPredicate = II.getOperand(0);
  if (!match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
                              m_ConstantInt<AArch64SVEPredPattern::all>())))
    return std::nullopt;

  auto *Mod = II.getModule();
  auto *NewDecl = Intrinsic::getDeclaration(Mod, IID, {II.getType()});
  II.setCalledFunction(NewDecl);

  return &II;
}

// Simplify operations where the predicate has all inactive lanes, or try to
// replace with the _u form when all lanes are active.
static std::optional<Instruction *>
instCombineSVEAllOrNoActive(InstCombiner &IC, IntrinsicInst &II,
                            Intrinsic::ID IID) {
  if (match(II.getOperand(0), m_ZeroInt())) {
    // llvm_ir, pred(0), op1, op2 - Spec says to return op1 when all lanes are
    // inactive for sv[func]_m
    return IC.replaceInstUsesWith(II, II.getOperand(1));
  }
  return instCombineSVEAllActive(II, IID);
}

static std::optional<Instruction *> instCombineSVEVectorAdd(InstCombiner &IC,
                                                            IntrinsicInst &II) {
  if (auto II_U =
          instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_add_u))
    return II_U;
  if (auto MLA = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
                                                   Intrinsic::aarch64_sve_mla>(
          IC, II, true))
    return MLA;
  if (auto MAD = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
                                                   Intrinsic::aarch64_sve_mad>(
          IC, II, false))
    return MAD;
  return std::nullopt;
}

static std::optional<Instruction *>
instCombineSVEVectorFAdd(InstCombiner &IC, IntrinsicInst &II) {
  if (auto II_U =
          instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fadd_u))
    return II_U;
  if (auto FMLA =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fmla>(IC, II,
                                                                         true))
    return FMLA;
  if (auto FMAD =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fmad>(IC, II,
                                                                         false))
    return FMAD;
  if (auto FMLA =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
                                            Intrinsic::aarch64_sve_fmla>(IC, II,
                                                                         true))
    return FMLA;
  return std::nullopt;
}

static std::optional<Instruction *>
instCombineSVEVectorFAddU(InstCombiner &IC, IntrinsicInst &II) {
  if (auto FMLA =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fmla>(IC, II,
                                                                         true))
    return FMLA;
  if (auto FMAD =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fmad>(IC, II,
                                                                         false))
    return FMAD;
  if (auto FMLA_U =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
                                            Intrinsic::aarch64_sve_fmla_u>(
              IC, II, true))
    return FMLA_U;
  return instCombineSVEVectorBinOp(IC, II);
}

static std::optional<Instruction *>
instCombineSVEVectorFSub(InstCombiner &IC, IntrinsicInst &II) {
  if (auto II_U =
          instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fsub_u))
    return II_U;
  if (auto FMLS =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fmls>(IC, II,
                                                                         true))
    return FMLS;
  if (auto FMSB =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fnmsb>(
              IC, II, false))
    return FMSB;
  if (auto FMLS =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
                                            Intrinsic::aarch64_sve_fmls>(IC, II,
                                                                         true))
    return FMLS;
  return std::nullopt;
}

static std::optional<Instruction *>
instCombineSVEVectorFSubU(InstCombiner &IC, IntrinsicInst &II) {
  if (auto FMLS =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fmls>(IC, II,
                                                                         true))
    return FMLS;
  if (auto FMSB =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
                                            Intrinsic::aarch64_sve_fnmsb>(
              IC, II, false))
    return FMSB;
  if (auto FMLS_U =
          instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
                                            Intrinsic::aarch64_sve_fmls_u>(
              IC, II, true))
    return FMLS_U;
  return instCombineSVEVectorBinOp(IC, II);
}

static std::optional<Instruction *> instCombineSVEVectorSub(InstCombiner &IC,
                                                            IntrinsicInst &II) {
  if (auto II_U =
          instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sub_u))
    return II_U;
  if (auto MLS = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
                                                   Intrinsic::aarch64_sve_mls>(
          IC, II, true))
    return MLS;
  return std::nullopt;
}

static std::optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
                                                            IntrinsicInst &II,
                                                            Intrinsic::ID IID) {
  auto *OpPredicate = II.getOperand(0);
  auto *OpMultiplicand = II.getOperand(1);
  auto *OpMultiplier = II.getOperand(2);

  // Return true if a given instruction is a unit splat value, false otherwise.
  auto IsUnitSplat = [](auto *I) {
    auto *SplatValue = getSplatValue(I);
    if (!SplatValue)
      return false;
    return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
  };

  // Return true if a given instruction is an aarch64_sve_dup intrinsic call
  // with a unit splat value, false otherwise.
  auto IsUnitDup = [](auto *I) {
    auto *IntrI = dyn_cast<IntrinsicInst>(I);
    if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
      return false;

    auto *SplatValue = IntrI->getOperand(2);
    return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
  };

  if (IsUnitSplat(OpMultiplier)) {
    // [f]mul pg %n, (dupx 1) => %n
    OpMultiplicand->takeName(&II);
    return IC.replaceInstUsesWith(II, OpMultiplicand);
  } else if (IsUnitDup(OpMultiplier)) {
    // [f]mul pg %n, (dup pg 1) => %n
    auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
    auto *DupPg = DupInst->getOperand(1);
    // TODO: this is naive. The optimization is still valid if DupPg
    // 'encompasses' OpPredicate, not only if they're the same predicate.
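    // For now, only fold when the two predicates match exactly; otherwise we
    // fall through to the generic binop combine below.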
1576 if (OpPredicate == DupPg) { 1577 OpMultiplicand->takeName(&II); 1578 return IC.replaceInstUsesWith(II, OpMultiplicand); 1579 } 1580 } 1581 1582 return instCombineSVEVectorBinOp(IC, II); 1583 } 1584 1585 static std::optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC, 1586 IntrinsicInst &II) { 1587 Value *UnpackArg = II.getArgOperand(0); 1588 auto *RetTy = cast<ScalableVectorType>(II.getType()); 1589 bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi || 1590 II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo; 1591 1592 // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X)) 1593 // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X)) 1594 if (auto *ScalarArg = getSplatValue(UnpackArg)) { 1595 ScalarArg = 1596 IC.Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned); 1597 Value *NewVal = 1598 IC.Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg); 1599 NewVal->takeName(&II); 1600 return IC.replaceInstUsesWith(II, NewVal); 1601 } 1602 1603 return std::nullopt; 1604 } 1605 static std::optional<Instruction *> instCombineSVETBL(InstCombiner &IC, 1606 IntrinsicInst &II) { 1607 auto *OpVal = II.getOperand(0); 1608 auto *OpIndices = II.getOperand(1); 1609 VectorType *VTy = cast<VectorType>(II.getType()); 1610 1611 // Check whether OpIndices is a constant splat value < minimal element count 1612 // of result. 1613 auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices)); 1614 if (!SplatValue || 1615 SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue())) 1616 return std::nullopt; 1617 1618 // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to 1619 // splat_vector(extractelement(OpVal, SplatValue)) for further optimization. 1620 auto *Extract = IC.Builder.CreateExtractElement(OpVal, SplatValue); 1621 auto *VectorSplat = 1622 IC.Builder.CreateVectorSplat(VTy->getElementCount(), Extract); 1623 1624 VectorSplat->takeName(&II); 1625 return IC.replaceInstUsesWith(II, VectorSplat); 1626 } 1627 1628 static std::optional<Instruction *> instCombineSVEZip(InstCombiner &IC, 1629 IntrinsicInst &II) { 1630 // zip1(uzp1(A, B), uzp2(A, B)) --> A 1631 // zip2(uzp1(A, B), uzp2(A, B)) --> B 1632 Value *A, *B; 1633 if (match(II.getArgOperand(0), 1634 m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) && 1635 match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>( 1636 m_Specific(A), m_Specific(B)))) 1637 return IC.replaceInstUsesWith( 1638 II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B)); 1639 1640 return std::nullopt; 1641 } 1642 1643 static std::optional<Instruction *> 1644 instCombineLD1GatherIndex(InstCombiner &IC, IntrinsicInst &II) { 1645 Value *Mask = II.getOperand(0); 1646 Value *BasePtr = II.getOperand(1); 1647 Value *Index = II.getOperand(2); 1648 Type *Ty = II.getType(); 1649 Value *PassThru = ConstantAggregateZero::get(Ty); 1650 1651 // Contiguous gather => masked load. 
1652 // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1)) 1653 // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer) 1654 Value *IndexBase; 1655 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>( 1656 m_Value(IndexBase), m_SpecificInt(1)))) { 1657 Align Alignment = 1658 BasePtr->getPointerAlignment(II.getModule()->getDataLayout()); 1659 1660 Type *VecPtrTy = PointerType::getUnqual(Ty); 1661 Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(), 1662 BasePtr, IndexBase); 1663 Ptr = IC.Builder.CreateBitCast(Ptr, VecPtrTy); 1664 CallInst *MaskedLoad = 1665 IC.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru); 1666 MaskedLoad->takeName(&II); 1667 return IC.replaceInstUsesWith(II, MaskedLoad); 1668 } 1669 1670 return std::nullopt; 1671 } 1672 1673 static std::optional<Instruction *> 1674 instCombineST1ScatterIndex(InstCombiner &IC, IntrinsicInst &II) { 1675 Value *Val = II.getOperand(0); 1676 Value *Mask = II.getOperand(1); 1677 Value *BasePtr = II.getOperand(2); 1678 Value *Index = II.getOperand(3); 1679 Type *Ty = Val->getType(); 1680 1681 // Contiguous scatter => masked store. 1682 // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1)) 1683 // => (masked.store Value (gep BasePtr IndexBase) Align Mask) 1684 Value *IndexBase; 1685 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>( 1686 m_Value(IndexBase), m_SpecificInt(1)))) { 1687 Align Alignment = 1688 BasePtr->getPointerAlignment(II.getModule()->getDataLayout()); 1689 1690 Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(), 1691 BasePtr, IndexBase); 1692 Type *VecPtrTy = PointerType::getUnqual(Ty); 1693 Ptr = IC.Builder.CreateBitCast(Ptr, VecPtrTy); 1694 1695 (void)IC.Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask); 1696 1697 return IC.eraseInstFromFunction(II); 1698 } 1699 1700 return std::nullopt; 1701 } 1702 1703 static std::optional<Instruction *> instCombineSVESDIV(InstCombiner &IC, 1704 IntrinsicInst &II) { 1705 Type *Int32Ty = IC.Builder.getInt32Ty(); 1706 Value *Pred = II.getOperand(0); 1707 Value *Vec = II.getOperand(1); 1708 Value *DivVec = II.getOperand(2); 1709 1710 Value *SplatValue = getSplatValue(DivVec); 1711 ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue); 1712 if (!SplatConstantInt) 1713 return std::nullopt; 1714 APInt Divisor = SplatConstantInt->getValue(); 1715 1716 if (Divisor.isPowerOf2()) { 1717 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2()); 1718 auto ASRD = IC.Builder.CreateIntrinsic( 1719 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2}); 1720 return IC.replaceInstUsesWith(II, ASRD); 1721 } 1722 if (Divisor.isNegatedPowerOf2()) { 1723 Divisor.negate(); 1724 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2()); 1725 auto ASRD = IC.Builder.CreateIntrinsic( 1726 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2}); 1727 auto NEG = IC.Builder.CreateIntrinsic( 1728 Intrinsic::aarch64_sve_neg, {ASRD->getType()}, {ASRD, Pred, ASRD}); 1729 return IC.replaceInstUsesWith(II, NEG); 1730 } 1731 1732 return std::nullopt; 1733 } 1734 1735 bool SimplifyValuePattern(SmallVector<Value *> &Vec, bool AllowPoison) { 1736 size_t VecSize = Vec.size(); 1737 if (VecSize == 1) 1738 return true; 1739 if (!isPowerOf2_64(VecSize)) 1740 return false; 1741 size_t HalfVecSize = VecSize / 2; 1742 1743 for (auto LHS = Vec.begin(), RHS = Vec.begin() + HalfVecSize; 1744 RHS != Vec.end(); LHS++, RHS++) { 1745 if (*LHS != nullptr && 
*RHS != nullptr) { 1746 if (*LHS == *RHS) 1747 continue; 1748 else 1749 return false; 1750 } 1751 if (!AllowPoison) 1752 return false; 1753 if (*LHS == nullptr && *RHS != nullptr) 1754 *LHS = *RHS; 1755 } 1756 1757 Vec.resize(HalfVecSize); 1758 SimplifyValuePattern(Vec, AllowPoison); 1759 return true; 1760 } 1761 1762 // Try to simplify dupqlane patterns like dupqlane(f32 A, f32 B, f32 A, f32 B) 1763 // to dupqlane(f64(C)) where C is A concatenated with B 1764 static std::optional<Instruction *> instCombineSVEDupqLane(InstCombiner &IC, 1765 IntrinsicInst &II) { 1766 Value *CurrentInsertElt = nullptr, *Default = nullptr; 1767 if (!match(II.getOperand(0), 1768 m_Intrinsic<Intrinsic::vector_insert>( 1769 m_Value(Default), m_Value(CurrentInsertElt), m_Value())) || 1770 !isa<FixedVectorType>(CurrentInsertElt->getType())) 1771 return std::nullopt; 1772 auto IIScalableTy = cast<ScalableVectorType>(II.getType()); 1773 1774 // Insert the scalars into a container ordered by InsertElement index 1775 SmallVector<Value *> Elts(IIScalableTy->getMinNumElements(), nullptr); 1776 while (auto InsertElt = dyn_cast<InsertElementInst>(CurrentInsertElt)) { 1777 auto Idx = cast<ConstantInt>(InsertElt->getOperand(2)); 1778 Elts[Idx->getValue().getZExtValue()] = InsertElt->getOperand(1); 1779 CurrentInsertElt = InsertElt->getOperand(0); 1780 } 1781 1782 bool AllowPoison = 1783 isa<PoisonValue>(CurrentInsertElt) && isa<PoisonValue>(Default); 1784 if (!SimplifyValuePattern(Elts, AllowPoison)) 1785 return std::nullopt; 1786 1787 // Rebuild the simplified chain of InsertElements. e.g. (a, b, a, b) as (a, b) 1788 Value *InsertEltChain = PoisonValue::get(CurrentInsertElt->getType()); 1789 for (size_t I = 0; I < Elts.size(); I++) { 1790 if (Elts[I] == nullptr) 1791 continue; 1792 InsertEltChain = IC.Builder.CreateInsertElement(InsertEltChain, Elts[I], 1793 IC.Builder.getInt64(I)); 1794 } 1795 if (InsertEltChain == nullptr) 1796 return std::nullopt; 1797 1798 // Splat the simplified sequence, e.g. (f16 a, f16 b, f16 c, f16 d) as one i64 1799 // value or (f16 a, f16 b) as one i32 value. This requires an InsertSubvector 1800 // be bitcast to a type wide enough to fit the sequence, be splatted, and then 1801 // be narrowed back to the original type. 
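// A worked example of the arithmetic below (illustrative): for a
// <vscale x 8 x half> result whose sequence simplified down to 4 elements,
// PatternWidth = 16 * 4 = 64 bits and PatternElementCount = 16 * 8 / 64 = 2,
// so the sequence is splatted as a <vscale x 2 x i64>.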
1802 unsigned PatternWidth = IIScalableTy->getScalarSizeInBits() * Elts.size(); 1803 unsigned PatternElementCount = IIScalableTy->getScalarSizeInBits() * 1804 IIScalableTy->getMinNumElements() / 1805 PatternWidth; 1806 1807 IntegerType *WideTy = IC.Builder.getIntNTy(PatternWidth); 1808 auto *WideScalableTy = ScalableVectorType::get(WideTy, PatternElementCount); 1809 auto *WideShuffleMaskTy = 1810 ScalableVectorType::get(IC.Builder.getInt32Ty(), PatternElementCount); 1811 1812 auto ZeroIdx = ConstantInt::get(IC.Builder.getInt64Ty(), APInt(64, 0)); 1813 auto InsertSubvector = IC.Builder.CreateInsertVector( 1814 II.getType(), PoisonValue::get(II.getType()), InsertEltChain, ZeroIdx); 1815 auto WideBitcast = 1816 IC.Builder.CreateBitOrPointerCast(InsertSubvector, WideScalableTy); 1817 auto WideShuffleMask = ConstantAggregateZero::get(WideShuffleMaskTy); 1818 auto WideShuffle = IC.Builder.CreateShuffleVector( 1819 WideBitcast, PoisonValue::get(WideScalableTy), WideShuffleMask); 1820 auto NarrowBitcast = 1821 IC.Builder.CreateBitOrPointerCast(WideShuffle, II.getType()); 1822 1823 return IC.replaceInstUsesWith(II, NarrowBitcast); 1824 } 1825 1826 static std::optional<Instruction *> instCombineMaxMinNM(InstCombiner &IC, 1827 IntrinsicInst &II) { 1828 Value *A = II.getArgOperand(0); 1829 Value *B = II.getArgOperand(1); 1830 if (A == B) 1831 return IC.replaceInstUsesWith(II, A); 1832 1833 return std::nullopt; 1834 } 1835 1836 static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC, 1837 IntrinsicInst &II) { 1838 Value *Pred = II.getOperand(0); 1839 Value *Vec = II.getOperand(1); 1840 Value *Shift = II.getOperand(2); 1841 1842 // Convert SRSHL into the simpler LSL intrinsic when fed by an ABS intrinsic. 1843 Value *AbsPred, *MergedValue; 1844 if (!match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_sqabs>( 1845 m_Value(MergedValue), m_Value(AbsPred), m_Value())) && 1846 !match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_abs>( 1847 m_Value(MergedValue), m_Value(AbsPred), m_Value()))) 1848 1849 return std::nullopt; 1850 1851 // Transform is valid if any of the following are true: 1852 // * The ABS merge value is an undef or non-negative 1853 // * The ABS predicate is all active 1854 // * The ABS predicate and the SRSHL predicates are the same 1855 if (!isa<UndefValue>(MergedValue) && !match(MergedValue, m_NonNegative()) && 1856 AbsPred != Pred && !isAllActivePredicate(AbsPred)) 1857 return std::nullopt; 1858 1859 // Only valid when the shift amount is non-negative, otherwise the rounding 1860 // behaviour of SRSHL cannot be ignored. 
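// (With a negative shift SRSHL becomes a rounding shift right, e.g.
// srshl(x, -1) computes (x + 1) >> 1, which LSL cannot express.)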
1861 if (!match(Shift, m_NonNegative())) 1862 return std::nullopt; 1863 1864 auto LSL = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_lsl, 1865 {II.getType()}, {Pred, Vec, Shift}); 1866 1867 return IC.replaceInstUsesWith(II, LSL); 1868 } 1869 1870 std::optional<Instruction *> 1871 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC, 1872 IntrinsicInst &II) const { 1873 Intrinsic::ID IID = II.getIntrinsicID(); 1874 switch (IID) { 1875 default: 1876 break; 1877 case Intrinsic::aarch64_neon_fmaxnm: 1878 case Intrinsic::aarch64_neon_fminnm: 1879 return instCombineMaxMinNM(IC, II); 1880 case Intrinsic::aarch64_sve_convert_from_svbool: 1881 return instCombineConvertFromSVBool(IC, II); 1882 case Intrinsic::aarch64_sve_dup: 1883 return instCombineSVEDup(IC, II); 1884 case Intrinsic::aarch64_sve_dup_x: 1885 return instCombineSVEDupX(IC, II); 1886 case Intrinsic::aarch64_sve_cmpne: 1887 case Intrinsic::aarch64_sve_cmpne_wide: 1888 return instCombineSVECmpNE(IC, II); 1889 case Intrinsic::aarch64_sve_rdffr: 1890 return instCombineRDFFR(IC, II); 1891 case Intrinsic::aarch64_sve_lasta: 1892 case Intrinsic::aarch64_sve_lastb: 1893 return instCombineSVELast(IC, II); 1894 case Intrinsic::aarch64_sve_clasta_n: 1895 case Intrinsic::aarch64_sve_clastb_n: 1896 return instCombineSVECondLast(IC, II); 1897 case Intrinsic::aarch64_sve_cntd: 1898 return instCombineSVECntElts(IC, II, 2); 1899 case Intrinsic::aarch64_sve_cntw: 1900 return instCombineSVECntElts(IC, II, 4); 1901 case Intrinsic::aarch64_sve_cnth: 1902 return instCombineSVECntElts(IC, II, 8); 1903 case Intrinsic::aarch64_sve_cntb: 1904 return instCombineSVECntElts(IC, II, 16); 1905 case Intrinsic::aarch64_sve_ptest_any: 1906 case Intrinsic::aarch64_sve_ptest_first: 1907 case Intrinsic::aarch64_sve_ptest_last: 1908 return instCombineSVEPTest(IC, II); 1909 case Intrinsic::aarch64_sve_fabd: 1910 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fabd_u); 1911 case Intrinsic::aarch64_sve_fadd: 1912 return instCombineSVEVectorFAdd(IC, II); 1913 case Intrinsic::aarch64_sve_fadd_u: 1914 return instCombineSVEVectorFAddU(IC, II); 1915 case Intrinsic::aarch64_sve_fdiv: 1916 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fdiv_u); 1917 case Intrinsic::aarch64_sve_fmax: 1918 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmax_u); 1919 case Intrinsic::aarch64_sve_fmaxnm: 1920 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmaxnm_u); 1921 case Intrinsic::aarch64_sve_fmin: 1922 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmin_u); 1923 case Intrinsic::aarch64_sve_fminnm: 1924 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fminnm_u); 1925 case Intrinsic::aarch64_sve_fmla: 1926 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmla_u); 1927 case Intrinsic::aarch64_sve_fmls: 1928 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmls_u); 1929 case Intrinsic::aarch64_sve_fmul: 1930 if (auto II_U = 1931 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmul_u)) 1932 return II_U; 1933 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_fmul_u); 1934 case Intrinsic::aarch64_sve_fmul_u: 1935 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_fmul_u); 1936 case Intrinsic::aarch64_sve_fmulx: 1937 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmulx_u); 1938 case Intrinsic::aarch64_sve_fnmla: 1939 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fnmla_u); 1940 case 
Intrinsic::aarch64_sve_fnmls: 1941 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fnmls_u); 1942 case Intrinsic::aarch64_sve_fsub: 1943 return instCombineSVEVectorFSub(IC, II); 1944 case Intrinsic::aarch64_sve_fsub_u: 1945 return instCombineSVEVectorFSubU(IC, II); 1946 case Intrinsic::aarch64_sve_add: 1947 return instCombineSVEVectorAdd(IC, II); 1948 case Intrinsic::aarch64_sve_add_u: 1949 return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u, 1950 Intrinsic::aarch64_sve_mla_u>( 1951 IC, II, true); 1952 case Intrinsic::aarch64_sve_mla: 1953 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mla_u); 1954 case Intrinsic::aarch64_sve_mls: 1955 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mls_u); 1956 case Intrinsic::aarch64_sve_mul: 1957 if (auto II_U = 1958 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mul_u)) 1959 return II_U; 1960 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_mul_u); 1961 case Intrinsic::aarch64_sve_mul_u: 1962 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_mul_u); 1963 case Intrinsic::aarch64_sve_sabd: 1964 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sabd_u); 1965 case Intrinsic::aarch64_sve_smax: 1966 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smax_u); 1967 case Intrinsic::aarch64_sve_smin: 1968 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smin_u); 1969 case Intrinsic::aarch64_sve_smulh: 1970 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smulh_u); 1971 case Intrinsic::aarch64_sve_sub: 1972 return instCombineSVEVectorSub(IC, II); 1973 case Intrinsic::aarch64_sve_sub_u: 1974 return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u, 1975 Intrinsic::aarch64_sve_mls_u>( 1976 IC, II, true); 1977 case Intrinsic::aarch64_sve_uabd: 1978 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_uabd_u); 1979 case Intrinsic::aarch64_sve_umax: 1980 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umax_u); 1981 case Intrinsic::aarch64_sve_umin: 1982 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umin_u); 1983 case Intrinsic::aarch64_sve_umulh: 1984 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umulh_u); 1985 case Intrinsic::aarch64_sve_asr: 1986 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_asr_u); 1987 case Intrinsic::aarch64_sve_lsl: 1988 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_lsl_u); 1989 case Intrinsic::aarch64_sve_lsr: 1990 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_lsr_u); 1991 case Intrinsic::aarch64_sve_and: 1992 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_and_u); 1993 case Intrinsic::aarch64_sve_bic: 1994 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_bic_u); 1995 case Intrinsic::aarch64_sve_eor: 1996 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_eor_u); 1997 case Intrinsic::aarch64_sve_orr: 1998 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_orr_u); 1999 case Intrinsic::aarch64_sve_sqsub: 2000 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sqsub_u); 2001 case Intrinsic::aarch64_sve_uqsub: 2002 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_uqsub_u); 2003 case Intrinsic::aarch64_sve_tbl: 2004 return instCombineSVETBL(IC, II); 2005 case Intrinsic::aarch64_sve_uunpkhi: 2006 case Intrinsic::aarch64_sve_uunpklo: 2007 case 
Intrinsic::aarch64_sve_sunpkhi: 2008 case Intrinsic::aarch64_sve_sunpklo: 2009 return instCombineSVEUnpack(IC, II); 2010 case Intrinsic::aarch64_sve_zip1: 2011 case Intrinsic::aarch64_sve_zip2: 2012 return instCombineSVEZip(IC, II); 2013 case Intrinsic::aarch64_sve_ld1_gather_index: 2014 return instCombineLD1GatherIndex(IC, II); 2015 case Intrinsic::aarch64_sve_st1_scatter_index: 2016 return instCombineST1ScatterIndex(IC, II); 2017 case Intrinsic::aarch64_sve_ld1: 2018 return instCombineSVELD1(IC, II, DL); 2019 case Intrinsic::aarch64_sve_st1: 2020 return instCombineSVEST1(IC, II, DL); 2021 case Intrinsic::aarch64_sve_sdiv: 2022 return instCombineSVESDIV(IC, II); 2023 case Intrinsic::aarch64_sve_sel: 2024 return instCombineSVESel(IC, II); 2025 case Intrinsic::aarch64_sve_srshl: 2026 return instCombineSVESrshl(IC, II); 2027 case Intrinsic::aarch64_sve_dupq_lane: 2028 return instCombineSVEDupqLane(IC, II); 2029 } 2030 2031 return std::nullopt; 2032 } 2033 2034 std::optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic( 2035 InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts, 2036 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, 2037 std::function<void(Instruction *, unsigned, APInt, APInt &)> 2038 SimplifyAndSetOp) const { 2039 switch (II.getIntrinsicID()) { 2040 default: 2041 break; 2042 case Intrinsic::aarch64_neon_fcvtxn: 2043 case Intrinsic::aarch64_neon_rshrn: 2044 case Intrinsic::aarch64_neon_sqrshrn: 2045 case Intrinsic::aarch64_neon_sqrshrun: 2046 case Intrinsic::aarch64_neon_sqshrn: 2047 case Intrinsic::aarch64_neon_sqshrun: 2048 case Intrinsic::aarch64_neon_sqxtn: 2049 case Intrinsic::aarch64_neon_sqxtun: 2050 case Intrinsic::aarch64_neon_uqrshrn: 2051 case Intrinsic::aarch64_neon_uqshrn: 2052 case Intrinsic::aarch64_neon_uqxtn: 2053 SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts); 2054 break; 2055 } 2056 2057 return std::nullopt; 2058 } 2059 2060 TypeSize 2061 AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { 2062 switch (K) { 2063 case TargetTransformInfo::RGK_Scalar: 2064 return TypeSize::getFixed(64); 2065 case TargetTransformInfo::RGK_FixedWidthVector: 2066 if (!ST->isNeonAvailable() && !EnableFixedwidthAutovecInStreamingMode) 2067 return TypeSize::getFixed(0); 2068 2069 if (ST->hasSVE()) 2070 return TypeSize::getFixed( 2071 std::max(ST->getMinSVEVectorSizeInBits(), 128u)); 2072 2073 return TypeSize::getFixed(ST->hasNEON() ? 128 : 0); 2074 case TargetTransformInfo::RGK_ScalableVector: 2075 if (!ST->isSVEAvailable() && !EnableScalableAutovecInStreamingMode) 2076 return TypeSize::getScalable(0); 2077 2078 return TypeSize::getScalable(ST->hasSVE() ? 128 : 0); 2079 } 2080 llvm_unreachable("Unsupported register kind"); 2081 } 2082 2083 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode, 2084 ArrayRef<const Value *> Args, 2085 Type *SrcOverrideTy) { 2086 // A helper that returns a vector type from the given type. The number of 2087 // elements in type Ty determines the vector width. 2088 auto toVectorTy = [&](Type *ArgTy) { 2089 return VectorType::get(ArgTy->getScalarType(), 2090 cast<VectorType>(DstTy)->getElementCount()); 2091 }; 2092 2093 // Exit early if DstTy is not a vector type whose elements are one of [i16, 2094 // i32, i64]. SVE doesn't generally have the same set of instructions to 2095 // perform an extend with the add/sub/mul. There are SMULLB style 2096 // instructions, but they operate on top/bottom, requiring some sort of lane 2097 // interleaving to be used with zext/sext. 
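// As an illustrative sketch of a NEON pattern this function does accept:
//   %e = zext <8 x i8> %a to <8 x i16>
//   %r = sub <8 x i16> %b, %e   ; selectable as usubw v0.8h, v1.8h, v2.8b
// where the extend is folded into the widening instruction for free.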
2098 unsigned DstEltSize = DstTy->getScalarSizeInBits(); 2099 if (!useNeonVector(DstTy) || Args.size() != 2 || 2100 (DstEltSize != 16 && DstEltSize != 32 && DstEltSize != 64)) 2101 return false; 2102 2103 // Determine if the operation has a widening variant. We consider both the 2104 // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the 2105 // instructions. 2106 // 2107 // TODO: Add additional widening operations (e.g., shl, etc.) once we 2108 // verify that their extending operands are eliminated during code 2109 // generation. 2110 Type *SrcTy = SrcOverrideTy; 2111 switch (Opcode) { 2112 case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2). 2113 case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2). 2114 // The second operand needs to be an extend. 2115 if (isa<SExtInst>(Args[1]) || isa<ZExtInst>(Args[1])) { 2116 if (!SrcTy) 2117 SrcTy = 2118 toVectorTy(cast<Instruction>(Args[1])->getOperand(0)->getType()); 2119 } else 2120 return false; 2121 break; 2122 case Instruction::Mul: { // SMULL(2), UMULL(2) 2123 // Both operands need to be extends of the same type. 2124 if ((isa<SExtInst>(Args[0]) && isa<SExtInst>(Args[1])) || 2125 (isa<ZExtInst>(Args[0]) && isa<ZExtInst>(Args[1]))) { 2126 if (!SrcTy) 2127 SrcTy = 2128 toVectorTy(cast<Instruction>(Args[0])->getOperand(0)->getType()); 2129 } else if (isa<ZExtInst>(Args[0]) || isa<ZExtInst>(Args[1])) { 2130 // If one of the operands is a Zext and the other has enough zero bits to 2131 // be treated as unsigned, we can still generate a umull, meaning the zext 2132 // is free. 2133 KnownBits Known = 2134 computeKnownBits(isa<ZExtInst>(Args[0]) ? Args[1] : Args[0], DL); 2135 if (Args[0]->getType()->getScalarSizeInBits() - 2136 Known.Zero.countLeadingOnes() > 2137 DstTy->getScalarSizeInBits() / 2) 2138 return false; 2139 if (!SrcTy) 2140 SrcTy = toVectorTy(Type::getIntNTy(DstTy->getContext(), 2141 DstTy->getScalarSizeInBits() / 2)); 2142 } else 2143 return false; 2144 break; 2145 } 2146 default: 2147 return false; 2148 } 2149 2150 // Legalize the destination type and ensure it can be used in a widening 2151 // operation. 2152 auto DstTyL = getTypeLegalizationCost(DstTy); 2153 if (!DstTyL.second.isVector() || DstEltSize != DstTy->getScalarSizeInBits()) 2154 return false; 2155 2156 // Legalize the source type and ensure it can be used in a widening 2157 // operation. 2158 assert(SrcTy && "Expected some SrcTy"); 2159 auto SrcTyL = getTypeLegalizationCost(SrcTy); 2160 unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits(); 2161 if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits()) 2162 return false; 2163 2164 // Get the total number of vector elements in the legalized types. 2165 InstructionCost NumDstEls = 2166 DstTyL.first * DstTyL.second.getVectorMinNumElements(); 2167 InstructionCost NumSrcEls = 2168 SrcTyL.first * SrcTyL.second.getVectorMinNumElements(); 2169 2170 // Return true if the legalized types have the same number of vector elements 2171 // and the destination element type size is twice that of the source type. 2172 return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstEltSize; 2173 } 2174 2175 // s/urhadd instructions implement the following pattern, making the 2176 // extends free: 2177 // %x = add ((zext i8 -> i16), 1) 2178 // %y = (zext i8 -> i16) 2179 // trunc i16 (lshr (add %x, %y), 1) -> i8 2180 // 2181 bool AArch64TTIImpl::isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst, 2182 Type *Src) { 2183 // The source should be a legal vector type.
2184 if (!Src->isVectorTy() || !TLI->isTypeLegal(TLI->getValueType(DL, Src)) || 2185 (Src->isScalableTy() && !ST->hasSVE2())) 2186 return false; 2187 2188 if (ExtUser->getOpcode() != Instruction::Add || !ExtUser->hasOneUse()) 2189 return false; 2190 2191 // Look for trunc/lshr/add before trying to match the pattern. 2192 const Instruction *Add = ExtUser; 2193 auto *AddUser = 2194 dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser()); 2195 if (AddUser && AddUser->getOpcode() == Instruction::Add) 2196 Add = AddUser; 2197 2198 auto *Shr = dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser()); 2199 if (!Shr || Shr->getOpcode() != Instruction::LShr) 2200 return false; 2201 2202 auto *Trunc = dyn_cast_or_null<Instruction>(Shr->getUniqueUndroppableUser()); 2203 if (!Trunc || Trunc->getOpcode() != Instruction::Trunc || 2204 Src->getScalarSizeInBits() != 2205 cast<CastInst>(Trunc)->getDestTy()->getScalarSizeInBits()) 2206 return false; 2207 2208 // Try to match the whole pattern. Ext could be either the first or second 2209 // m_ZExtOrSExt matched. 2210 Instruction *Ex1, *Ex2; 2211 if (!(match(Add, m_c_Add(m_Instruction(Ex1), 2212 m_c_Add(m_Instruction(Ex2), m_SpecificInt(1)))))) 2213 return false; 2214 2215 // Ensure both extends are of the same type. 2216 if (match(Ex1, m_ZExtOrSExt(m_Value())) && 2217 Ex1->getOpcode() == Ex2->getOpcode()) 2218 return true; 2219 2220 return false; 2221 } 2222 2223 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 2224 Type *Src, 2225 TTI::CastContextHint CCH, 2226 TTI::TargetCostKind CostKind, 2227 const Instruction *I) { 2228 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2229 assert(ISD && "Invalid opcode"); 2230 // If the cast is observable, and it is used by a widening instruction (e.g., 2231 // uaddl, saddw, etc.), it may be free. 2232 if (I && I->hasOneUser()) { 2233 auto *SingleUser = cast<Instruction>(*I->user_begin()); 2234 SmallVector<const Value *, 4> Operands(SingleUser->operand_values()); 2235 if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands, Src)) { 2236 // For adds only count the second operand as free if both operands are 2237 // extends but not the same operation. (i.e. both operands are not free in 2238 // add(sext, zext)). 2239 if (SingleUser->getOpcode() == Instruction::Add) { 2240 if (I == SingleUser->getOperand(1) || 2241 (isa<CastInst>(SingleUser->getOperand(1)) && 2242 cast<CastInst>(SingleUser->getOperand(1))->getOpcode() == Opcode)) 2243 return 0; 2244 } else // Others are free so long as isWideningInstruction returned true. 2245 return 0; 2246 } 2247 2248 // The cast will be free for the s/urhadd instructions. 2249 if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) && 2250 isExtPartOfAvgExpr(SingleUser, Dst, Src)) 2251 return 0; 2252 } 2253 2254 // TODO: Allow non-throughput costs that aren't binary. 2255 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 2256 if (CostKind != TTI::TCK_RecipThroughput) 2257 return Cost == 0 ?
0 : 1; 2258 return Cost; 2259 }; 2260 2261 EVT SrcTy = TLI->getValueType(DL, Src); 2262 EVT DstTy = TLI->getValueType(DL, Dst); 2263 2264 if (!SrcTy.isSimple() || !DstTy.isSimple()) 2265 return AdjustCost( 2266 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 2267 2268 static const TypeConversionCostTblEntry 2269 ConversionTbl[] = { 2270 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1}, // xtn 2271 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1}, // xtn 2272 { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1}, // xtn 2273 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1}, // xtn 2274 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 3}, // 2 xtn + 1 uzp1 2275 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1}, // xtn 2276 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2}, // 1 uzp1 + 1 xtn 2277 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1}, // 1 uzp1 2278 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1}, // 1 xtn 2279 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2}, // 1 uzp1 + 1 xtn 2280 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 4}, // 3 x uzp1 + xtn 2281 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1}, // 1 uzp1 2282 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 3}, // 3 x uzp1 2283 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 2}, // 2 x uzp1 2284 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 1}, // uzp1 2285 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 3}, // (2 + 1) x uzp1 2286 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 7}, // (4 + 2 + 1) x uzp1 2287 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2}, // 2 x uzp1 2288 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i64, 6}, // (4 + 2) x uzp1 2289 { ISD::TRUNCATE, MVT::v16i32, MVT::v16i64, 4}, // 4 x uzp1 2290 2291 // Truncations on nxvmiN 2292 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 }, 2293 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 }, 2294 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 }, 2295 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 }, 2296 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 }, 2297 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 }, 2298 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 }, 2299 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 }, 2300 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 }, 2301 { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 }, 2302 { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 }, 2303 { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 }, 2304 { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 }, 2305 { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 }, 2306 { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 }, 2307 { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 }, 2308 2309 // The number of shll instructions for the extension. 
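// For example, the v4i64 <- v4i16 entries below model a first shll to
// v4i32 followed by a shll/shll2 pair for the two v2i64 halves, i.e. cost 3
// (sketch; the exact selected sequence may differ).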
2310 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 2311 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 2312 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 2313 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 2314 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 2315 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 2316 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 2317 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 2318 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 2319 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 2320 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 2321 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 2322 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 2323 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 2324 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 2325 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 2326 2327 // LowerVectorINT_TO_FP: 2328 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 2329 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 2330 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 2331 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 2332 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 2333 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 2334 2335 // Complex: to v2f32 2336 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 2337 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 2338 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 2339 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 2340 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 2341 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 2342 2343 // Complex: to v4f32 2344 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 }, 2345 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 2346 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 2347 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 2348 2349 // Complex: to v8f32 2350 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 2351 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2352 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 2353 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2354 2355 // Complex: to v16f32 2356 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 2357 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 2358 2359 // Complex: to v2f64 2360 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 2361 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 2362 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 2363 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 2364 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 2365 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 2366 2367 // Complex: to v4f64 2368 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 4 }, 2369 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 4 }, 2370 2371 // LowerVectorFP_TO_INT 2372 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 }, 2373 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 2374 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 2375 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 2376 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 2377 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 2378 2379 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext). 
2380 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 }, 2381 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 }, 2382 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 }, 2383 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 }, 2384 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 }, 2385 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 }, 2386 2387 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2 2388 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, 2389 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 }, 2390 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, 2391 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 }, 2392 2393 // Complex, from nxv2f32. 2394 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 2395 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 2396 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 2397 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 2398 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 2399 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 2400 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 2401 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 2402 2403 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2. 2404 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, 2405 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, 2406 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 }, 2407 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }, 2408 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, 2409 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 }, 2410 2411 // Complex, from nxv2f64. 2412 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 2413 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 2414 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 2415 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 2416 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 2417 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 2418 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 2419 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 2420 2421 // Complex, from nxv4f32. 2422 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 2423 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 2424 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 2425 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 2426 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 2427 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 2428 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 2429 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 2430 2431 // Complex, from nxv8f64. Illegal -> illegal conversions not required. 2432 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 2433 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 2434 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 2435 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 2436 2437 // Complex, from nxv4f64. Illegal -> illegal conversions not required. 2438 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 2439 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 2440 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 2441 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 2442 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 2443 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 2444 2445 // Complex, from nxv8f32. Illegal -> illegal conversions not required. 2446 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 2447 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 2448 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 2449 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 2450 2451 // Complex, from nxv8f16. 
2452 { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 2453 { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 2454 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 2455 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 2456 { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 2457 { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 2458 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 2459 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 2460 2461 // Complex, from nxv4f16. 2462 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 2463 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 2464 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 2465 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 2466 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 2467 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 2468 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 2469 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 2470 2471 // Complex, from nxv2f16. 2472 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 2473 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 2474 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 2475 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 2476 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 2477 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 2478 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 2479 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 2480 2481 // Truncate from nxvmf32 to nxvmf16. 2482 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 }, 2483 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 }, 2484 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 }, 2485 2486 // Truncate from nxvmf64 to nxvmf16. 2487 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 }, 2488 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 }, 2489 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 }, 2490 2491 // Truncate from nxvmf64 to nxvmf32. 2492 { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 }, 2493 { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 }, 2494 { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 }, 2495 2496 // Extend from nxvmf16 to nxvmf32. 2497 { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1}, 2498 { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1}, 2499 { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2}, 2500 2501 // Extend from nxvmf16 to nxvmf64. 2502 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1}, 2503 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2}, 2504 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4}, 2505 2506 // Extend from nxvmf32 to nxvmf64. 2507 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1}, 2508 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2}, 2509 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6}, 2510 2511 // Bitcasts from float to integer 2512 { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 }, 2513 { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 }, 2514 { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 }, 2515 2516 // Bitcasts from integer to float 2517 { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 }, 2518 { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 }, 2519 { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 }, 2520 2521 // Add cost for extending to illegal -too wide- scalable vectors. 2522 // zero/sign extend are implemented by multiple unpack operations, 2523 // where each operation has a cost of 1. 
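// For example, nxv16i8 -> nxv16i16 needs uunpklo + uunpkhi (cost 2), and
// every further doubling of the element width unpacks each part again,
// giving the costs of 6 for nxv16i32 and 14 for nxv16i64 below.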
2524 { ISD::ZERO_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2}, 2525 { ISD::ZERO_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6}, 2526 { ISD::ZERO_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14}, 2527 { ISD::ZERO_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2}, 2528 { ISD::ZERO_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6}, 2529 { ISD::ZERO_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2}, 2530 2531 { ISD::SIGN_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2}, 2532 { ISD::SIGN_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6}, 2533 { ISD::SIGN_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14}, 2534 { ISD::SIGN_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2}, 2535 { ISD::SIGN_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6}, 2536 { ISD::SIGN_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2}, 2537 }; 2538 2539 // We have to estimate a cost of fixed length operation upon 2540 // SVE registers(operations) with the number of registers required 2541 // for a fixed type to be represented upon SVE registers. 2542 EVT WiderTy = SrcTy.bitsGT(DstTy) ? SrcTy : DstTy; 2543 if (SrcTy.isFixedLengthVector() && DstTy.isFixedLengthVector() && 2544 SrcTy.getVectorNumElements() == DstTy.getVectorNumElements() && 2545 ST->useSVEForFixedLengthVectors(WiderTy)) { 2546 std::pair<InstructionCost, MVT> LT = 2547 getTypeLegalizationCost(WiderTy.getTypeForEVT(Dst->getContext())); 2548 unsigned NumElements = AArch64::SVEBitsPerBlock / 2549 LT.second.getVectorElementType().getSizeInBits(); 2550 return AdjustCost( 2551 LT.first * 2552 getCastInstrCost( 2553 Opcode, ScalableVectorType::get(Dst->getScalarType(), NumElements), 2554 ScalableVectorType::get(Src->getScalarType(), NumElements), CCH, 2555 CostKind, I)); 2556 } 2557 2558 if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD, 2559 DstTy.getSimpleVT(), 2560 SrcTy.getSimpleVT())) 2561 return AdjustCost(Entry->Cost); 2562 2563 static const TypeConversionCostTblEntry FP16Tbl[] = { 2564 {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs 2565 {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1}, 2566 {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs 2567 {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1}, 2568 {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs 2569 {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2}, 2570 {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn 2571 {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2}, 2572 {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs 2573 {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1}, 2574 {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs 2575 {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4}, 2576 {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn 2577 {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3}, 2578 {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs 2579 {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2}, 2580 {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs 2581 {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8}, 2582 {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // ushll + ucvtf 2583 {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // sshll + scvtf 2584 {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf 2585 {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf 2586 }; 2587 2588 if (ST->hasFullFP16()) 2589 if (const auto *Entry = ConvertCostTableLookup( 2590 FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT())) 2591 return AdjustCost(Entry->Cost); 2592 2593 if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) && 2594 CCH == TTI::CastContextHint::Masked && ST->hasSVEorSME() && 2595 
TLI->getTypeAction(Src->getContext(), SrcTy) == 2596 TargetLowering::TypePromoteInteger && 2597 TLI->getTypeAction(Dst->getContext(), DstTy) == 2598 TargetLowering::TypeSplitVector) { 2599 // The standard behaviour in the backend for these cases is to split the 2600 // extend up into two parts: 2601 // 1. Perform an extending load or masked load up to the legal type. 2602 // 2. Extend the loaded data to the final type. 2603 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src); 2604 Type *LegalTy = EVT(SrcLT.second).getTypeForEVT(Src->getContext()); 2605 InstructionCost Part1 = AArch64TTIImpl::getCastInstrCost( 2606 Opcode, LegalTy, Src, CCH, CostKind, I); 2607 InstructionCost Part2 = AArch64TTIImpl::getCastInstrCost( 2608 Opcode, Dst, LegalTy, TTI::CastContextHint::None, CostKind, I); 2609 return Part1 + Part2; 2610 } 2611 2612 // The BasicTTIImpl version only deals with CCH==TTI::CastContextHint::Normal, 2613 // but we also want to include the TTI::CastContextHint::Masked case too. 2614 if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) && 2615 CCH == TTI::CastContextHint::Masked && ST->hasSVEorSME() && 2616 TLI->isTypeLegal(DstTy)) 2617 CCH = TTI::CastContextHint::Normal; 2618 2619 return AdjustCost( 2620 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 2621 } 2622 2623 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, 2624 Type *Dst, 2625 VectorType *VecTy, 2626 unsigned Index) { 2627 2628 // Make sure we were given a valid extend opcode. 2629 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) && 2630 "Invalid opcode"); 2631 2632 // We are extending an element we extract from a vector, so the source type 2633 // of the extend is the element type of the vector. 2634 auto *Src = VecTy->getElementType(); 2635 2636 // Sign- and zero-extends are for integer types only. 2637 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type"); 2638 2639 // Get the cost for the extract. We compute the cost (if any) for the extend 2640 // below. 2641 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 2642 InstructionCost Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, 2643 CostKind, Index, nullptr, nullptr); 2644 2645 // Legalize the types. 2646 auto VecLT = getTypeLegalizationCost(VecTy); 2647 auto DstVT = TLI->getValueType(DL, Dst); 2648 auto SrcVT = TLI->getValueType(DL, Src); 2649 2650 // If the resulting type is still a vector and the destination type is legal, 2651 // we may get the extension for free. If not, get the default cost for the 2652 // extend. 2653 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT)) 2654 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 2655 CostKind); 2656 2657 // The destination type should be larger than the element type. If not, get 2658 // the default cost for the extend. 2659 if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits()) 2660 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 2661 CostKind); 2662 2663 switch (Opcode) { 2664 default: 2665 llvm_unreachable("Opcode should be either SExt or ZExt"); 2666 2667 // For sign-extends, we only need a smov, which performs the extension 2668 // automatically. 2669 case Instruction::SExt: 2670 return Cost; 2671 2672 // For zero-extends, the extend is performed automatically by a umov unless 2673 // the destination type is i64 and the element type is i8 or i16. 
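// For example, zext of an i16 lane to i32 comes out of umov w0, v0.h[0]
// for free; only an i8/i16 element that must reach an i64 destination is
// charged the extra extend below.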
2674 case Instruction::ZExt: 2675 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u) 2676 return Cost; 2677 } 2678 2679 // If we are unable to perform the extend for free, get the default cost. 2680 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 2681 CostKind); 2682 } 2683 2684 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode, 2685 TTI::TargetCostKind CostKind, 2686 const Instruction *I) { 2687 if (CostKind != TTI::TCK_RecipThroughput) 2688 return Opcode == Instruction::PHI ? 0 : 1; 2689 assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind"); 2690 // Branches are assumed to be predicted. 2691 return 0; 2692 } 2693 2694 InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(const Instruction *I, 2695 Type *Val, 2696 unsigned Index, 2697 bool HasRealUse) { 2698 assert(Val->isVectorTy() && "This must be a vector type"); 2699 2700 if (Index != -1U) { 2701 // Legalize the type. 2702 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val); 2703 2704 // This type is legalized to a scalar type. 2705 if (!LT.second.isVector()) 2706 return 0; 2707 2708 // The type may be split. For fixed-width vectors we can normalize the 2709 // index to the new type. 2710 if (LT.second.isFixedLengthVector()) { 2711 unsigned Width = LT.second.getVectorNumElements(); 2712 Index = Index % Width; 2713 } 2714 2715 // The element at index zero is already inside the vector. 2716 // - For a physical (HasRealUse==true) insert-element or extract-element 2717 // instruction that extracts integers, an explicit FPR -> GPR move is 2718 // needed. So it has non-zero cost. 2719 // - For the rest of cases (virtual instruction or element type is float), 2720 // consider the instruction free. 2721 if (Index == 0 && (!HasRealUse || !Val->getScalarType()->isIntegerTy())) 2722 return 0; 2723 2724 // This is recognising an LD1 single-element structure to one lane of one 2725 // register instruction. I.e., if this is an `insertelement` instruction 2726 // whose second operand is a load, we will generate an LD1, which is an 2727 // expensive instruction. 2728 if (I && isa<LoadInst>(I->getOperand(1))) 2729 return ST->getVectorInsertExtractBaseCost() + 1; 2730 2731 // i1 inserts and extracts will include an extra cset or cmp of the vector 2732 // value. Increase the cost by 1 to account for this. 2733 if (Val->getScalarSizeInBits() == 1) 2734 return ST->getVectorInsertExtractBaseCost() + 1; 2735 2736 // FIXME: 2737 // If the extract-element and insert-element instructions could be 2738 // simplified away (e.g., could be combined into users by looking at use-def 2739 // context), they have no cost. This is not done in the first place for 2740 // compile-time considerations. 2741 } 2742 2743 // All other insert/extracts cost this much.
2744 return ST->getVectorInsertExtractBaseCost(); 2745 } 2746 2747 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 2748 TTI::TargetCostKind CostKind, 2749 unsigned Index, Value *Op0, 2750 Value *Op1) { 2751 bool HasRealUse = 2752 Opcode == Instruction::InsertElement && Op0 && !isa<UndefValue>(Op0); 2753 return getVectorInstrCostHelper(nullptr, Val, Index, HasRealUse); 2754 } 2755 2756 InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I, 2757 Type *Val, 2758 TTI::TargetCostKind CostKind, 2759 unsigned Index) { 2760 return getVectorInstrCostHelper(&I, Val, Index, true /* HasRealUse */); 2761 } 2762 2763 InstructionCost AArch64TTIImpl::getScalarizationOverhead( 2764 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, 2765 TTI::TargetCostKind CostKind) { 2766 if (isa<ScalableVectorType>(Ty)) 2767 return InstructionCost::getInvalid(); 2768 if (Ty->getElementType()->isFloatingPointTy()) 2769 return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract, 2770 CostKind); 2771 return DemandedElts.popcount() * (Insert + Extract) * 2772 ST->getVectorInsertExtractBaseCost(); 2773 } 2774 2775 InstructionCost AArch64TTIImpl::getArithmeticInstrCost( 2776 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, 2777 TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, 2778 ArrayRef<const Value *> Args, 2779 const Instruction *CxtI) { 2780 2781 // TODO: Handle more cost kinds. 2782 if (CostKind != TTI::TCK_RecipThroughput) 2783 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, 2784 Op2Info, Args, CxtI); 2785 2786 // Legalize the type. 2787 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty); 2788 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2789 2790 switch (ISD) { 2791 default: 2792 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, 2793 Op2Info); 2794 case ISD::SDIV: 2795 if (Op2Info.isConstant() && Op2Info.isUniform() && Op2Info.isPowerOf2()) { 2796 // On AArch64, scalar signed division by a constant power of two is 2797 // normally expanded to the sequence ADD + CMP + SELECT + SRA. 2798 // The OperandValue properties may not be the same as those of the 2799 // previous operation; conservatively assume OP_None. 2800 InstructionCost Cost = getArithmeticInstrCost( 2801 Instruction::Add, Ty, CostKind, 2802 Op1Info.getNoProps(), Op2Info.getNoProps()); 2803 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, 2804 Op1Info.getNoProps(), Op2Info.getNoProps()); 2805 Cost += getArithmeticInstrCost( 2806 Instruction::Select, Ty, CostKind, 2807 Op1Info.getNoProps(), Op2Info.getNoProps()); 2808 Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, 2809 Op1Info.getNoProps(), Op2Info.getNoProps()); 2810 return Cost; 2811 } 2812 [[fallthrough]]; 2813 case ISD::UDIV: { 2814 if (Op2Info.isConstant() && Op2Info.isUniform()) { 2815 auto VT = TLI->getValueType(DL, Ty); 2816 if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) { 2817 // Vector signed divisions by a constant are expanded to the 2818 // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned divisions 2819 // to MULHS + SUB + SRL + ADD + SRL.
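// A rough sketch of how the return value below lines up with that
// expansion: the multiply, add and shift costs are each counted twice,
// plus 1 for the remaining step of the sequence.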
2820 InstructionCost MulCost = getArithmeticInstrCost( 2821 Instruction::Mul, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps()); 2822 InstructionCost AddCost = getArithmeticInstrCost( 2823 Instruction::Add, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps()); 2824 InstructionCost ShrCost = getArithmeticInstrCost( 2825 Instruction::AShr, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps()); 2826 return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1; 2827 } 2828 } 2829 2830 InstructionCost Cost = BaseT::getArithmeticInstrCost( 2831 Opcode, Ty, CostKind, Op1Info, Op2Info); 2832 if (Ty->isVectorTy()) { 2833 if (TLI->isOperationLegalOrCustom(ISD, LT.second) && ST->hasSVE()) { 2834 // If SDIV/UDIV operations are lowered using SVE, the cost can be 2835 // lower. 2836 if (isa<FixedVectorType>(Ty) && cast<FixedVectorType>(Ty) 2837 ->getPrimitiveSizeInBits() 2838 .getFixedValue() < 128) { 2839 EVT VT = TLI->getValueType(DL, Ty); 2840 static const CostTblEntry DivTbl[]{ 2841 {ISD::SDIV, MVT::v2i8, 5}, {ISD::SDIV, MVT::v4i8, 8}, 2842 {ISD::SDIV, MVT::v8i8, 8}, {ISD::SDIV, MVT::v2i16, 5}, 2843 {ISD::SDIV, MVT::v4i16, 5}, {ISD::SDIV, MVT::v2i32, 1}, 2844 {ISD::UDIV, MVT::v2i8, 5}, {ISD::UDIV, MVT::v4i8, 8}, 2845 {ISD::UDIV, MVT::v8i8, 8}, {ISD::UDIV, MVT::v2i16, 5}, 2846 {ISD::UDIV, MVT::v4i16, 5}, {ISD::UDIV, MVT::v2i32, 1}}; 2847 2848 const auto *Entry = CostTableLookup(DivTbl, ISD, VT.getSimpleVT()); 2849 if (nullptr != Entry) 2850 return Entry->Cost; 2851 } 2852 // For 8/16-bit elements, the cost is higher because the type 2853 // requires promotion and possibly splitting: 2854 if (LT.second.getScalarType() == MVT::i8) 2855 Cost *= 8; 2856 else if (LT.second.getScalarType() == MVT::i16) 2857 Cost *= 4; 2858 return Cost; 2859 } else { 2860 // If one of the operands is a uniform constant then the cost for each 2861 // element is the cost of insertion, extraction and division: 2862 // insertion cost = 2, extraction cost = 2, division = the cost of the 2863 // operation with scalar type. 2864 if ((Op1Info.isConstant() && Op1Info.isUniform()) || 2865 (Op2Info.isConstant() && Op2Info.isUniform())) { 2866 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) { 2867 InstructionCost DivCost = BaseT::getArithmeticInstrCost( 2868 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info); 2869 return (4 + DivCost) * VTy->getNumElements(); 2870 } 2871 } 2872 // On AArch64, without SVE, vector divisions are expanded 2873 // into scalar divisions of each pair of elements. 2874 Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, 2875 CostKind, Op1Info, Op2Info); 2876 Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind, 2877 Op1Info, Op2Info); 2878 } 2879 2880 // TODO: if one of the arguments is scalar, then it's not necessary to 2881 // double the cost of handling the vector elements. 2882 Cost += Cost; 2883 } 2884 return Cost; 2885 } 2886 case ISD::MUL: 2887 // When SVE is available, we can lower the v2i64 operation using 2888 // the SVE mul instruction, which has a lower cost. 2889 if (LT.second == MVT::v2i64 && ST->hasSVE()) 2890 return LT.first; 2891 2892 // When SVE is not available, there is no MUL.2d instruction, 2893 // which means mul <2 x i64> is expensive as elements are extracted 2894 // from the vectors and the muls scalarized. 2895 // As getScalarizationOverhead is a bit too pessimistic, we 2896 // estimate the cost for a i64 vector directly here, which is: 2897 // - four 2-cost i64 extracts, 2898 // - two 2-cost i64 inserts, and 2899 // - two 1-cost muls.
    // So, for a v2i64 with LT.first = 1 the cost is 14, and for a v4i64 with
    // LT.first = 2 the cost is 28. If both operands are extensions it will
    // not need to scalarize so the cost can be cheaper (smull or umull).
    if (LT.second != MVT::v2i64 || isWideningInstruction(Ty, Opcode, Args))
      return LT.first;
    return LT.first * 14;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return LT.first;

  case ISD::FNEG:
  case ISD::FADD:
  case ISD::FSUB:
    // Increase the cost for half and bfloat types if not architecturally
    // supported.
    if ((Ty->getScalarType()->isHalfTy() && !ST->hasFullFP16()) ||
        (Ty->getScalarType()->isBFloatTy() && !ST->hasBF16()))
      return 2 * LT.first;
    if (!Ty->getScalarType()->isFP128Ty())
      return LT.first;
    [[fallthrough]];
  case ISD::FMUL:
  case ISD::FDIV:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
    if (!Ty->getScalarType()->isFP128Ty())
      return 2 * LT.first;

    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info);
  }
}

InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
                                                          ScalarEvolution *SE,
                                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = NeonNonConstStrideOverhead;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // Vector selects that are wider than the register width are not lowered
  // well.
  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization
    // happening.
    const int AmortizationCost = 20;

    // If VecPred is not set, check if we can get a predicate from the context
    // instruction, if its type matches the requested ValTy.
    if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
      CmpInst::Predicate CurrentPred;
      if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
                            m_Value())))
        VecPred = CurrentPred;
    }
    // Check if we have a compare/select chain that can be lowered using
    // a (F)CMxx & BFI pair.
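    // Illustrative example: a pattern like
    //   %c = fcmp ogt <4 x float> %a, %b
    //   %s = select <4 x i1> %c, <4 x float> %x, <4 x float> %y
    // can be lowered to roughly FCMGT + BSL on NEON, which is why a matching
    // predicate on a legal type is costed as just LT.first below.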
    if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE ||
        VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT ||
        VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ ||
        VecPred == CmpInst::FCMP_UNE) {
      static const auto ValidMinMaxTys = {
          MVT::v8i8,  MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
          MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64};
      static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16};

      auto LT = getTypeLegalizationCost(ValTy);
      if (any_of(ValidMinMaxTys, [&](MVT M) { return M == LT.second; }) ||
          (ST->hasFullFP16() &&
           any_of(ValidFP16MinMaxTys, [&](MVT M) { return M == LT.second; })))
        return LT.first;
    }

    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v2i1, MVT::v2f32, 2 },
      { ISD::SELECT, MVT::v2i1, MVT::v2f64, 2 },
      { ISD::SELECT, MVT::v4i1, MVT::v4f32, 2 },
      { ISD::SELECT, MVT::v4i1, MVT::v4f16, 2 },
      { ISD::SELECT, MVT::v8i1, MVT::v8f16, 2 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }

  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SETCC) {
    auto LT = getTypeLegalizationCost(ValTy);
    // Cost v4f16 FCmp without FP16 support via converting to v4f32 and back.
    if (LT.second == MVT::v4f16 && !ST->hasFullFP16())
      return LT.first * 4; // fcvtl + fcvtl + fcmp + xtn
  }

  // Treat the icmp in icmp(and, 0) as free, as we can make use of ands.
  // FIXME: This can apply to more conditions and add/sub if it can be shown
  // to be profitable.
  if (ValTy->isIntegerTy() && ISD == ISD::SETCC && I &&
      ICmpInst::isEquality(VecPred) &&
      TLI->isTypeLegal(TLI->getValueType(DL, ValTy)) &&
      match(I->getOperand(1), m_Zero()) &&
      match(I->getOperand(0), m_And(m_Value(), m_Value())))
    return 0;

  // The base case handles scalable vectors fine for now, since it treats the
  // cost as 1 * legalization cost.
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

AArch64TTIImpl::TTI::MemCmpExpansionOptions
AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  if (ST->requiresStrictAlign()) {
    // TODO: Add cost modeling for strict align. Misaligned loads expand to
    // a bunch of instructions when strict align is enabled.
    return Options;
  }
  Options.AllowOverlappingLoads = true;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, on some
  // targets they may wake up the FP unit, which raises the power consumption.
  // Perhaps they could be used with no holds barred (-O3).
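  // For example (illustrative): with these options a 16-byte memcmp can be
  // expanded inline into two 8-byte load-and-compare pairs, and a 7-byte
  // memcmp into 4-, 2- and 1-byte loads (or a pair of overlapping loads),
  // instead of a call to the library routine.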
3064 Options.LoadSizes = {8, 4, 2, 1}; 3065 Options.AllowedTailExpansions = {3, 5, 6}; 3066 return Options; 3067 } 3068 3069 bool AArch64TTIImpl::prefersVectorizedAddressing() const { 3070 return ST->hasSVE(); 3071 } 3072 3073 InstructionCost 3074 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, 3075 Align Alignment, unsigned AddressSpace, 3076 TTI::TargetCostKind CostKind) { 3077 if (useNeonVector(Src)) 3078 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 3079 CostKind); 3080 auto LT = getTypeLegalizationCost(Src); 3081 if (!LT.first.isValid()) 3082 return InstructionCost::getInvalid(); 3083 3084 // The code-generator is currently not able to handle scalable vectors 3085 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 3086 // it. This change will be removed when code-generation for these types is 3087 // sufficiently reliable. 3088 if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1)) 3089 return InstructionCost::getInvalid(); 3090 3091 return LT.first; 3092 } 3093 3094 static unsigned getSVEGatherScatterOverhead(unsigned Opcode) { 3095 return Opcode == Instruction::Load ? SVEGatherOverhead : SVEScatterOverhead; 3096 } 3097 3098 InstructionCost AArch64TTIImpl::getGatherScatterOpCost( 3099 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, 3100 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { 3101 if (useNeonVector(DataTy) || !isLegalMaskedGatherScatter(DataTy)) 3102 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, 3103 Alignment, CostKind, I); 3104 auto *VT = cast<VectorType>(DataTy); 3105 auto LT = getTypeLegalizationCost(DataTy); 3106 if (!LT.first.isValid()) 3107 return InstructionCost::getInvalid(); 3108 3109 if (!LT.second.isVector() || 3110 !isElementTypeLegalForScalableVector(VT->getElementType())) 3111 return InstructionCost::getInvalid(); 3112 3113 // The code-generator is currently not able to handle scalable vectors 3114 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 3115 // it. This change will be removed when code-generation for these types is 3116 // sufficiently reliable. 3117 if (cast<VectorType>(DataTy)->getElementCount() == 3118 ElementCount::getScalable(1)) 3119 return InstructionCost::getInvalid(); 3120 3121 ElementCount LegalVF = LT.second.getVectorElementCount(); 3122 InstructionCost MemOpCost = 3123 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, 3124 {TTI::OK_AnyValue, TTI::OP_None}, I); 3125 // Add on an overhead cost for using gathers/scatters. 3126 // TODO: At the moment this is applied unilaterally for all CPUs, but at some 3127 // point we may want a per-CPU overhead. 
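  // Rough worked example (illustrative numbers): for a <vscale x 4 x i32>
  // gather whose element load costs 1, the default sve-gather-overhead of 10
  // scales MemOpCost to 10, and getMaxNumElements then multiplies by the
  // maximum element count (e.g. 4 if vscale is at most 1), giving a total of
  // 40 - deliberately far more expensive than a contiguous vector load.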
  MemOpCost *= getSVEGatherScatterOverhead(Opcode);
  return LT.first * MemOpCost * getMaxNumElements(LegalVF);
}

bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
  return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
}

InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) {
  EVT VT = TLI->getValueType(DL, Ty, true);
  // Type legalization can't handle structs.
  if (VT == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
                                  CostKind);

  auto LT = getTypeLegalizationCost(Ty);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
    if (VTy->getElementCount() == ElementCount::getScalable(1))
      return InstructionCost::getInvalid();

  // TODO: consider latency as well for TCK_SizeAndLatency.
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return LT.first;

  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has been
    // shown in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  // Opaque ptr or ptr vector types are i64s and can be lowered to STP/LDPs.
  if (Ty->isPtrOrPtrVectorTy())
    return LT.first;

  if (useNeonVector(Ty)) {
    // Check truncating stores and extending loads.
    if (Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
      // v4i8 types are lowered to a scalar load/store and sshll/xtn.
      if (VT == MVT::v4i8)
        return 2;
      // Otherwise we need to scalarize.
      return cast<FixedVectorType>(Ty)->getNumElements() * 2;
    }
    EVT EltVT = VT.getVectorElementType();
    unsigned EltSize = EltVT.getScalarSizeInBits();
    if (!isPowerOf2_32(EltSize) || EltSize < 8 || EltSize > 64 ||
        VT.getVectorNumElements() >= (128 / EltSize) || !Alignment ||
        *Alignment != Align(1))
      return LT.first;
    // FIXME: v3i8 lowering currently is very inefficient, due to automatic
    // widening to v4i8, which produces suboptimal results.
    if (VT.getVectorNumElements() == 3 && EltVT == MVT::i8)
      return LT.first;

    // Check non-power-of-2 loads/stores for legal vector element types with
    // NEON. Non-power-of-2 memory ops will get broken down to a set of
    // operations on smaller power-of-2 ops, including ld1/st1.
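    // For example (illustrative): a v7i8 access is decomposed by the worklist
    // below into v4i8 + v3i8, and v3i8 in turn into v2i8 + v1i8, so the three
    // power-of-2 pieces (v4, v2 and v1) give a total cost of 3.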
3206 LLVMContext &C = Ty->getContext(); 3207 InstructionCost Cost(0); 3208 SmallVector<EVT> TypeWorklist; 3209 TypeWorklist.push_back(VT); 3210 while (!TypeWorklist.empty()) { 3211 EVT CurrVT = TypeWorklist.pop_back_val(); 3212 unsigned CurrNumElements = CurrVT.getVectorNumElements(); 3213 if (isPowerOf2_32(CurrNumElements)) { 3214 Cost += 1; 3215 continue; 3216 } 3217 3218 unsigned PrevPow2 = NextPowerOf2(CurrNumElements) / 2; 3219 TypeWorklist.push_back(EVT::getVectorVT(C, EltVT, PrevPow2)); 3220 TypeWorklist.push_back( 3221 EVT::getVectorVT(C, EltVT, CurrNumElements - PrevPow2)); 3222 } 3223 return Cost; 3224 } 3225 3226 return LT.first; 3227 } 3228 3229 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost( 3230 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 3231 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 3232 bool UseMaskForCond, bool UseMaskForGaps) { 3233 assert(Factor >= 2 && "Invalid interleave factor"); 3234 auto *VecVTy = cast<VectorType>(VecTy); 3235 3236 if (VecTy->isScalableTy() && (!ST->hasSVE() || Factor != 2)) 3237 return InstructionCost::getInvalid(); 3238 3239 // Vectorization for masked interleaved accesses is only enabled for scalable 3240 // VF. 3241 if (!VecTy->isScalableTy() && (UseMaskForCond || UseMaskForGaps)) 3242 return InstructionCost::getInvalid(); 3243 3244 if (!UseMaskForGaps && Factor <= TLI->getMaxSupportedInterleaveFactor()) { 3245 unsigned MinElts = VecVTy->getElementCount().getKnownMinValue(); 3246 auto *SubVecTy = 3247 VectorType::get(VecVTy->getElementType(), 3248 VecVTy->getElementCount().divideCoefficientBy(Factor)); 3249 3250 // ldN/stN only support legal vector types of size 64 or 128 in bits. 3251 // Accesses having vector types that are a multiple of 128 bits can be 3252 // matched to more than one ldN/stN instruction. 3253 bool UseScalable; 3254 if (MinElts % Factor == 0 && 3255 TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable)) 3256 return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable); 3257 } 3258 3259 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 3260 Alignment, AddressSpace, CostKind, 3261 UseMaskForCond, UseMaskForGaps); 3262 } 3263 3264 InstructionCost 3265 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { 3266 InstructionCost Cost = 0; 3267 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 3268 for (auto *I : Tys) { 3269 if (!I->isVectorTy()) 3270 continue; 3271 if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() == 3272 128) 3273 Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) + 3274 getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind); 3275 } 3276 return Cost; 3277 } 3278 3279 unsigned AArch64TTIImpl::getMaxInterleaveFactor(ElementCount VF) { 3280 return ST->getMaxInterleaveFactor(); 3281 } 3282 3283 // For Falkor, we want to avoid having too many strided loads in a loop since 3284 // that can exhaust the HW prefetcher resources. We adjust the unroller 3285 // MaxCount preference below to attempt to ensure unrolling doesn't create too 3286 // many strided loads. 3287 static void 3288 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE, 3289 TargetTransformInfo::UnrollingPreferences &UP) { 3290 enum { MaxStridedLoads = 7 }; 3291 auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) { 3292 int StridedLoads = 0; 3293 // FIXME? We could make this more precise by looking at the CFG and 3294 // e.g. 
    // not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP, ORE);

  UP.UpperBound = true;

  // An inner loop is more likely to be hot, and its runtime check can be
  // hoisted out by the LICM pass, so the overhead is lower; try a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining. Don't unroll vector loops either, as they don't benefit much
  // from unrolling.
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loops.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
    }
  }

  // Enable runtime unrolling for in-order models.
  // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so
  // by checking for that case we can ensure that the default behaviour is
  // unchanged.
  if (ST->getProcFamily() != AArch64Subtarget::Others &&
      !ST->getSchedModel().isOutOfOrder()) {
    UP.Runtime = true;
    UP.Partial = true;
    UP.UnrollRemainder = true;
    UP.DefaultUnrollRuntimeCount = 4;

    UP.UnrollAndJam = true;
    UP.UnrollAndJamInnerLoopThreshold = 60;
  }
}

void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type that matches the stored operands.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->arg_size() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = PoisonValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type and used in memory accesses.
/// If it is used in a "complex" getelementptr, we allow it to be promoted
/// without finding other sext instructions that sign-extend the same initial
/// value. A getelementptr is considered "complex" if it has more than 2
/// operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

bool AArch64TTIImpl::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  if (!VF.isScalable())
    return true;

  Type *Ty = RdxDesc.getRecurrenceType();
  if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
    return false;

  switch (RdxDesc.getRecurrenceKind()) {
  case RecurKind::Add:
  case RecurKind::FAdd:
  case RecurKind::And:
  case RecurKind::Or:
  case RecurKind::Xor:
  case RecurKind::SMin:
  case RecurKind::SMax:
  case RecurKind::UMin:
  case RecurKind::UMax:
  case RecurKind::FMin:
  case RecurKind::FMax:
  case RecurKind::FMulAdd:
  case RecurKind::IAnyOf:
  case RecurKind::FAnyOf:
    return true;
  default:
    return false;
  }
}

InstructionCost
AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                       FastMathFlags FMF,
                                       TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);

  if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
    return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);

  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
    IntrinsicCostAttributes Attrs(IID, LegalVTy, {LegalVTy, LegalVTy}, FMF);
    LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
  }

  return LegalizationCost + /*Cost of horizontal reduction*/ 2;
}

InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
    unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
    LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  // Add the final reduction cost for the legal horizontal reduction.
  switch (ISD) {
  case
ISD::ADD: 3568 case ISD::AND: 3569 case ISD::OR: 3570 case ISD::XOR: 3571 case ISD::FADD: 3572 return LegalizationCost + 2; 3573 default: 3574 return InstructionCost::getInvalid(); 3575 } 3576 } 3577 3578 InstructionCost 3579 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 3580 std::optional<FastMathFlags> FMF, 3581 TTI::TargetCostKind CostKind) { 3582 if (TTI::requiresOrderedReduction(FMF)) { 3583 if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) { 3584 InstructionCost BaseCost = 3585 BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 3586 // Add on extra cost to reflect the extra overhead on some CPUs. We still 3587 // end up vectorizing for more computationally intensive loops. 3588 return BaseCost + FixedVTy->getNumElements(); 3589 } 3590 3591 if (Opcode != Instruction::FAdd) 3592 return InstructionCost::getInvalid(); 3593 3594 auto *VTy = cast<ScalableVectorType>(ValTy); 3595 InstructionCost Cost = 3596 getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind); 3597 Cost *= getMaxNumElements(VTy->getElementCount()); 3598 return Cost; 3599 } 3600 3601 if (isa<ScalableVectorType>(ValTy)) 3602 return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind); 3603 3604 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy); 3605 MVT MTy = LT.second; 3606 int ISD = TLI->InstructionOpcodeToISD(Opcode); 3607 assert(ISD && "Invalid opcode"); 3608 3609 // Horizontal adds can use the 'addv' instruction. We model the cost of these 3610 // instructions as twice a normal vector add, plus 1 for each legalization 3611 // step (LT.first). This is the only arithmetic vector reduction operation for 3612 // which we have an instruction. 3613 // OR, XOR and AND costs should match the codegen from: 3614 // OR: llvm/test/CodeGen/AArch64/reduce-or.ll 3615 // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll 3616 // AND: llvm/test/CodeGen/AArch64/reduce-and.ll 3617 static const CostTblEntry CostTblNoPairwise[]{ 3618 {ISD::ADD, MVT::v8i8, 2}, 3619 {ISD::ADD, MVT::v16i8, 2}, 3620 {ISD::ADD, MVT::v4i16, 2}, 3621 {ISD::ADD, MVT::v8i16, 2}, 3622 {ISD::ADD, MVT::v4i32, 2}, 3623 {ISD::ADD, MVT::v2i64, 2}, 3624 {ISD::OR, MVT::v8i8, 15}, 3625 {ISD::OR, MVT::v16i8, 17}, 3626 {ISD::OR, MVT::v4i16, 7}, 3627 {ISD::OR, MVT::v8i16, 9}, 3628 {ISD::OR, MVT::v2i32, 3}, 3629 {ISD::OR, MVT::v4i32, 5}, 3630 {ISD::OR, MVT::v2i64, 3}, 3631 {ISD::XOR, MVT::v8i8, 15}, 3632 {ISD::XOR, MVT::v16i8, 17}, 3633 {ISD::XOR, MVT::v4i16, 7}, 3634 {ISD::XOR, MVT::v8i16, 9}, 3635 {ISD::XOR, MVT::v2i32, 3}, 3636 {ISD::XOR, MVT::v4i32, 5}, 3637 {ISD::XOR, MVT::v2i64, 3}, 3638 {ISD::AND, MVT::v8i8, 15}, 3639 {ISD::AND, MVT::v16i8, 17}, 3640 {ISD::AND, MVT::v4i16, 7}, 3641 {ISD::AND, MVT::v8i16, 9}, 3642 {ISD::AND, MVT::v2i32, 3}, 3643 {ISD::AND, MVT::v4i32, 5}, 3644 {ISD::AND, MVT::v2i64, 3}, 3645 }; 3646 switch (ISD) { 3647 default: 3648 break; 3649 case ISD::ADD: 3650 if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy)) 3651 return (LT.first - 1) + Entry->Cost; 3652 break; 3653 case ISD::XOR: 3654 case ISD::AND: 3655 case ISD::OR: 3656 const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy); 3657 if (!Entry) 3658 break; 3659 auto *ValVTy = cast<FixedVectorType>(ValTy); 3660 if (MTy.getVectorNumElements() <= ValVTy->getNumElements() && 3661 isPowerOf2_32(ValVTy->getNumElements())) { 3662 InstructionCost ExtraCost = 0; 3663 if (LT.first != 1) { 3664 // Type needs to be split, so there is an extra cost of LT.first - 1 3665 // arithmetic ops. 
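        // (Illustrative example: an and-reduction of v32i8 legalizes to two
        // v16i8 halves, so one extra AND at cost LT.first - 1 combines the
        // halves before the table cost for the v16i8 reduction is added.)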
        auto *Ty = FixedVectorType::get(ValTy->getElementType(),
                                        MTy.getVectorNumElements());
        ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
        ExtraCost *= LT.first - 1;
      }
      // All and/or/xor of i1 will be lowered with maxv/minv/addv + fmov.
      auto Cost = ValVTy->getElementType()->isIntegerTy(1) ? 2 : Entry->Cost;
      return Cost + ExtraCost;
    }
    break;
  }
  return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
}

InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
  static const CostTblEntry ShuffleTbl[] = {
      { TTI::SK_Splice, MVT::nxv16i8,  1 },
      { TTI::SK_Splice, MVT::nxv8i16,  1 },
      { TTI::SK_Splice, MVT::nxv4i32,  1 },
      { TTI::SK_Splice, MVT::nxv2i64,  1 },
      { TTI::SK_Splice, MVT::nxv2f16,  1 },
      { TTI::SK_Splice, MVT::nxv4f16,  1 },
      { TTI::SK_Splice, MVT::nxv8f16,  1 },
      { TTI::SK_Splice, MVT::nxv2bf16, 1 },
      { TTI::SK_Splice, MVT::nxv4bf16, 1 },
      { TTI::SK_Splice, MVT::nxv8bf16, 1 },
      { TTI::SK_Splice, MVT::nxv2f32,  1 },
      { TTI::SK_Splice, MVT::nxv4f32,  1 },
      { TTI::SK_Splice, MVT::nxv2f64,  1 },
  };

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (Tp->getElementCount() == ElementCount::getScalable(1))
    return InstructionCost::getInvalid();

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  EVT PromotedVT = LT.second.getScalarType() == MVT::i1
                       ? TLI->getPromotedVTForPredicate(EVT(LT.second))
                       : LT.second;
  Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
  InstructionCost LegalizationCost = 0;
  if (Index < 0) {
    LegalizationCost =
        getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
  }

  // Predicated splices are promoted during lowering; see
  // AArch64ISelLowering.cpp. The cost is computed on the promoted type.
  if (LT.second.getScalarType() == MVT::i1) {
    LegalizationCost +=
        getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
                         TTI::CastContextHint::None, CostKind) +
        getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
                         TTI::CastContextHint::None, CostKind);
  }
  const auto *Entry =
      CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
  assert(Entry && "Illegal Type for Splice");
  LegalizationCost += Entry->Cost;
  return LegalizationCost * LT.first;
}

InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *Tp,
                                               ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind,
                                               int Index, VectorType *SubTp,
                                               ArrayRef<const Value *> Args) {
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  // If we have a Mask, and the type is split during legalization, break the
  // Mask into smaller vectors and sum the cost of each shuffle.
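  // For example (illustrative): an 8-element shuffle of <8 x i32> legalizes
  // to two v4i32 registers on NEON, so the 8-element mask is re-costed below
  // as two 4-element sub-masks, each mapped onto the sub-vectors it actually
  // reads.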
3745 if (!Mask.empty() && isa<FixedVectorType>(Tp) && LT.second.isVector() && 3746 Tp->getScalarSizeInBits() == LT.second.getScalarSizeInBits() && 3747 Mask.size() > LT.second.getVectorNumElements() && !Index && !SubTp) { 3748 unsigned TpNumElts = Mask.size(); 3749 unsigned LTNumElts = LT.second.getVectorNumElements(); 3750 unsigned NumVecs = (TpNumElts + LTNumElts - 1) / LTNumElts; 3751 VectorType *NTp = 3752 VectorType::get(Tp->getScalarType(), LT.second.getVectorElementCount()); 3753 InstructionCost Cost; 3754 for (unsigned N = 0; N < NumVecs; N++) { 3755 SmallVector<int> NMask; 3756 // Split the existing mask into chunks of size LTNumElts. Track the source 3757 // sub-vectors to ensure the result has at most 2 inputs. 3758 unsigned Source1, Source2; 3759 unsigned NumSources = 0; 3760 for (unsigned E = 0; E < LTNumElts; E++) { 3761 int MaskElt = (N * LTNumElts + E < TpNumElts) ? Mask[N * LTNumElts + E] 3762 : PoisonMaskElem; 3763 if (MaskElt < 0) { 3764 NMask.push_back(PoisonMaskElem); 3765 continue; 3766 } 3767 3768 // Calculate which source from the input this comes from and whether it 3769 // is new to us. 3770 unsigned Source = MaskElt / LTNumElts; 3771 if (NumSources == 0) { 3772 Source1 = Source; 3773 NumSources = 1; 3774 } else if (NumSources == 1 && Source != Source1) { 3775 Source2 = Source; 3776 NumSources = 2; 3777 } else if (NumSources >= 2 && Source != Source1 && Source != Source2) { 3778 NumSources++; 3779 } 3780 3781 // Add to the new mask. For the NumSources>2 case these are not correct, 3782 // but are only used for the modular lane number. 3783 if (Source == Source1) 3784 NMask.push_back(MaskElt % LTNumElts); 3785 else if (Source == Source2) 3786 NMask.push_back(MaskElt % LTNumElts + LTNumElts); 3787 else 3788 NMask.push_back(MaskElt % LTNumElts); 3789 } 3790 // If the sub-mask has at most 2 input sub-vectors then re-cost it using 3791 // getShuffleCost. If not then cost it using the worst case. 3792 if (NumSources <= 2) 3793 Cost += getShuffleCost(NumSources <= 1 ? TTI::SK_PermuteSingleSrc 3794 : TTI::SK_PermuteTwoSrc, 3795 NTp, NMask, CostKind, 0, nullptr, Args); 3796 else if (any_of(enumerate(NMask), [&](const auto &ME) { 3797 return ME.value() % LTNumElts == ME.index(); 3798 })) 3799 Cost += LTNumElts - 1; 3800 else 3801 Cost += LTNumElts; 3802 } 3803 return Cost; 3804 } 3805 3806 Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp); 3807 3808 // Check for broadcast loads, which are supported by the LD1R instruction. 3809 // In terms of code-size, the shuffle vector is free when a load + dup get 3810 // folded into a LD1R. That's what we check and return here. For performance 3811 // and reciprocal throughput, a LD1R is not completely free. In this case, we 3812 // return the cost for the broadcast below (i.e. 1 for most/all types), so 3813 // that we model the load + dup sequence slightly higher because LD1R is a 3814 // high latency instruction. 3815 if (CostKind == TTI::TCK_CodeSize && Kind == TTI::SK_Broadcast) { 3816 bool IsLoad = !Args.empty() && isa<LoadInst>(Args[0]); 3817 if (IsLoad && LT.second.isVector() && 3818 isLegalBroadcastLoad(Tp->getElementType(), 3819 LT.second.getVectorElementCount())) 3820 return 0; 3821 } 3822 3823 // If we have 4 elements for the shuffle and a Mask, get the cost straight 3824 // from the perfect shuffle tables. 
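  // (Illustrative: the v4i32 mask <1, 0, 3, 2> is a single REV64-style
  // permute in the perfect shuffle tables, whereas a worst-case 4-element
  // permute needs 3 instructions.)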
3825 if (Mask.size() == 4 && Tp->getElementCount() == ElementCount::getFixed(4) && 3826 (Tp->getScalarSizeInBits() == 16 || Tp->getScalarSizeInBits() == 32) && 3827 all_of(Mask, [](int E) { return E < 8; })) 3828 return getPerfectShuffleCost(Mask); 3829 3830 if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose || 3831 Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc || 3832 Kind == TTI::SK_Reverse || Kind == TTI::SK_Splice) { 3833 static const CostTblEntry ShuffleTbl[] = { 3834 // Broadcast shuffle kinds can be performed with 'dup'. 3835 {TTI::SK_Broadcast, MVT::v8i8, 1}, 3836 {TTI::SK_Broadcast, MVT::v16i8, 1}, 3837 {TTI::SK_Broadcast, MVT::v4i16, 1}, 3838 {TTI::SK_Broadcast, MVT::v8i16, 1}, 3839 {TTI::SK_Broadcast, MVT::v2i32, 1}, 3840 {TTI::SK_Broadcast, MVT::v4i32, 1}, 3841 {TTI::SK_Broadcast, MVT::v2i64, 1}, 3842 {TTI::SK_Broadcast, MVT::v4f16, 1}, 3843 {TTI::SK_Broadcast, MVT::v8f16, 1}, 3844 {TTI::SK_Broadcast, MVT::v2f32, 1}, 3845 {TTI::SK_Broadcast, MVT::v4f32, 1}, 3846 {TTI::SK_Broadcast, MVT::v2f64, 1}, 3847 // Transpose shuffle kinds can be performed with 'trn1/trn2' and 3848 // 'zip1/zip2' instructions. 3849 {TTI::SK_Transpose, MVT::v8i8, 1}, 3850 {TTI::SK_Transpose, MVT::v16i8, 1}, 3851 {TTI::SK_Transpose, MVT::v4i16, 1}, 3852 {TTI::SK_Transpose, MVT::v8i16, 1}, 3853 {TTI::SK_Transpose, MVT::v2i32, 1}, 3854 {TTI::SK_Transpose, MVT::v4i32, 1}, 3855 {TTI::SK_Transpose, MVT::v2i64, 1}, 3856 {TTI::SK_Transpose, MVT::v4f16, 1}, 3857 {TTI::SK_Transpose, MVT::v8f16, 1}, 3858 {TTI::SK_Transpose, MVT::v2f32, 1}, 3859 {TTI::SK_Transpose, MVT::v4f32, 1}, 3860 {TTI::SK_Transpose, MVT::v2f64, 1}, 3861 // Select shuffle kinds. 3862 // TODO: handle vXi8/vXi16. 3863 {TTI::SK_Select, MVT::v2i32, 1}, // mov. 3864 {TTI::SK_Select, MVT::v4i32, 2}, // rev+trn (or similar). 3865 {TTI::SK_Select, MVT::v2i64, 1}, // mov. 3866 {TTI::SK_Select, MVT::v2f32, 1}, // mov. 3867 {TTI::SK_Select, MVT::v4f32, 2}, // rev+trn (or similar). 3868 {TTI::SK_Select, MVT::v2f64, 1}, // mov. 3869 // PermuteSingleSrc shuffle kinds. 3870 {TTI::SK_PermuteSingleSrc, MVT::v2i32, 1}, // mov. 3871 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 3}, // perfectshuffle worst case. 3872 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // mov. 3873 {TTI::SK_PermuteSingleSrc, MVT::v2f32, 1}, // mov. 3874 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 3}, // perfectshuffle worst case. 3875 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // mov. 3876 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 3}, // perfectshuffle worst case. 3877 {TTI::SK_PermuteSingleSrc, MVT::v4f16, 3}, // perfectshuffle worst case. 3878 {TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3}, // same 3879 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 8}, // constpool + load + tbl 3880 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 8}, // constpool + load + tbl 3881 {TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8}, // constpool + load + tbl 3882 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 8}, // constpool + load + tbl 3883 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 8}, // constpool + load + tbl 3884 // Reverse can be lowered with `rev`. 
3885 {TTI::SK_Reverse, MVT::v2i32, 1}, // REV64 3886 {TTI::SK_Reverse, MVT::v4i32, 2}, // REV64; EXT 3887 {TTI::SK_Reverse, MVT::v2i64, 1}, // EXT 3888 {TTI::SK_Reverse, MVT::v2f32, 1}, // REV64 3889 {TTI::SK_Reverse, MVT::v4f32, 2}, // REV64; EXT 3890 {TTI::SK_Reverse, MVT::v2f64, 1}, // EXT 3891 {TTI::SK_Reverse, MVT::v8f16, 2}, // REV64; EXT 3892 {TTI::SK_Reverse, MVT::v8i16, 2}, // REV64; EXT 3893 {TTI::SK_Reverse, MVT::v16i8, 2}, // REV64; EXT 3894 {TTI::SK_Reverse, MVT::v4f16, 1}, // REV64 3895 {TTI::SK_Reverse, MVT::v4i16, 1}, // REV64 3896 {TTI::SK_Reverse, MVT::v8i8, 1}, // REV64 3897 // Splice can all be lowered as `ext`. 3898 {TTI::SK_Splice, MVT::v2i32, 1}, 3899 {TTI::SK_Splice, MVT::v4i32, 1}, 3900 {TTI::SK_Splice, MVT::v2i64, 1}, 3901 {TTI::SK_Splice, MVT::v2f32, 1}, 3902 {TTI::SK_Splice, MVT::v4f32, 1}, 3903 {TTI::SK_Splice, MVT::v2f64, 1}, 3904 {TTI::SK_Splice, MVT::v8f16, 1}, 3905 {TTI::SK_Splice, MVT::v8bf16, 1}, 3906 {TTI::SK_Splice, MVT::v8i16, 1}, 3907 {TTI::SK_Splice, MVT::v16i8, 1}, 3908 {TTI::SK_Splice, MVT::v4bf16, 1}, 3909 {TTI::SK_Splice, MVT::v4f16, 1}, 3910 {TTI::SK_Splice, MVT::v4i16, 1}, 3911 {TTI::SK_Splice, MVT::v8i8, 1}, 3912 // Broadcast shuffle kinds for scalable vectors 3913 {TTI::SK_Broadcast, MVT::nxv16i8, 1}, 3914 {TTI::SK_Broadcast, MVT::nxv8i16, 1}, 3915 {TTI::SK_Broadcast, MVT::nxv4i32, 1}, 3916 {TTI::SK_Broadcast, MVT::nxv2i64, 1}, 3917 {TTI::SK_Broadcast, MVT::nxv2f16, 1}, 3918 {TTI::SK_Broadcast, MVT::nxv4f16, 1}, 3919 {TTI::SK_Broadcast, MVT::nxv8f16, 1}, 3920 {TTI::SK_Broadcast, MVT::nxv2bf16, 1}, 3921 {TTI::SK_Broadcast, MVT::nxv4bf16, 1}, 3922 {TTI::SK_Broadcast, MVT::nxv8bf16, 1}, 3923 {TTI::SK_Broadcast, MVT::nxv2f32, 1}, 3924 {TTI::SK_Broadcast, MVT::nxv4f32, 1}, 3925 {TTI::SK_Broadcast, MVT::nxv2f64, 1}, 3926 {TTI::SK_Broadcast, MVT::nxv16i1, 1}, 3927 {TTI::SK_Broadcast, MVT::nxv8i1, 1}, 3928 {TTI::SK_Broadcast, MVT::nxv4i1, 1}, 3929 {TTI::SK_Broadcast, MVT::nxv2i1, 1}, 3930 // Handle the cases for vector.reverse with scalable vectors 3931 {TTI::SK_Reverse, MVT::nxv16i8, 1}, 3932 {TTI::SK_Reverse, MVT::nxv8i16, 1}, 3933 {TTI::SK_Reverse, MVT::nxv4i32, 1}, 3934 {TTI::SK_Reverse, MVT::nxv2i64, 1}, 3935 {TTI::SK_Reverse, MVT::nxv2f16, 1}, 3936 {TTI::SK_Reverse, MVT::nxv4f16, 1}, 3937 {TTI::SK_Reverse, MVT::nxv8f16, 1}, 3938 {TTI::SK_Reverse, MVT::nxv2bf16, 1}, 3939 {TTI::SK_Reverse, MVT::nxv4bf16, 1}, 3940 {TTI::SK_Reverse, MVT::nxv8bf16, 1}, 3941 {TTI::SK_Reverse, MVT::nxv2f32, 1}, 3942 {TTI::SK_Reverse, MVT::nxv4f32, 1}, 3943 {TTI::SK_Reverse, MVT::nxv2f64, 1}, 3944 {TTI::SK_Reverse, MVT::nxv16i1, 1}, 3945 {TTI::SK_Reverse, MVT::nxv8i1, 1}, 3946 {TTI::SK_Reverse, MVT::nxv4i1, 1}, 3947 {TTI::SK_Reverse, MVT::nxv2i1, 1}, 3948 }; 3949 if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second)) 3950 return LT.first * Entry->Cost; 3951 } 3952 3953 if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp)) 3954 return getSpliceCost(Tp, Index); 3955 3956 // Inserting a subvector can often be done with either a D, S or H register 3957 // move, so long as the inserted vector is "aligned". 
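  // For example (illustrative): inserting a <2 x float> subvector into
  // <4 x float> at index 0 or 2 can be a single 64-bit lane move (an ins of a
  // D register), so an "aligned" index is costed as just SubLT.first below.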
  if (Kind == TTI::SK_InsertSubvector && LT.second.isFixedLengthVector() &&
      LT.second.getSizeInBits() <= 128 && SubTp) {
    std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
    if (SubLT.second.isVector()) {
      int NumElts = LT.second.getVectorNumElements();
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
}

static bool containsDecreasingPointers(Loop *TheLoop,
                                       PredicatedScalarEvolution *PSE) {
  const auto &Strides = DenseMap<Value *, const SCEV *>();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for addresses that are
    // consecutive and decreasing.
    for (Instruction &I : *BB) {
      if (isa<LoadInst>(&I) || isa<StoreInst>(&I)) {
        Value *Ptr = getLoadStorePointerOperand(&I);
        Type *AccessTy = getLoadStoreType(&I);
        if (getPtrStride(*PSE, AccessTy, Ptr, TheLoop, Strides,
                         /*Assume=*/true, /*ShouldCheckWrap=*/false)
                .value_or(0) < 0)
          return true;
      }
    }
  }
  return false;
}

bool AArch64TTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
  if (!ST->hasSVE())
    return false;

  // We don't currently support vectorisation with interleaving for SVE - with
  // such loops we're better off not using tail-folding. This gives us a
  // chance to fall back on fixed-width vectorisation using NEON's
  // ld2/st2/etc.
  if (TFI->IAI->hasGroups())
    return false;

  TailFoldingOpts Required = TailFoldingOpts::Disabled;
  if (TFI->LVL->getReductionVars().size())
    Required |= TailFoldingOpts::Reductions;
  if (TFI->LVL->getFixedOrderRecurrences().size())
    Required |= TailFoldingOpts::Recurrences;

  // We call this to discover whether any load/store pointers in the loop have
  // negative strides. This will require extra work to reverse the loop
  // predicate, which may be expensive.
  if (containsDecreasingPointers(TFI->LVL->getLoop(),
                                 TFI->LVL->getPredicatedScalarEvolution()))
    Required |= TailFoldingOpts::Reverse;
  if (Required == TailFoldingOpts::Disabled)
    Required |= TailFoldingOpts::Simple;

  if (!TailFoldingOptionLoc.satisfies(ST->getSVETailFoldingDefaultOpts(),
                                      Required))
    return false;

  // Don't tail-fold for tight loops where we would be better off interleaving
  // with an unpredicated loop.
  unsigned NumInsns = 0;
  for (BasicBlock *BB : TFI->LVL->getLoop()->blocks())
    NumInsns += BB->sizeWithoutDebug();

  // We expect 4 of these to be an IV PHI, an IV add, an IV compare and a
  // branch.
  return NumInsns >= SVETailFoldInsnThreshold;
}

InstructionCost
AArch64TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace) const {
  // Scaling factors are not free at all.
  // Operands                     | Rt Latency
  // -------------------------------------------
  // Rt, [Xn, Xm]                 | 4
  // -------------------------------------------
  // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
  // Rt, [Xn, Wm, <extend> #imm]  |
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
    // Scale represents reg2 * scale, thus account for 1 if
    // it is not equal to 0 or 1.
    return AM.Scale != 0 && AM.Scale != 1;
  return -1;
}

bool AArch64TTIImpl::shouldTreatInstructionLikeSelect(const Instruction *I) {
  // For the binary operators (e.g. or) we need to be more careful than for
  // selects; here we only transform them if they are already at a natural
  // break point in the code - the end of a block with an unconditional
  // terminator.
  if (EnableOrLikeSelectOpt && I->getOpcode() == Instruction::Or &&
      isa<BranchInst>(I->getNextNode()) &&
      cast<BranchInst>(I->getNextNode())->isUnconditional())
    return true;
  return BaseT::shouldTreatInstructionLikeSelect(I);
}