//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind,
                                      Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

int AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                        const APInt &Imm, Type *Ty,
                                        TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  // Most (all?) AArch64 intrinsics do not support folding immediates into the
  // selected instruction, so we compute the materialization cost for the
  // immediate directly.
  if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
    return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_gc_statepoint:
    if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

unsigned
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  auto *RetTy = ICA.getReturnType();
  switch (ICA.getID()) {
  case Intrinsic::umin:
  case Intrinsic::umax: {
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // umin(x,y) -> sub(x,usubsat(x,y))
    // umax(x,y) -> add(x,usubsat(y,x))
    if (LT.second == MVT::v2i64)
      return LT.first * 2;
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::smin:
  case Intrinsic::smax: {
    static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
                                        MVT::v8i16, MVT::v2i32, MVT::v4i32};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  default:
    break;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                           ArrayRef<const Value *> Args) {

  // A helper that returns a vector type from the given type. The number of
  // elements in type Ty determines the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           cast<VectorType>(DstTy)->getElementCount());
  };

  // Exit early if DstTy is not a vector type whose elements are at least
  // 16 bits wide.
  if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
    return false;

  // Determine if the operation has a widening variant. We consider both the
  // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  // instructions.
  //
  // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
  // verify that their extending operands are eliminated during code
  // generation.
  switch (Opcode) {
  case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
    break;
  default:
    return false;
  }

  // To be a widening instruction (either the "wide" or "long" version), the
  // second operand must be a sign- or zero-extend with a single user. We
  // only consider extends having a single user because they may otherwise not
  // be eliminated.
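  // For example, in "add <8 x i16> %a, (zext <8 x i8> %b to <8 x i16>)" the
  // zero-extend feeding the add can typically be folded into a single uaddw
  // instruction, so the extend itself ends up free.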
  if (Args.size() != 2 ||
      (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
      !Args[1]->hasOneUse())
    return false;
  auto *Extend = cast<CastInst>(Args[1]);

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
  unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  auto *SrcTy = toVectorTy(Extend->getSrcTy());
  auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorMinNumElements();
  unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorMinNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     TTI::CastContextHint CCH,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // If the cast is observable, and it is used by a widening instruction (e.g.,
  // uaddl, saddw, etc.), it may be free.
  if (I && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
    if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
      // If the cast is the second operand, it is free. We will generate either
      // a "wide" or a "long" version of the widening instruction.
      if (I == SingleUser->getOperand(1))
        return 0;
      // If the cast is not the second operand, it will be free if it looks the
      // same as the second operand. In this case, we will generate a "long"
      // version of the widening instruction.
      if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
        if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
            cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
          return 0;
    }
  }

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return AdjustCost(Entry->Cost);

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                   CostKind);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                   CostKind);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                 CostKind);
}

unsigned AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
                                        TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
  // Branches are assumed to be predicted.
  return 0;
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other inserts/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  int Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by a power-of-two constant is
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties may not be the same as those of the
      // previous operation; conservatively assume OP_None.
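      // For example, a signed division of w0 by 4 is typically emitted as
      //   add  w8, w0, #3
      //   cmp  w0, #0
      //   csel w8, w8, w0, lt
      //   asr  w0, w8, #2
      // which matches the ADD + CMP + SELECT + SRA sequence costed below.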
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by constant is expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHS + SUB + SRL + ADD + SRL.
        int MulCost = getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                             Opd1Info, Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        int AddCost = getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                             Opd1Info, Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        int ShrCost = getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                             Opd1Info, Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                          Opd2Info,
                                          Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;

  case ISD::MUL:
    if (LT.second != MVT::v2i64)
      return (Cost + 1) * LT.first;
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // as elements are extracted from the vectors and the muls scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for a i64 vector directly here, which is:
    // - four i64 extracts,
    // - two i64 inserts, and
    // - two muls.
    // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
    // LT.first = 2 the cost is 16.
    return LT.first * 8;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;

  case ISD::FADD:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
    if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty())
      return (Cost + 2) * LT.first;

    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                              const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, CmpInst::Predicate VecPred,
                                       TTI::TargetCostKind CostKind,
                                       const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower some vector selects well when they are wider than the
  // register width.
  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;

    // If VecPred is not set, check if we can get a predicate from the context
    // instruction, if its type matches the requested ValTy.
    if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
      CmpInst::Predicate CurrentPred;
      if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
                            m_Value())))
        VecPred = CurrentPred;
    }
    // Check if we have a compare/select chain that can be lowered using a
    // CMxx & BFI pair.
    if (CmpInst::isIntPredicate(VecPred)) {
      static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                          MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                          MVT::v2i64};
      auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
      if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
        return LT.first;
    }

    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  // The base case handles scalable vectors fine for now, since it treats the
  // cost as 1 * legalization cost.
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

AArch64TTIImpl::TTI::MemCmpExpansionOptions
AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  if (ST->requiresStrictAlign()) {
    // TODO: Add cost modeling for strict align. Misaligned loads expand to
    // a bunch of instructions when strict align is enabled.
    return Options;
  }
  Options.AllowOverlappingLoads = true;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, in some targets
  // they may wake up the FP unit, which raises the power consumption. Perhaps
  // they could be used with no holds barred (-O3).
  Options.LoadSizes = {8, 4, 2, 1};
  return Options;
}

unsigned AArch64TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {

  if (!isa<ScalableVectorType>(DataTy))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  auto *VT = cast<VectorType>(DataTy);
  auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
  ElementCount LegalVF = LT.second.getVectorElementCount();
  Optional<unsigned> MaxNumVScale = getMaxVScale();
  assert(MaxNumVScale && "Expected valid max vscale value");

  unsigned MemOpCost =
      getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
  unsigned MaxNumElementsPerGather =
      MaxNumVScale.getValue() * LegalVF.getKnownMinValue();
  return LT.first * MaxNumElementsPerGather * MemOpCost;
}

bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
  return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                    MaybeAlign Alignment, unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Type legalization can't handle structs.
  if (TLI->getValueType(DL, Ty, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
                                  CostKind);

  auto LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has been
    // shown in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (useNeonVector(Ty) &&
      cast<VectorType>(Ty)->getElementType()->isIntegerTy(8)) {
    unsigned ProfitableNumElements;
    if (Opcode == Instruction::Store)
      // We use a custom trunc store lowering so v.4b should be profitable.
      ProfitableNumElements = 4;
    else
      // We scalarize the loads because there is no v.4b register and we
      // have to promote the elements to v.2.
      ProfitableNumElements = 8;

    if (cast<FixedVectorType>(Ty)->getNumElements() < ProfitableNumElements) {
      unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
      unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
      // We generate 2 instructions per vector element.
      return NumVectorizableInstsToAmortize * NumVecElts * 2;
    }
  }

  return LT.first;
}

int AArch64TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  auto *VecVTy = cast<FixedVectorType>(VecTy);

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecVTy->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
        128)
      Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
              getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // An inner loop is more likely to be hot, and the runtime check can be
  // hoisted out by the LICM pass, so the overhead is lower; use a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);
}

void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type and used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                           TTI::ReductionFlags Flags) const {
  auto *VTy = cast<VectorType>(Ty);
  unsigned ScalarBits = Ty->getScalarSizeInBits();
  switch (Opcode) {
  case Instruction::FAdd:
  case Instruction::FMul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
    return false;
  case Instruction::Add:
    return ScalarBits * cast<FixedVectorType>(VTy)->getNumElements() >= 128;
  case Instruction::ICmp:
    return (ScalarBits < 64) &&
           (ScalarBits * cast<FixedVectorType>(VTy)->getNumElements() >= 128);
  case Instruction::FCmp:
    return Flags.NoNaN;
  default:
    llvm_unreachable("Unhandled reduction opcode");
  }
  return false;
}

int AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                           bool IsPairwise, bool IsUnsigned,
                                           TTI::TargetCostKind CostKind) {
  if (!isa<ScalableVectorType>(Ty))
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);
  assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
         "Both vectors need to be scalable");

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
    unsigned CmpOpcode =
        Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
    LegalizationCost =
        getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  return LegalizationCost + /*Cost of horizontal reduction*/ 2;
}

int AArch64TTIImpl::getArithmeticReductionCostSVE(
    unsigned Opcode, VectorType *ValTy, bool IsPairwise,
    TTI::TargetCostKind CostKind) {
  assert(!IsPairwise && "Cannot be pairwise to continue");

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  int LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
    LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  // Add the final reduction cost for the legal horizontal reduction.
  switch (ISD) {
  case ISD::ADD:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::FADD:
    return LegalizationCost + 2;
  default:
    // TODO: Replace with an invalid cost for cases not supported by SVE once
    // InstructionCost is used.
    return 16;
  }
}

int AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode,
                                               VectorType *ValTy,
                                               bool IsPairwiseForm,
                                               TTI::TargetCostKind CostKind) {

  if (isa<ScalableVectorType>(ValTy))
    return getArithmeticReductionCostSVE(Opcode, ValTy, IsPairwiseForm,
                                         CostKind);
  if (IsPairwiseForm)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                             CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  MVT MTy = LT.second;
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Horizontal adds can use the 'addv' instruction. We model the cost of these
  // instructions as normal vector adds. This is the only arithmetic vector
  // reduction operation for which we have an instruction.
  static const CostTblEntry CostTblNoPairwise[]{
      {ISD::ADD, MVT::v8i8,  1},
      {ISD::ADD, MVT::v16i8, 1},
      {ISD::ADD, MVT::v4i16, 1},
      {ISD::ADD, MVT::v8i16, 1},
      {ISD::ADD, MVT::v4i32, 1},
  };

  if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                           CostKind);
}

int AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                   int Index, VectorType *SubTp) {
  if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
      Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc) {
    static const CostTblEntry ShuffleTbl[] = {
      // Broadcast shuffle kinds can be performed with 'dup'.
      { TTI::SK_Broadcast, MVT::v8i8,  1 },
      { TTI::SK_Broadcast, MVT::v16i8, 1 },
      { TTI::SK_Broadcast, MVT::v4i16, 1 },
      { TTI::SK_Broadcast, MVT::v8i16, 1 },
      { TTI::SK_Broadcast, MVT::v2i32, 1 },
      { TTI::SK_Broadcast, MVT::v4i32, 1 },
      { TTI::SK_Broadcast, MVT::v2i64, 1 },
      { TTI::SK_Broadcast, MVT::v2f32, 1 },
      { TTI::SK_Broadcast, MVT::v4f32, 1 },
      { TTI::SK_Broadcast, MVT::v2f64, 1 },
      // Transpose shuffle kinds can be performed with 'trn1/trn2' and
      // 'zip1/zip2' instructions.
      { TTI::SK_Transpose, MVT::v8i8,  1 },
      { TTI::SK_Transpose, MVT::v16i8, 1 },
      { TTI::SK_Transpose, MVT::v4i16, 1 },
      { TTI::SK_Transpose, MVT::v8i16, 1 },
      { TTI::SK_Transpose, MVT::v2i32, 1 },
      { TTI::SK_Transpose, MVT::v4i32, 1 },
      { TTI::SK_Transpose, MVT::v2i64, 1 },
      { TTI::SK_Transpose, MVT::v2f32, 1 },
      { TTI::SK_Transpose, MVT::v4f32, 1 },
      { TTI::SK_Transpose, MVT::v2f64, 1 },
      // Select shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
      // PermuteSingleSrc shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
    };
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}