//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
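      // (e.g. 0x00120000: the low halfword is zero, so a single lis loads the
      // high halfword directly.)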
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

int PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
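    // Fall through so that ZeroFree is set for compares as well as selects,
    // making a zero immediate free in both cases.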
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
                             TargetLibraryInfo *LibInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value *)> memAddrUsesCTR =
    [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
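        // Opcode collects the ISD opcode for intrinsics and recognized library
        // functions whose lowering depends on legality; it is checked against
        // the legalizer further below.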
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to set_loop_iterations or loop_decrement,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                  isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
          case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
          case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
          case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
          case Intrinsic::rint: Opcode = ISD::FRINT; break;
          case Intrinsic::lrint: Opcode = ISD::LRINT; break;
          case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round: Opcode = ISD::FROUND; break;
          case Intrinsic::lround: Opcode = ISD::LROUND; break;
          case Intrinsic::llround: Opcode = ISD::LLROUND; break;
          case Intrinsic::minnum: Opcode = ISD::FMINNUM; break;
          case Intrinsic::maxnum: Opcode = ISD::FMAXNUM; break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
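          // The remaining recognized libcalls map to an ISD opcode and defer
          // to the operation-legality check below.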
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_fp128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
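    // (PowerPC has no floating-point remainder instruction, so fmod and
    // friends end up as library calls.)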
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short-trip-count loops into CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes that
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC:  return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
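  // (Roughly speaking, the data prefetch pass divides this value by the loop
  // size, in instructions, to decide how many iterations ahead to prefetch.)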
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready.
  // Assume that the future CPU is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
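  // Hence the cost is one shuffle per legalized register (LT.first), adjusted
  // below for targets where vector ops share throughput with the scalar units.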
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case. Note that MemBytes is actually a bit count,
  // despite its name, since getPrimitiveSizeInBits() returns bits.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, account for the cost of the load/store operation itself.
  int Cost =
      getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor*(LT.first-1);

  return Cost;
}

unsigned PPCTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                           ArrayRef<Value*> Args,
                                           FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

unsigned PPCTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                           ArrayRef<Type*> Tys,
                                           FastMathFlags FMF,
                                           unsigned ScalarizationCostPassed) {
  if (ID == Intrinsic::bswap && ST->hasP9Vector())
    return TLI->getTypeLegalizationCost(DL, RetTy).first;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}