//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
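      // (lis loads a 16-bit immediate into the upper halfword of the
      // register, so a constant whose low 16 bits are all zero needs only
      // one instruction.)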
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
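    // (Record-form, i.e. dot-suffixed, instructions set CR0 from their
    // result, so an explicit compare against zero can be avoided.)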
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
                             TargetLibraryInfo *LibInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value*)> memAddrUsesCTR =
    [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
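        // Opcode, when set below, names the ISD node the call would lower
        // to; if that node is legal or custom for the argument type, the
        // call will not become a library call.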
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to set_loop_iterations or loop_decrement,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
// let's return it to _setjmp state
# pragma pop_macro("setjmp")
# undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
          case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
          case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
          case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
          case Intrinsic::rint: Opcode = ISD::FRINT; break;
          case Intrinsic::lrint: Opcode = ISD::LRINT; break;
          case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round: Opcode = ISD::FROUND; break;
          case Intrinsic::lround: Opcode = ISD::LROUND; break;
          case Intrinsic::llround: Opcode = ISD::LLROUND; break;
          case Intrinsic::minnum: Opcode = ISD::FMINNUM; break;
          case Intrinsic::maxnum: Opcode = ISD::FMAXNUM; break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_fp128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
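    // (PowerPC has no frem instruction; FREM is lowered to a call to fmod
    // or one of its float/long double variants.)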
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert loops that are both small and have a short constant trip
  // count into CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC);
    return ClassID == GPRRC ? 32 : 64;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && Ty->getScalarType()->isFloatTy())
    return ST->hasVSX() ? VSXRC : FPRRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC:  return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}