//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

Optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
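    // Illustrative IR for this rewrite (value names hypothetical), assuming
    // %p is known to be 16-byte aligned:
    //   %v = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %p)
    // becomes
    //   %v = load <4 x i32>, <4 x i32>* %p.cast, align 16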
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = IC.Builder.CreateBitCast(
          II.getArgOperand(0), PointerType::getUnqual(II.getType()));
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::getUnqual(II.getType()));
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
      Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h.  That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
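    // For example (illustrative): on little endian, a mask byte of 0 becomes
    // index 31 - 0 = 31, and with V1 and V2 swapped below this selects
    // element 15 of V1, undoing the complement applied by vec_perm in
    // altivec.h.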
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
      }
    }
    break;
  }
  return None;
}

InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
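      // Illustrative: 0x12340000 has a zero low halfword, so a single lis
      // (load-immediate-shifted) materializes it.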
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
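    // Illustrative: a contiguous-run mask such as 0x00FFFF00 can be applied
    // with a single rlwinm, so such an immediate costs nothing here.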
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost PPCTTIImpl::getUserCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        TTI::TargetCostKind CostKind) {
  // getCastInstrCost and getMemoryOpCost already perform the vector
  // adjustment, so defer to the base implementation for casts, loads, and
  // stores.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getUserCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
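    // Illustrative: an <8 x i32> operation with 128-bit vector registers
    // legalizes to two <4 x i32> halves, so LT.first == 2 doubles the cost.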
    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
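// (In the general-dynamic and local-dynamic models checked below, the
// address is resolved through a call to __tls_get_addr, which would clobber
// the count register.)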
static bool memAddrUsesCTR(const Value *MemAddr, const PPCTargetMachine &TM,
                           SmallPtrSetImpl<const Value *> &Visited) {
  // No need to traverse again if we already checked this operand.
  if (!Visited.insert(MemAddr).second)
    return false;
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(CO, TM, Visited))
          return true;
    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  TLSModel::Model Model = TM.getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
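  // A hypothetical example that would be flagged here:
  //   asm volatile("mtctr %0; bctrl" : : "r"(FnPtr) : "ctr", "lr");
  // The "ctr" clobber is parsed as a non-input constraint with code "{ctr}".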
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_insensitive("{ctr}"))
            return true;
    }
    return false;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  auto supportedHalfPrecisionOp = [](Instruction *Inst) {
    switch (Inst->getOpcode()) {
    default:
      return false;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::FPToUI:
    case Instruction::UIToFP:
    case Instruction::FPToSI:
    case Instruction::SIToFP:
      return true;
    }
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    // There are no direct operations on half precision so assume that
    // anything with that type requires a call except for a few select
    // operations with Power9.
    if (Instruction *CurrInst = dyn_cast<Instruction>(J)) {
      for (const auto &Op : CurrInst->operands()) {
        if (Op->getType()->getScalarType()->isHalfTy() ||
            CurrInst->getType()->getScalarType()->isHalfTy())
          return !(ST->isISA3_0() && supportedHalfPrecisionOp(CurrInst));
      }
    }
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to loop_decrement or set_loop_iterations,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Binary operations on 128-bit values will use CTR.
          case Intrinsic::experimental_constrained_fadd:
          case Intrinsic::experimental_constrained_fsub:
          case Intrinsic::experimental_constrained_fmul:
          case Intrinsic::experimental_constrained_fdiv:
          case Intrinsic::experimental_constrained_frem:
            if (F->getType()->getScalarType()->isFP128Ty() ||
                F->getType()->getScalarType()->isPPC_FP128Ty())
              return true;
            break;

          case Intrinsic::experimental_constrained_fptosi:
          case Intrinsic::experimental_constrained_fptoui:
          case Intrinsic::experimental_constrained_sitofp:
          case Intrinsic::experimental_constrained_uitofp: {
            Type *SrcType = CI->getArgOperand(0)->getType()->getScalarType();
            Type *DstType = CI->getType()->getScalarType();
            if (SrcType->isPPC_FP128Ty() || DstType->isPPC_FP128Ty() ||
                isLargeIntegerTy(!TM.isPPC64(), SrcType) ||
                isLargeIntegerTy(!TM.isPPC64(), DstType))
              return true;
            break;
          }

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
          case Intrinsic::experimental_constrained_powi:
          case Intrinsic::experimental_constrained_log:
          case Intrinsic::experimental_constrained_log2:
          case Intrinsic::experimental_constrained_log10:
          case Intrinsic::experimental_constrained_exp:
          case Intrinsic::experimental_constrained_exp2:
          case Intrinsic::experimental_constrained_pow:
          case Intrinsic::experimental_constrained_sin:
          case Intrinsic::experimental_constrained_cos:
            return true;
          // There is no corresponding FMA instruction for PPC double double.
          // Thus, we need to disable CTR loop generation for this type.
          case Intrinsic::fmuladd:
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::fma:                Opcode = ISD::FMA;        break;
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::lrint:              Opcode = ISD::LRINT;      break;
          case Intrinsic::llrint:             Opcode = ISD::LLRINT;     break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::lround:             Opcode = ISD::LROUND;     break;
          case Intrinsic::llround:            Opcode = ISD::LLROUND;    break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::experimental_constrained_fcmp:
            Opcode = ISD::STRICT_FSETCC;
            break;
          case Intrinsic::experimental_constrained_fcmps:
            Opcode = ISD::STRICT_FSETCCS;
            break;
          case Intrinsic::experimental_constrained_fma:
            Opcode = ISD::STRICT_FMA;
            break;
          case Intrinsic::experimental_constrained_sqrt:
            Opcode = ISD::STRICT_FSQRT;
            break;
          case Intrinsic::experimental_constrained_floor:
            Opcode = ISD::STRICT_FFLOOR;
            break;
          case Intrinsic::experimental_constrained_ceil:
            Opcode = ISD::STRICT_FCEIL;
            break;
          case Intrinsic::experimental_constrained_trunc:
            Opcode = ISD::STRICT_FTRUNC;
            break;
          case Intrinsic::experimental_constrained_rint:
            Opcode = ISD::STRICT_FRINT;
            break;
          case Intrinsic::experimental_constrained_lrint:
            Opcode = ISD::STRICT_LRINT;
            break;
          case Intrinsic::experimental_constrained_llrint:
            Opcode = ISD::STRICT_LLRINT;
            break;
          case Intrinsic::experimental_constrained_nearbyint:
            Opcode = ISD::STRICT_FNEARBYINT;
            break;
          case Intrinsic::experimental_constrained_round:
            Opcode = ISD::STRICT_FROUND;
            break;
          case Intrinsic::experimental_constrained_lround:
            Opcode = ISD::STRICT_LROUND;
            break;
          case Intrinsic::experimental_constrained_llround:
            Opcode = ISD::STRICT_LLROUND;
            break;
          case Intrinsic::experimental_constrained_minnum:
            Opcode = ISD::STRICT_FMINNUM;
            break;
          case Intrinsic::experimental_constrained_maxnum:
            Opcode = ISD::STRICT_FMAXNUM;
            break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               (J->getType()->getScalarType()->isFP128Ty() ||
                J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand, TM, Visited))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short loops to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
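    // Illustrative arithmetic: with an issue width of 4, loop bodies of up to
    // 6 * 4 = 24 instructions are considered too small to amortize mtctr.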
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  // If an exit block has a PHI that accesses a TLS variable as one of the
  // incoming values from the loop, we cannot produce a CTR loop because the
  // address for that value will be computed in the loop.
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  for (auto &BB : ExitBlocks) {
    for (auto &PHI : BB->phis()) {
      for (int Idx = 0, EndIdx = PHI.getNumIncomingValues(); Idx < EndIdx;
           Idx++) {
        const BasicBlock *IncomingBB = PHI.getIncomingBlock(Idx);
        const Value *IncomingValue = PHI.getIncomingValue(Idx);
        if (L->contains(IncomingBB) &&
            memAddrUsesCTR(IncomingValue, TM, Visited))
          return false;
      }
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}
// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites when their callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively.
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
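  // Illustrative: with these sizes, a 16-byte memcmp can expand to two
  // 8-byte load/compare pairs per operand instead of a library call.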
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {

  switch (ClassID) {
    default:
      llvm_unreachable("unknown register class");
      return "PPC::unknown register class";
    case GPRRC:       return "PPC::GPRRC";
    case FPRRC:       return "PPC::FPRRC";
    case VRRC:        return "PPC::VRRC";
    case VSXRC:       return "PPC::VSXRC";
  }
}

TypeSize
PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasAltivec() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets on which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code relative to scalar code.
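// Illustrative: on a subtarget where vectorsUseTwoUnits() is true, a legal,
// unsplit vector operation with base cost 1 is reported as 2 below.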
InstructionCost PPCTTIImpl::vectorCostAdjustment(InstructionCost Cost,
                                                 unsigned Opcode, Type *Ty1,
                                                 Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<InstructionCost, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<InstructionCost, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

InstructionCost PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Fallback to the default implementation.
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                           ArrayRef<int> Mask, int Index,
                                           Type *SubTp) {
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost Cost =
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

InstructionCost PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  InstructionCost Cost =
      BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  InstructionCost Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert.  Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract.  Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld).  Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay.  This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark.  It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store.  Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  InstructionCost Cost =
      BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
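  // Illustrative: a 16-byte access with 4-byte alignment is decomposed into
  // 16 / 4 = 4 pieces, i.e. 3 extra operations per legalized register.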
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of load/store operation.
  InstructionCost Cost = getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment),
                                         AddressSpace, CostKind);

  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
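  // Illustrative: a stride-2 interleaved access of <8 x i32> on 128-bit
  // registers has LT.first == 2, adding 2 * (2 - 1) = 2 shuffles.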
  Cost += Factor*(LT.first-1);

  return Cost;
}

InstructionCost
PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {

  // We need to ensure that argument promotion does not
  // attempt to promote pointers to MMA types (__vector_pair
  // and __vector_quad) since these types explicitly cannot be
  // passed as arguments. Both of these types are larger than
  // the 128-bit Altivec vectors and have a scalar size of 1 bit.
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    if (EltTy->isSized())
      return (EltTy->isIntOrIntVectorTy(1) &&
              EltTy->getPrimitiveSizeInBits() > 128);
    return false;
  });
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // The PowerPC default gives instruction count the highest priority.
  // If LsrNoInsnsCost is set, fall back to the default implementation.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  else
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}

bool PPCTTIImpl::isNumRegsMajorCostOfLSR() {
  return false;
}

bool PPCTTIImpl::shouldBuildRelLookupTables() const {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  // XCOFF hasn't implemented lowerRelativeReference; disable non-ELF for now.
  if (!TM.isELFv2ABI())
    return false;
  return BaseT::shouldBuildRelLookupTables();
}

bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
1339