1 //===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements a TargetTransformInfo analysis pass specific to the
11 // AMDGPU target machine. It uses the target's detailed information to provide
12 // more precise answers to certain TTI queries, while letting the
13 // target-independent and default TTI implementations handle the rest.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "AMDGPUTargetTransformInfo.h"
18 #include "AMDGPUTargetMachine.h"
19 #include "llvm/Analysis/LoopInfo.h"
20 #include "llvm/Analysis/ValueTracking.h"
21 #include "llvm/IR/IntrinsicsAMDGPU.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/PatternMatch.h"
24 #include "llvm/Support/KnownBits.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "AMDGPUtti"
29 
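// The options below are hidden cl::opt flags. As a usage sketch, they can
// typically be set with "-mllvm <flag>=<value>" when driving the compiler
// through clang, e.g. -mllvm -amdgpu-unroll-threshold-private=4000
// (illustrative value only), or passed directly to opt/llc.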
30 static cl::opt<unsigned> UnrollThresholdPrivate(
31   "amdgpu-unroll-threshold-private",
32   cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
33   cl::init(2700), cl::Hidden);
34 
35 static cl::opt<unsigned> UnrollThresholdLocal(
36   "amdgpu-unroll-threshold-local",
37   cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
38   cl::init(1000), cl::Hidden);
39 
40 static cl::opt<unsigned> UnrollThresholdIf(
41   "amdgpu-unroll-threshold-if",
42   cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
43   cl::init(200), cl::Hidden);
44 
45 static cl::opt<bool> UnrollRuntimeLocal(
46   "amdgpu-unroll-runtime-local",
47   cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
48   cl::init(true), cl::Hidden);
49 
50 static cl::opt<bool> UseLegacyDA(
51   "amdgpu-use-legacy-divergence-analysis",
52   cl::desc("Enable legacy divergence analysis for AMDGPU"),
53   cl::init(false), cl::Hidden);
54 
55 static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
56     "amdgpu-unroll-max-block-to-analyze",
57     cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
58     cl::init(32), cl::Hidden);
59 
60 static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
61                                        cl::Hidden, cl::init(4000),
62                                        cl::desc("Cost of alloca argument"));
63 
64 // If the amount of scratch memory to eliminate exceeds our ability to allocate
65 // it into registers, we gain nothing by aggressively inlining functions based
66 // on that heuristic.
67 static cl::opt<unsigned>
68     ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
69                     cl::init(256),
70                     cl::desc("Maximum alloca size to use for inline cost"));
71 
72 // Inliner constraint to achieve reasonable compilation time.
73 static cl::opt<size_t> InlineMaxBB(
74     "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
75     cl::desc("Maximum number of BBs allowed in a function after inlining"
76              " (compile time constraint)"));
77 
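// Returns true if \p Cond depends, within a bounded recursion depth (10), on a
// PHI node that does not belong to any subloop of \p L, i.e. a PHI local to the
// loop being analyzed. Used below to award an unroll bonus to "if" conditions
// fed by such PHIs.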
78 static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
79                               unsigned Depth = 0) {
80   const Instruction *I = dyn_cast<Instruction>(Cond);
81   if (!I)
82     return false;
83 
84   for (const Value *V : I->operand_values()) {
85     if (!L->contains(I))
86       continue;
87     if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
88       if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
89                   return SubLoop->contains(PHI); }))
90         return true;
91     } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
92       return true;
93   }
94   return false;
95 }
96 
97 AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
98     : BaseT(TM, F.getParent()->getDataLayout()),
99       TargetTriple(TM->getTargetTriple()),
100       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
101       TLI(ST->getTargetLowering()) {}
102 
103 void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
104                                             TTI::UnrollingPreferences &UP) {
105   const Function &F = *L->getHeader()->getParent();
106   UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
107   UP.MaxCount = std::numeric_limits<unsigned>::max();
108   UP.Partial = true;
109 
110   // A conditional branch in a loop back edge needs 3 additional exec
111   // manipulations on average.
112   UP.BEInsns += 3;
113 
114   // TODO: Do we want runtime unrolling?
115 
116   // Maximum alloca size that can fit in registers. Reserve 16 registers.
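  // With 256 total 32-bit VGPRs, that leaves (256 - 16) * 4 = 960 bytes of
  // alloca that could plausibly be promoted into registers.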
117   const unsigned MaxAlloca = (256 - 16) * 4;
118   unsigned ThresholdPrivate = UnrollThresholdPrivate;
119   unsigned ThresholdLocal = UnrollThresholdLocal;
120 
121   // If this loop has the amdgpu.loop.unroll.threshold metadata, we will use
122   // the provided threshold value as the default for Threshold.
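  // For illustration (values made up), the metadata is expected to look
  // roughly like:
  //   br i1 %cond, label %loop, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, !1}
  //   !1 = !{!"amdgpu.loop.unroll.threshold", i32 100}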
123   if (MDNode *LoopUnrollThreshold =
124           findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
125     if (LoopUnrollThreshold->getNumOperands() == 2) {
126       ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
127           LoopUnrollThreshold->getOperand(1));
128       if (MetaThresholdValue) {
129         // We will also use the supplied value for PartialThreshold for now.
130         // We may introduce additional metadata if it becomes necessary in the
131         // future.
132         UP.Threshold = MetaThresholdValue->getSExtValue();
133         UP.PartialThreshold = UP.Threshold;
134         ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
135         ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
136       }
137     }
138   }
139 
140   unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
141   for (const BasicBlock *BB : L->getBlocks()) {
142     const DataLayout &DL = BB->getModule()->getDataLayout();
143     unsigned LocalGEPsSeen = 0;
144 
145     if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
146                return SubLoop->contains(BB); }))
147         continue; // Block belongs to an inner loop.
148 
149     for (const Instruction &I : *BB) {
150       // Unroll a loop which contains an "if" statement whose condition is
151       // defined by a PHI belonging to the loop. This may help to eliminate
152       // the if region and potentially even the PHI itself, saving on both
153       // divergence and registers used for the PHI.
154       // Add a small bonus for each such "if" statement.
155       if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
156         if (UP.Threshold < MaxBoost && Br->isConditional()) {
157           BasicBlock *Succ0 = Br->getSuccessor(0);
158           BasicBlock *Succ1 = Br->getSuccessor(1);
159           if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
160               (L->contains(Succ1) && L->isLoopExiting(Succ1)))
161             continue;
162           if (dependsOnLocalPhi(L, Br->getCondition())) {
163             UP.Threshold += UnrollThresholdIf;
164             LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
165                               << " for loop:\n"
166                               << *L << " due to " << *Br << '\n');
167             if (UP.Threshold >= MaxBoost)
168               return;
169           }
170         }
171         continue;
172       }
173 
174       const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
175       if (!GEP)
176         continue;
177 
178       unsigned AS = GEP->getAddressSpace();
179       unsigned Threshold = 0;
180       if (AS == AMDGPUAS::PRIVATE_ADDRESS)
181         Threshold = ThresholdPrivate;
182       else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
183         Threshold = ThresholdLocal;
184       else
185         continue;
186 
187       if (UP.Threshold >= Threshold)
188         continue;
189 
190       if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
191         const Value *Ptr = GEP->getPointerOperand();
192         const AllocaInst *Alloca =
193             dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
194         if (!Alloca || !Alloca->isStaticAlloca())
195           continue;
196         Type *Ty = Alloca->getAllocatedType();
197         unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
198         if (AllocaSize > MaxAlloca)
199           continue;
200       } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
201                  AS == AMDGPUAS::REGION_ADDRESS) {
202         LocalGEPsSeen++;
203         // Inhibit unrolling for local memory if we have seen addressing that
204         // is not based on a variable; most likely we will be unable to combine
205         // it. Do not unroll too-deep inner loops for local memory, to give an
206         // outer loop a chance to be unrolled for a more important reason.
207         if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
208             (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
209              !isa<Argument>(GEP->getPointerOperand())))
210           continue;
211         LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
212                           << *L << " due to LDS use.\n");
213         UP.Runtime = UnrollRuntimeLocal;
214       }
215 
216       // Check if GEP depends on a value defined by this loop itself.
217       bool HasLoopDef = false;
218       for (const Value *Op : GEP->operands()) {
219         const Instruction *Inst = dyn_cast<Instruction>(Op);
220         if (!Inst || L->isLoopInvariant(Op))
221           continue;
222 
223         if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
224              return SubLoop->contains(Inst); }))
225           continue;
226         HasLoopDef = true;
227         break;
228       }
229       if (!HasLoopDef)
230         continue;
231 
232       // We want to do whatever we can to limit the number of alloca
233       // instructions that make it through to the code generator.  Allocas
234       // require us to use indirect addressing, which is slow and prone to
235       // compiler bugs.  If this loop does an address calculation on an
236       // alloca pointer, then we want to use a higher-than-normal loop unroll
237       // threshold. This will give SROA a better chance to eliminate these
238       // allocas.
239       //
240       // We also want to have more unrolling for local memory to let ds
241       // instructions with different offsets combine.
242       //
243       // Don't use the maximum allowed value here as it will make some
244       // programs way too big.
245       UP.Threshold = Threshold;
246       LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
247                         << " for loop:\n"
248                         << *L << " due to " << *GEP << '\n');
249       if (UP.Threshold >= MaxBoost)
250         return;
251     }
252 
253     // If we got a GEP in a small BB from an inner loop, then increase the max
254     // trip count to analyze for a better cost estimation in unrolling.
255     if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
256       UP.MaxIterationsCountToAnalyze = 32;
257   }
258 }
259 
260 void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
261                                           TTI::PeelingPreferences &PP) {
262   BaseT::getPeelingPreferences(L, SE, PP);
263 }
264 
265 const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
266     // Codegen control options which don't matter.
267     AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
268     AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
269     AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
270     AMDGPU::FeatureUnalignedAccessMode,
271 
272     AMDGPU::FeatureAutoWaitcntBeforeBarrier,
273 
274     // Property of the kernel/environment which can't actually differ.
275     AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
276     AMDGPU::FeatureTrapHandler,
277 
278     // The default assumption needs to be that ECC is enabled, but no directly
279     // exposed operations depend on it, so it can be safely inlined.
280     AMDGPU::FeatureSRAMECC,
281 
282     // Perf-tuning features
283     AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};
284 
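// A descriptive note on the initializer below: MaxVGPRs is computed for the
// larger of the requested minimum waves-per-EU and the wave count implied by
// the maximum flat work-group size, i.e. the more conservative (smaller) VGPR
// budget of the two.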
285 GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
286     : BaseT(TM, F.getParent()->getDataLayout()),
287       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
288       TLI(ST->getTargetLowering()), CommonTTI(TM, F),
289       IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
290       MaxVGPRs(ST->getMaxNumVGPRs(
291           std::max(ST->getWavesPerEU(F).first,
292                    ST->getWavesPerEUForWorkGroup(
293                        ST->getFlatWorkGroupSizes(F).second)))) {
294   AMDGPU::SIModeRegisterDefaults Mode(F);
295   HasFP32Denormals = Mode.allFP32Denormals();
296   HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
297 }
298 
299 unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
300   // The concept of vector registers doesn't really exist. Some packed vector
301   // operations operate on the normal 32-bit registers.
302   return MaxVGPRs;
303 }
304 
305 unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
306   // This is really the number of registers to fill when vectorizing /
307   // interleaving loops, so we lie to avoid trying to use all registers.
308   return getHardwareNumberOfRegisters(Vec) >> 3;
309 }
310 
311 unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
312   const SIRegisterInfo *TRI = ST->getRegisterInfo();
313   const TargetRegisterClass *RC = TRI->getRegClass(RCID);
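  // Round the register class size up to a whole number of 32-bit registers;
  // e.g. a 64-bit class counts as 2 VGPRs, halving the reported count.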
314   unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
315   return getHardwareNumberOfRegisters(false) / NumVGPRs;
316 }
317 
318 TypeSize
319 GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
320   switch (K) {
321   case TargetTransformInfo::RGK_Scalar:
322     return TypeSize::getFixed(32);
323   case TargetTransformInfo::RGK_FixedWidthVector:
324     return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
325   case TargetTransformInfo::RGK_ScalableVector:
326     return TypeSize::getScalable(0);
327   }
328   llvm_unreachable("Unsupported register kind");
329 }
330 
331 unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
332   return 32;
333 }
334 
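// For loads and stores this caps the VF at one 128-bit vector (e.g. 32 * 4 / 8
// = 16 for i8 elements); for other opcodes a VF of 2 is reported only when
// 16-bit instructions (for 16-bit elements) or packed FP32 instructions (for
// 32-bit elements) are available, and 1 otherwise.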
335 unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
336   if (Opcode == Instruction::Load || Opcode == Instruction::Store)
337     return 32 * 4 / ElemWidth;
338   return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
339        : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
340        : 1;
341 }
342 
343 unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
344                                          unsigned ChainSizeInBytes,
345                                          VectorType *VecTy) const {
346   unsigned VecRegBitWidth = VF * LoadSize;
347   if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
348     // TODO: Support element-size less than 32bit?
349     return 128 / LoadSize;
350 
351   return VF;
352 }
353 
354 unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
355                                           unsigned ChainSizeInBytes,
356                                           VectorType *VecTy) const {
357   unsigned VecRegBitWidth = VF * StoreSize;
358   if (VecRegBitWidth > 128)
359     return 128 / StoreSize;
360 
361   return VF;
362 }
363 
364 unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
365   if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
366       AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
367       AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
368       AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
369     return 512;
370   }
371 
372   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
373     return 8 * ST->getMaxPrivateElementSize();
374 
375   // Common to flat, global, local and region. Assume for unknown addrspace.
376   return 128;
377 }
378 
379 bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
380                                             Align Alignment,
381                                             unsigned AddrSpace) const {
382   // We allow vectorization of flat stores, even though we may need to decompose
383   // them later if they may access private memory. We don't have enough context
384   // here, and legalization can handle it.
385   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
386     return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
387       ChainSizeInBytes <= ST->getMaxPrivateElementSize();
388   }
389   return true;
390 }
391 
392 bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
393                                              Align Alignment,
394                                              unsigned AddrSpace) const {
395   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
396 }
397 
398 bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
399                                               Align Alignment,
400                                               unsigned AddrSpace) const {
401   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
402 }
403 
404 // FIXME: Really we would like to issue multiple 128-bit loads and stores per
405 // iteration. Should we report a larger size and let it legalize?
406 //
407 // FIXME: Should we use narrower types for local/region, or account for when
408 // unaligned access is legal?
409 //
410 // FIXME: This could use fine tuning and microbenchmarks.
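// Illustrative examples of the choices below: with both sides 4-byte aligned,
// a global-to-global memcpy is expanded with <4 x i32> (16-byte) accesses; an
// alignment of exactly 2 gives i16 accesses; and copies touching LDS/region
// memory (at alignment other than 2) use <2 x i32>.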
411 Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
412                                             unsigned SrcAddrSpace,
413                                             unsigned DestAddrSpace,
414                                             unsigned SrcAlign,
415                                             unsigned DestAlign) const {
416   unsigned MinAlign = std::min(SrcAlign, DestAlign);
417 
418   // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
419   // hardware into byte accesses. If you assume all alignments are equally
420   // probable, it's more efficient on average to use short accesses for this
421   // case.
422   if (MinAlign == 2)
423     return Type::getInt16Ty(Context);
424 
425   // Not all subtargets have 128-bit DS instructions, and we currently don't
426   // form them by default.
427   if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
428       SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
429       DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
430       DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
431     return FixedVectorType::get(Type::getInt32Ty(Context), 2);
432   }
433 
434   // Global memory works best with 16-byte accesses. Private memory will also
435   // hit this, although they'll be decomposed.
436   return FixedVectorType::get(Type::getInt32Ty(Context), 4);
437 }
438 
439 void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
440   SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
441   unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
442   unsigned SrcAlign, unsigned DestAlign) const {
443   assert(RemainingBytes < 16);
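  // For illustration: RemainingBytes == 11 yields {i64, i16, i8} when
  // MinAlign != 2, and {i16, i16, i16, i16, i16, i8} when MinAlign == 2.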
444 
445   unsigned MinAlign = std::min(SrcAlign, DestAlign);
446 
447   if (MinAlign != 2) {
448     Type *I64Ty = Type::getInt64Ty(Context);
449     while (RemainingBytes >= 8) {
450       OpsOut.push_back(I64Ty);
451       RemainingBytes -= 8;
452     }
453 
454     Type *I32Ty = Type::getInt32Ty(Context);
455     while (RemainingBytes >= 4) {
456       OpsOut.push_back(I32Ty);
457       RemainingBytes -= 4;
458     }
459   }
460 
461   Type *I16Ty = Type::getInt16Ty(Context);
462   while (RemainingBytes >= 2) {
463     OpsOut.push_back(I16Ty);
464     RemainingBytes -= 2;
465   }
466 
467   Type *I8Ty = Type::getInt8Ty(Context);
468   while (RemainingBytes) {
469     OpsOut.push_back(I8Ty);
470     --RemainingBytes;
471   }
472 }
473 
474 unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
475   // Disable unrolling if the loop is not vectorized.
476   // TODO: Enable this again.
477   if (VF == 1)
478     return 1;
479 
480   return 8;
481 }
482 
483 bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
484                                        MemIntrinsicInfo &Info) const {
485   switch (Inst->getIntrinsicID()) {
486   case Intrinsic::amdgcn_atomic_inc:
487   case Intrinsic::amdgcn_atomic_dec:
488   case Intrinsic::amdgcn_ds_ordered_add:
489   case Intrinsic::amdgcn_ds_ordered_swap:
490   case Intrinsic::amdgcn_ds_fadd:
491   case Intrinsic::amdgcn_ds_fmin:
492   case Intrinsic::amdgcn_ds_fmax: {
493     auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
494     auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
495     if (!Ordering || !Volatile)
496       return false; // Invalid.
497 
498     unsigned OrderingVal = Ordering->getZExtValue();
499     if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
500       return false;
501 
502     Info.PtrVal = Inst->getArgOperand(0);
503     Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
504     Info.ReadMem = true;
505     Info.WriteMem = true;
506     Info.IsVolatile = !Volatile->isNullValue();
507     return true;
508   }
509   default:
510     return false;
511   }
512 }
513 
514 InstructionCost GCNTTIImpl::getArithmeticInstrCost(
515     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
516     TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
517     TTI::OperandValueProperties Opd1PropInfo,
518     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
519     const Instruction *CxtI) {
520   EVT OrigTy = TLI->getValueType(DL, Ty);
521   if (!OrigTy.isSimple()) {
522     // FIXME: We're having to query the throughput cost so that the basic
523     // implementation tries to generate legalization and scalarization costs.
524     // Maybe we could hoist the scalarization code here?
525     if (CostKind != TTI::TCK_CodeSize)
526       return BaseT::getArithmeticInstrCost(Opcode, Ty, TTI::TCK_RecipThroughput,
527                                            Opd1Info, Opd2Info, Opd1PropInfo,
528                                            Opd2PropInfo, Args, CxtI);
529     // Scalarization
530 
531     // Check if any of the operands are vector operands.
532     int ISD = TLI->InstructionOpcodeToISD(Opcode);
533     assert(ISD && "Invalid opcode");
534 
535     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
536 
537     bool IsFloat = Ty->isFPOrFPVectorTy();
538     // Assume that floating point arithmetic operations cost twice as much as
539     // integer operations.
540     unsigned OpCost = (IsFloat ? 2 : 1);
541 
542     if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
543       // The operation is legal. Assume it costs 1.
544       // TODO: Once we have extract/insert subvector cost we need to use them.
545       return LT.first * OpCost;
546     }
547 
548     if (!TLI->isOperationExpand(ISD, LT.second)) {
549       // If the operation is custom lowered, then assume that the code is twice
550       // as expensive.
551       return LT.first * 2 * OpCost;
552     }
553 
554     // Else, assume that we need to scalarize this op.
555     // TODO: If one of the types get legalized by splitting, handle this
556     // similarly to what getCastInstrCost() does.
557     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
558       unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
559       InstructionCost Cost = getArithmeticInstrCost(
560           Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
561           Opd1PropInfo, Opd2PropInfo, Args, CxtI);
562       // Return the cost of multiple scalar invocations plus the cost of
563       // inserting and extracting the values.
564       SmallVector<Type *> Tys(Args.size(), Ty);
565       return getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
566     }
567 
568     // We don't know anything about this scalar instruction.
569     return OpCost;
570   }
571 
572   // Legalize the type.
573   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
574   int ISD = TLI->InstructionOpcodeToISD(Opcode);
575 
576   // Because we don't have any legal vector operations, but the legal types, we
577   // need to account for split vectors.
578   unsigned NElts = LT.second.isVector() ?
579     LT.second.getVectorNumElements() : 1;
580 
581   MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
582 
583   switch (ISD) {
584   case ISD::SHL:
585   case ISD::SRL:
586   case ISD::SRA:
587     if (SLT == MVT::i64)
588       return get64BitInstrCost(CostKind) * LT.first * NElts;
589 
590     if (ST->has16BitInsts() && SLT == MVT::i16)
591       NElts = (NElts + 1) / 2;
592 
593     // i32
594     return getFullRateInstrCost() * LT.first * NElts;
595   case ISD::ADD:
596   case ISD::SUB:
597   case ISD::AND:
598   case ISD::OR:
599   case ISD::XOR:
600     if (SLT == MVT::i64) {
601       // and, or and xor are typically split into 2 VALU instructions.
602       return 2 * getFullRateInstrCost() * LT.first * NElts;
603     }
604 
605     if (ST->has16BitInsts() && SLT == MVT::i16)
606       NElts = (NElts + 1) / 2;
607 
608     return LT.first * NElts * getFullRateInstrCost();
609   case ISD::MUL: {
610     const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
611     if (SLT == MVT::i64) {
612       const int FullRateCost = getFullRateInstrCost();
613       return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
614     }
615 
616     if (ST->has16BitInsts() && SLT == MVT::i16)
617       NElts = (NElts + 1) / 2;
618 
619     // i32
620     return QuarterRateCost * NElts * LT.first;
621   }
622   case ISD::FMUL:
623     // Check for a possible fusion of {fadd|fsub}(a, fmul(b,c)) and return a
624     // zero cost for the fmul(b,c), assuming the fadd|fsub will be charged the
625     // estimated cost of the whole fused operation.
626     if (CxtI && CxtI->hasOneUse())
627       if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
628         const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
629         if (OPC == ISD::FADD || OPC == ISD::FSUB) {
630           if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
631             return TargetTransformInfo::TCC_Free;
632           if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
633             return TargetTransformInfo::TCC_Free;
634 
635           // Assume all types may be fused when contract/unsafe flags allow it.
636           const TargetOptions &Options = TLI->getTargetMachine().Options;
637           if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
638               Options.UnsafeFPMath ||
639               (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
640             return TargetTransformInfo::TCC_Free;
641         }
642       }
643     LLVM_FALLTHROUGH;
644   case ISD::FADD:
645   case ISD::FSUB:
646     if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
647       NElts = (NElts + 1) / 2;
648     if (SLT == MVT::f64)
649       return LT.first * NElts * get64BitInstrCost(CostKind);
650 
651     if (ST->has16BitInsts() && SLT == MVT::f16)
652       NElts = (NElts + 1) / 2;
653 
654     if (SLT == MVT::f32 || SLT == MVT::f16)
655       return LT.first * NElts * getFullRateInstrCost();
656     break;
657   case ISD::FDIV:
658   case ISD::FREM:
659     // FIXME: frem should be handled separately. The fdiv in it is most of it,
660     // but the current lowering is also not entirely correct.
661     if (SLT == MVT::f64) {
662       int Cost = 7 * get64BitInstrCost(CostKind) +
663                  getQuarterRateInstrCost(CostKind) +
664                  3 * getHalfRateInstrCost(CostKind);
665       // Add cost of workaround.
666       if (!ST->hasUsableDivScaleConditionOutput())
667         Cost += 3 * getFullRateInstrCost();
668 
669       return LT.first * Cost * NElts;
670     }
671 
672     if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
673       // TODO: This is more complicated, unsafe flags etc.
674       if ((SLT == MVT::f32 && !HasFP32Denormals) ||
675           (SLT == MVT::f16 && ST->has16BitInsts())) {
676         return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
677       }
678     }
679 
680     if (SLT == MVT::f16 && ST->has16BitInsts()) {
681       // 2 x v_cvt_f32_f16
682       // f32 rcp
683       // f32 fmul
684       // v_cvt_f16_f32
685       // f16 div_fixup
686       int Cost =
687           4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
688       return LT.first * Cost * NElts;
689     }
690 
691     if (SLT == MVT::f32 || SLT == MVT::f16) {
692       // 4 more v_cvt_* instructions are needed without f16 instruction support.
693       int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
694                  1 * getQuarterRateInstrCost(CostKind);
695 
696       if (!HasFP32Denormals) {
697         // FP mode switches.
698         Cost += 2 * getFullRateInstrCost();
699       }
700 
701       return LT.first * NElts * Cost;
702     }
703     break;
704   case ISD::FNEG:
705     // Use the backend's estimation. If fneg is not free, each element will
706     // cost one additional instruction.
707     return TLI->isFNegFree(SLT) ? 0 : NElts;
708   default:
709     break;
710   }
711 
712   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
713                                        Opd1PropInfo, Opd2PropInfo, Args, CxtI);
714 }
715 
716 // Return true if there's a potential benefit from using v2f16/v2i16
717 // instructions for an intrinsic, even if it requires nontrivial legalization.
718 static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
719   switch (ID) {
720   case Intrinsic::fma: // TODO: fmuladd
721   // There's a small benefit to using vector ops in the legalized code.
722   case Intrinsic::round:
723   case Intrinsic::uadd_sat:
724   case Intrinsic::usub_sat:
725   case Intrinsic::sadd_sat:
726   case Intrinsic::ssub_sat:
727     return true;
728   default:
729     return false;
730   }
731 }
732 
733 InstructionCost
734 GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
735                                   TTI::TargetCostKind CostKind) {
736   if (ICA.getID() == Intrinsic::fabs)
737     return 0;
738 
739   if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
740     return BaseT::getIntrinsicInstrCost(ICA, CostKind);
741 
742   Type *RetTy = ICA.getReturnType();
743   EVT OrigTy = TLI->getValueType(DL, RetTy);
744   if (!OrigTy.isSimple()) {
745     if (CostKind != TTI::TCK_CodeSize)
746       return BaseT::getIntrinsicInstrCost(ICA, CostKind);
747 
748     // TODO: Combine these two logic paths.
749     if (ICA.isTypeBasedOnly())
750       return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
751 
752     unsigned RetVF =
753         (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements()
754                              : 1);
755     const IntrinsicInst *I = ICA.getInst();
756     const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
757     FastMathFlags FMF = ICA.getFlags();
758     // Assume that we need to scalarize this intrinsic.
759 
760     // Compute the scalarization overhead based on Args for a vector
761     // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
762     // the CostModel will pass a vector RetTy with VF == 1.
763     InstructionCost ScalarizationCost = InstructionCost::getInvalid();
764     if (RetVF > 1) {
765       ScalarizationCost = 0;
766       if (!RetTy->isVoidTy())
767         ScalarizationCost +=
768             getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
769       ScalarizationCost +=
770           getOperandsScalarizationOverhead(Args, ICA.getArgTypes());
771     }
772 
773     IntrinsicCostAttributes Attrs(ICA.getID(), RetTy, ICA.getArgTypes(), FMF, I,
774                                   ScalarizationCost);
775     return getIntrinsicInstrCost(Attrs, CostKind);
776   }
777 
778   // Legalize the type.
779   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
780 
781   unsigned NElts = LT.second.isVector() ?
782     LT.second.getVectorNumElements() : 1;
783 
784   MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
785 
786   if (SLT == MVT::f64)
787     return LT.first * NElts * get64BitInstrCost(CostKind);
788 
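  // f16 with 16-bit instructions and f32 with packed-FP32 support map two
  // elements onto one packed instruction, so halve the element count.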
789   if ((ST->has16BitInsts() && SLT == MVT::f16) ||
790       (ST->hasPackedFP32Ops() && SLT == MVT::f32))
791     NElts = (NElts + 1) / 2;
792 
793   // TODO: Get more refined intrinsic costs?
794   unsigned InstRate = getQuarterRateInstrCost(CostKind);
795 
796   switch (ICA.getID()) {
797   case Intrinsic::fma:
798     InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
799                                    : getQuarterRateInstrCost(CostKind);
800     break;
801   case Intrinsic::uadd_sat:
802   case Intrinsic::usub_sat:
803   case Intrinsic::sadd_sat:
804   case Intrinsic::ssub_sat:
805     static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
806     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
807       NElts = 1;
808     break;
809   }
810 
811   return LT.first * NElts * InstRate;
812 }
813 
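// With the constants below, a throughput/latency query costs an unconditional
// branch 4, a conditional branch 7, and a return 10, while the size-oriented
// cost kinds use 1, 5, and 1 respectively; a switch is priced per case at
// (CBrCost + 1).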
814 InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
815                                            TTI::TargetCostKind CostKind,
816                                            const Instruction *I) {
817   assert((I == nullptr || I->getOpcode() == Opcode) &&
818          "Opcode should reflect passed instruction.");
819   const bool SCost =
820       (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency);
821   const int CBrCost = SCost ? 5 : 7;
822   switch (Opcode) {
823   case Instruction::Br: {
824     // Branch instruction takes about 4 slots on gfx900.
825     auto BI = dyn_cast_or_null<BranchInst>(I);
826     if (BI && BI->isUnconditional())
827       return SCost ? 1 : 4;
828     // Assume a conditional branch takes 3 additional exec-manipulation
829     // instructions on average.
830     return CBrCost;
831   }
832   case Instruction::Switch: {
833     auto SI = dyn_cast_or_null<SwitchInst>(I);
834     // Each case (including the default) takes 1 cmp + 1 cbr instruction on
835     // average.
836     return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
837   }
838   case Instruction::Ret:
839     return SCost ? 1 : 10;
840   }
841   return BaseT::getCFInstrCost(Opcode, CostKind, I);
842 }
843 
844 InstructionCost
845 GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
846                                        Optional<FastMathFlags> FMF,
847                                        TTI::TargetCostKind CostKind) {
848   if (TTI::requiresOrderedReduction(FMF))
849     return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
850 
851   EVT OrigTy = TLI->getValueType(DL, Ty);
852 
853   // Computes the cost on targets that have packed math instructions (which
854   // support 16-bit types only).
855   if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
856     return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
857 
858   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
859   return LT.first * getFullRateInstrCost();
860 }
861 
862 InstructionCost
863 GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
864                                    bool IsUnsigned,
865                                    TTI::TargetCostKind CostKind) {
866   EVT OrigTy = TLI->getValueType(DL, Ty);
867 
868   // Computes the cost on targets that have packed math instructions (which
869   // support 16-bit types only).
870   if (!ST->hasVOP3PInsts() || OrigTy.getScalarSizeInBits() != 16)
871     return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
872 
873   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
874   return LT.first * getHalfRateInstrCost(CostKind);
875 }
876 
877 InstructionCost GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
878                                                unsigned Index) {
879   switch (Opcode) {
880   case Instruction::ExtractElement:
881   case Instruction::InsertElement: {
882     unsigned EltSize
883       = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
884     if (EltSize < 32) {
885       if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
886         return 0;
887       return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
888     }
889 
890     // Extracts are just reads of a subregister, so are free. Inserts are
891     // considered free because we don't want to have any cost for scalarizing
892     // operations, and we don't have to copy into a different register class.
893 
894     // Dynamic indexing isn't free and is best avoided.
895     return Index == ~0u ? 2 : 0;
896   }
897   default:
898     return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
899   }
900 }
901 
902 /// Analyze if the results of inline asm are divergent. If \p Indices is empty,
903 /// this is analyzing the collective result of all output registers. Otherwise,
904 /// this only queries a specific result index, for asm that returns multiple
905 /// registers in a struct.
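/// For illustration: an output constrained to an SGPR (e.g. "=s") is treated
/// as uniform, while a VGPR ("=v") or AGPR ("=a") output is reported as
/// divergent.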
906 bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
907   const CallInst *CI, ArrayRef<unsigned> Indices) const {
908   // TODO: Handle complex extract indices
909   if (Indices.size() > 1)
910     return true;
911 
912   const DataLayout &DL = CI->getModule()->getDataLayout();
913   const SIRegisterInfo *TRI = ST->getRegisterInfo();
914   TargetLowering::AsmOperandInfoVector TargetConstraints =
915       TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);
916 
917   const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];
918 
919   int OutputIdx = 0;
920   for (auto &TC : TargetConstraints) {
921     if (TC.Type != InlineAsm::isOutput)
922       continue;
923 
924     // Skip outputs we don't care about.
925     if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
926       continue;
927 
928     TLI->ComputeConstraintToUse(TC, SDValue());
929 
930     Register AssignedReg;
931     const TargetRegisterClass *RC;
932     std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
933       TRI, TC.ConstraintCode, TC.ConstraintVT);
934     if (AssignedReg) {
935       // FIXME: This is a workaround for getRegForInlineAsmConstraint
936       // returning VS_32
937       RC = TRI->getPhysRegClass(AssignedReg);
938     }
939 
940     // For AGPR constraints null is returned on subtargets without AGPRs, so
941     // assume divergent for null.
942     if (!RC || !TRI->isSGPRClass(RC))
943       return true;
944   }
945 
946   return false;
947 }
948 
949 /// \returns true if the new GPU divergence analysis is enabled.
950 bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
951   return !UseLegacyDA;
952 }
953 
954 /// \returns true if the result of the value could potentially be
955 /// different across workitems in a wavefront.
956 bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
957   if (const Argument *A = dyn_cast<Argument>(V))
958     return !AMDGPU::isArgPassedInSGPR(A);
959 
960   // Loads from the private and flat address spaces are divergent, because
961   // threads can execute the load instruction with the same inputs and get
962   // different results.
963   //
964   // All other loads are not divergent, because if threads issue loads with the
965   // same arguments, they will always get the same result.
966   if (const LoadInst *Load = dyn_cast<LoadInst>(V))
967     return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
968            Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
969 
970   // Atomics are divergent because they are executed sequentially: when an
971   // atomic operation refers to the same address in each thread, then each
972   // thread after the first sees the value written by the previous thread as
973   // the original value.
974   if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
975     return true;
976 
977   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
978     return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
979 
980   // Assume all function calls are a source of divergence.
981   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
982     if (CI->isInlineAsm())
983       return isInlineAsmSourceOfDivergence(CI);
984     return true;
985   }
986 
987   // Assume all function calls are a source of divergence.
988   if (isa<InvokeInst>(V))
989     return true;
990 
991   return false;
992 }
993 
994 bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
995   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
996     switch (Intrinsic->getIntrinsicID()) {
997     default:
998       return false;
999     case Intrinsic::amdgcn_readfirstlane:
1000     case Intrinsic::amdgcn_readlane:
1001     case Intrinsic::amdgcn_icmp:
1002     case Intrinsic::amdgcn_fcmp:
1003     case Intrinsic::amdgcn_ballot:
1004     case Intrinsic::amdgcn_if_break:
1005       return true;
1006     }
1007   }
1008 
1009   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
1010     if (CI->isInlineAsm())
1011       return !isInlineAsmSourceOfDivergence(CI);
1012     return false;
1013   }
1014 
1015   const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
1016   if (!ExtValue)
1017     return false;
1018 
1019   const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
1020   if (!CI)
1021     return false;
1022 
1023   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
1024     switch (Intrinsic->getIntrinsicID()) {
1025     default:
1026       return false;
1027     case Intrinsic::amdgcn_if:
1028     case Intrinsic::amdgcn_else: {
1029       ArrayRef<unsigned> Indices = ExtValue->getIndices();
1030       return Indices.size() == 1 && Indices[0] == 1;
1031     }
1032     }
1033   }
1034 
1035   // If we have inline asm returning mixed SGPR and VGPR results, we inferred
1036   // divergence for the overall struct return. We need to override that in the
1037   // case where we're extracting an SGPR component here.
1038   if (CI->isInlineAsm())
1039     return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());
1040 
1041   return false;
1042 }
1043 
1044 bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1045                                             Intrinsic::ID IID) const {
1046   switch (IID) {
1047   case Intrinsic::amdgcn_atomic_inc:
1048   case Intrinsic::amdgcn_atomic_dec:
1049   case Intrinsic::amdgcn_ds_fadd:
1050   case Intrinsic::amdgcn_ds_fmin:
1051   case Intrinsic::amdgcn_ds_fmax:
1052   case Intrinsic::amdgcn_is_shared:
1053   case Intrinsic::amdgcn_is_private:
1054     OpIndexes.push_back(0);
1055     return true;
1056   default:
1057     return false;
1058   }
1059 }
1060 
1061 Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
1062                                                     Value *OldV,
1063                                                     Value *NewV) const {
1064   auto IntrID = II->getIntrinsicID();
1065   switch (IntrID) {
1066   case Intrinsic::amdgcn_atomic_inc:
1067   case Intrinsic::amdgcn_atomic_dec:
1068   case Intrinsic::amdgcn_ds_fadd:
1069   case Intrinsic::amdgcn_ds_fmin:
1070   case Intrinsic::amdgcn_ds_fmax: {
1071     const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
1072     if (!IsVolatile->isZero())
1073       return nullptr;
1074     Module *M = II->getParent()->getParent()->getParent();
1075     Type *DestTy = II->getType();
1076     Type *SrcTy = NewV->getType();
1077     Function *NewDecl =
1078         Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
1079     II->setArgOperand(0, NewV);
1080     II->setCalledFunction(NewDecl);
1081     return II;
1082   }
1083   case Intrinsic::amdgcn_is_shared:
1084   case Intrinsic::amdgcn_is_private: {
1085     unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
1086       AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
1087     unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1088     LLVMContext &Ctx = NewV->getType()->getContext();
1089     ConstantInt *NewVal = (TrueAS == NewAS) ?
1090       ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
1091     return NewVal;
1092   }
1093   case Intrinsic::ptrmask: {
1094     unsigned OldAS = OldV->getType()->getPointerAddressSpace();
1095     unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1096     Value *MaskOp = II->getArgOperand(1);
1097     Type *MaskTy = MaskOp->getType();
1098 
1099     bool DoTruncate = false;
1100 
1101     const GCNTargetMachine &TM =
1102         static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
1103     if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
1104       // All valid 64-bit to 32-bit casts work by chopping off the high
1105       // bits. Any mask that only clears the low bits will also apply in the
1106       // new address space.
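      // For example, when rewriting a flat (64-bit) pointer to an LDS (32-bit)
      // one, a mask such as 0xFFFFFFFFFFFF0000 has at least 32 leading ones and
      // can simply be truncated to i32.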
1107       if (DL.getPointerSizeInBits(OldAS) != 64 ||
1108           DL.getPointerSizeInBits(NewAS) != 32)
1109         return nullptr;
1110 
1111       // TODO: Do we need to thread more context in here?
1112       KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
1113       if (Known.countMinLeadingOnes() < 32)
1114         return nullptr;
1115 
1116       DoTruncate = true;
1117     }
1118 
1119     IRBuilder<> B(II);
1120     if (DoTruncate) {
1121       MaskTy = B.getInt32Ty();
1122       MaskOp = B.CreateTrunc(MaskOp, MaskTy);
1123     }
1124 
1125     return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
1126                              {NewV, MaskOp});
1127   }
1128   default:
1129     return nullptr;
1130   }
1131 }
1132 
1133 InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1134                                            VectorType *VT, ArrayRef<int> Mask,
1135                                            int Index, VectorType *SubTp) {
1136   Kind = improveShuffleKindFromMask(Kind, Mask);
1137   if (ST->hasVOP3PInsts()) {
1138     if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
1139         DL.getTypeSizeInBits(VT->getElementType()) == 16) {
1140       // With op_sel, VOP3P instructions can freely access the low or high
1141       // half of a register, so any swizzle is free.
1142 
1143       switch (Kind) {
1144       case TTI::SK_Broadcast:
1145       case TTI::SK_Reverse:
1146       case TTI::SK_PermuteSingleSrc:
1147         return 0;
1148       default:
1149         break;
1150       }
1151     }
1152   }
1153 
1154   return BaseT::getShuffleCost(Kind, VT, Mask, Index, SubTp);
1155 }
1156 
1157 bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
1158                                      const Function *Callee) const {
1159   const TargetMachine &TM = getTLI()->getTargetMachine();
1160   const GCNSubtarget *CallerST
1161     = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
1162   const GCNSubtarget *CalleeST
1163     = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
1164 
1165   const FeatureBitset &CallerBits = CallerST->getFeatureBits();
1166   const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
1167 
1168   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
1169   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
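  // The callee's remaining (non-ignored) features must be a subset of the
  // caller's; a callee relying on features the caller lacks cannot be inlined.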
1170   if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
1171     return false;
1172 
1173   // FIXME: dx10_clamp can just take the caller's setting, but there seems to
1174   // be no way to support merging for backend-defined attributes.
1175   AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
1176   AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
1177   if (!CallerMode.isInlineCompatible(CalleeMode))
1178     return false;
1179 
1180   if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
1181       Callee->hasFnAttribute(Attribute::InlineHint))
1182     return true;
1183 
1184   // Hack to make compile times reasonable.
1185   if (InlineMaxBB) {
1186     // Single BB does not increase total BB amount.
1187     // A single BB does not increase the total BB count.
1188       return true;
1189     size_t BBSize = Caller->size() + Callee->size() - 1;
1190     return BBSize <= InlineMaxBB;
1191   }
1192 
1193   return true;
1194 }
1195 
1196 unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
1197   // If a pointer to a private array is passed into a function,
1198   // it will not be optimized out, leaving scratch usage behind.
1199   // Increase the inline threshold to allow inlining in this case.
1200   uint64_t AllocaSize = 0;
1201   SmallPtrSet<const AllocaInst *, 8> AIVisited;
1202   for (Value *PtrArg : CB->args()) {
1203     PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
1204     if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
1205                 Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
1206       continue;
1207 
1208     PtrArg = getUnderlyingObject(PtrArg);
1209     if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
1210       if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1211         continue;
1212       AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1213       // If the amount of stack memory is excessive, we will not be able
1214       // to get rid of the scratch anyway, so bail out.
1215       if (AllocaSize > ArgAllocaCutoff) {
1216         AllocaSize = 0;
1217         break;
1218       }
1219     }
1220   }
1221   if (AllocaSize)
1222     return ArgAllocaCost;
1223   return 0;
1224 }
1225 
1226 void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1227                                          TTI::UnrollingPreferences &UP) {
1228   CommonTTI.getUnrollingPreferences(L, SE, UP);
1229 }
1230 
1231 void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1232                                        TTI::PeelingPreferences &PP) {
1233   CommonTTI.getPeelingPreferences(L, SE, PP);
1234 }
1235 
1236 int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
1237   return ST->hasFullRate64Ops()
1238              ? getFullRateInstrCost()
1239              : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
1240                                       : getQuarterRateInstrCost(CostKind);
1241 }
1242 
1243 R600TTIImpl::R600TTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
1244     : BaseT(TM, F.getParent()->getDataLayout()),
1245       ST(static_cast<const R600Subtarget *>(TM->getSubtargetImpl(F))),
1246       TLI(ST->getTargetLowering()), CommonTTI(TM, F) {}
1247 
1248 unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
1249   return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
1250 }
1251 
1252 unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
1253   return getHardwareNumberOfRegisters(Vec);
1254 }
1255 
1256 TypeSize
1257 R600TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
1258   return TypeSize::getFixed(32);
1259 }
1260 
1261 unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
1262   return 32;
1263 }
1264 
1265 unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
1266   if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
1267       AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
1268     return 128;
1269   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1270       AddrSpace == AMDGPUAS::REGION_ADDRESS)
1271     return 64;
1272   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
1273     return 32;
1274 
1275   if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
1276       AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
1277       (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
1278       AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
1279     return 128;
1280   llvm_unreachable("unhandled address space");
1281 }
1282 
1283 bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
1284                                              Align Alignment,
1285                                              unsigned AddrSpace) const {
1286   // We allow vectorization of flat stores, even though we may need to decompose
1287   // them later if they may access private memory. We don't have enough context
1288   // here, and legalization can handle it.
1289   return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
1290 }
1291 
1292 bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
1293                                               Align Alignment,
1294                                               unsigned AddrSpace) const {
1295   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
1296 }
1297 
1298 bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
1299                                                Align Alignment,
1300                                                unsigned AddrSpace) const {
1301   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
1302 }
1303 
1304 unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1305   // Disable unrolling if the loop is not vectorized.
1306   // TODO: Enable this again.
1307   if (VF == 1)
1308     return 1;
1309 
1310   return 8;
1311 }
1312 
1313 InstructionCost R600TTIImpl::getCFInstrCost(unsigned Opcode,
1314                                             TTI::TargetCostKind CostKind,
1315                                             const Instruction *I) {
1316   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1317     return Opcode == Instruction::PHI ? 0 : 1;
1318 
1319   // XXX - For some reason this isn't called for switch.
1320   switch (Opcode) {
1321   case Instruction::Br:
1322   case Instruction::Ret:
1323     return 10;
1324   default:
1325     return BaseT::getCFInstrCost(Opcode, CostKind, I);
1326   }
1327 }
1328 
1329 InstructionCost R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
1330                                                 unsigned Index) {
1331   switch (Opcode) {
1332   case Instruction::ExtractElement:
1333   case Instruction::InsertElement: {
1334     unsigned EltSize
1335       = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
1336     if (EltSize < 32) {
1337       return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
1338     }
1339 
1340     // Extracts are just reads of a subregister, so are free. Inserts are
1341     // considered free because we don't want to have any cost for scalarizing
1342     // operations, and we don't have to copy into a different register class.
1343 
1344     // Dynamic indexing isn't free and is best avoided.
1345     return Index == ~0u ? 2 : 0;
1346   }
1347   default:
1348     return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
1349   }
1350 }
1351 
1352 void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1353                                           TTI::UnrollingPreferences &UP) {
1354   CommonTTI.getUnrollingPreferences(L, SE, UP);
1355 }
1356 
1357 void R600TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1358                                         TTI::PeelingPreferences &PP) {
1359   CommonTTI.getPeelingPreferences(L, SE, PP);
1360 }
1361