//===-- NVPTXTargetTransformInfo.h - NVPTX specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the NVPTX target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H

#include "NVPTXTargetMachine.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

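// A rough sketch of how this class is used: the target machine hands an
// instance of it to the optimizer, along the lines of
//   return TargetTransformInfo(NVPTXTTIImpl(this, F));
// from NVPTXTargetMachine::getTargetTransformInfo(F), so the TTI queries
// below are answered with NVPTX-specific information.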
class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
  typedef BasicTTIImplBase<NVPTXTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const NVPTXSubtarget *ST;
  const NVPTXTargetLowering *TLI;

  const NVPTXSubtarget *getST() const { return ST; }
  const NVPTXTargetLowering *getTLI() const { return TLI; }

public:
  explicit NVPTXTTIImpl(const NVPTXTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl()),
        TLI(ST->getTargetLowering()) {}

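  // NVPTX is a SIMT target: threads within a warp may take different paths
  // through a conditional branch, so report that branches can be divergent.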
  bool hasBranchDivergence() { return true; }

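  // Returns true if V may take different values in different threads, e.g.
  // reads of the thread-index special registers or the results of atomic
  // operations; the exact rules live in NVPTXTargetTransformInfo.cpp.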
  bool isSourceOfDivergence(const Value *V);

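  // The generic address space is NVPTX's "flat" address space: pointers into
  // global, shared, and local memory can be addrspacecast to and from it.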
  unsigned getFlatAddressSpace() const {
    return AddressSpace::ADDRESS_SPACE_GENERIC;
  }

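  // PTX does not allow initializers on variables in the shared, local, or
  // param address spaces, so globals placed there cannot carry a non-undef
  // initializer.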
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const {
    return AS != AddressSpace::ADDRESS_SPACE_SHARED &&
           AS != AddressSpace::ADDRESS_SPACE_LOCAL &&
           AS != AddressSpace::ADDRESS_SPACE_PARAM;
  }

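  // Target-specific InstCombine folds for NVVM intrinsics; implemented in
  // NVPTXTargetTransformInfo.cpp.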
  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;

  // Loads and stores can be vectorized if the alignment is at least as big as
  // the load/store we want to vectorize.
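  // For example, a 16-byte chain (say, four contiguous i32 loads) is only
  // reported as legal to vectorize when the access is align(16) or stricter.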
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const {
    return Alignment >= ChainSizeInBytes;
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const {
    return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
  }

  // The PTX virtual ISA has an unbounded supply of registers of every kind,
  // but the physical machine does not. We conservatively return 1 here,
  // which is just enough to enable the vectorizers but disables heuristics
  // based on the number of registers.
  // FIXME: Return a more reasonable number, while keeping an eye on
  // LoopVectorizer's unrolling heuristics.
  unsigned getNumberOfRegisters(bool Vector) const { return 1; }

  // Only <2 x half> should be vectorized, so always return 32 for the vector
  // register size.
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(32);
  }
  unsigned getMinVectorRegisterBitWidth() const { return 32; }

  // We don't want to prevent inlining because of target-cpu and -features
  // attributes that were added to newer versions of LLVM/Clang: there are no
  // incompatible functions in PTX, and ptxas will throw errors in such cases.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return true;
  }

  // Increase the inlining cost threshold by a factor of 5, reflecting that
  // calls are particularly expensive in NVPTX.
  unsigned getInliningThresholdMultiplier() { return 5; }

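  // Arithmetic instruction costs; implemented in NVPTXTargetTransformInfo.cpp,
  // which, for example, charges extra for 64-bit integer arithmetic since it
  // is typically emulated with pairs of 32-bit operations on the GPU.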
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);

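  // Loop unrolling and peeling preferences; implemented out of line, starting
  // from the generic BasicTTI defaults and then adjusting them for NVPTX.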
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) {
    // Volatile loads/stores are only supported for shared and global address
    // spaces, or for generic AS that maps to them.
    if (!(AddrSpace == llvm::ADDRESS_SPACE_GENERIC ||
          AddrSpace == llvm::ADDRESS_SPACE_GLOBAL ||
          AddrSpace == llvm::ADDRESS_SPACE_SHARED))
      return false;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::Load:
    case Instruction::Store:
      return true;
    }
  }
};

} // end namespace llvm

#endif