//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "NVPTXTargetTransformInfo.h"
#include "NVPTXUtilities.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "NVPTXtti"

// Whether the given intrinsic reads threadIdx.x/y/z.
static bool readsThreadIndex(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::nvvm_read_ptx_sreg_tid_x:
    case Intrinsic::nvvm_read_ptx_sreg_tid_y:
    case Intrinsic::nvvm_read_ptx_sreg_tid_z:
      return true;
  }
}

static bool readsLaneId(const IntrinsicInst *II) {
  return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid;
}

// Whether the given intrinsic is an atomic instruction in PTX.
static bool isNVVMAtomic(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::nvvm_atomic_load_inc_32:
    case Intrinsic::nvvm_atomic_load_dec_32:

    case Intrinsic::nvvm_atomic_add_gen_f_cta:
    case Intrinsic::nvvm_atomic_add_gen_f_sys:
    case Intrinsic::nvvm_atomic_add_gen_i_cta:
    case Intrinsic::nvvm_atomic_add_gen_i_sys:
    case Intrinsic::nvvm_atomic_and_gen_i_cta:
    case Intrinsic::nvvm_atomic_and_gen_i_sys:
    case Intrinsic::nvvm_atomic_cas_gen_i_cta:
    case Intrinsic::nvvm_atomic_cas_gen_i_sys:
    case Intrinsic::nvvm_atomic_dec_gen_i_cta:
    case Intrinsic::nvvm_atomic_dec_gen_i_sys:
    case Intrinsic::nvvm_atomic_inc_gen_i_cta:
    case Intrinsic::nvvm_atomic_inc_gen_i_sys:
    case Intrinsic::nvvm_atomic_max_gen_i_cta:
    case Intrinsic::nvvm_atomic_max_gen_i_sys:
    case Intrinsic::nvvm_atomic_min_gen_i_cta:
    case Intrinsic::nvvm_atomic_min_gen_i_sys:
    case Intrinsic::nvvm_atomic_or_gen_i_cta:
    case Intrinsic::nvvm_atomic_or_gen_i_sys:
    case Intrinsic::nvvm_atomic_exch_gen_i_cta:
    case Intrinsic::nvvm_atomic_exch_gen_i_sys:
    case Intrinsic::nvvm_atomic_xor_gen_i_cta:
    case Intrinsic::nvvm_atomic_xor_gen_i_sys:
      return true;
  }
}

bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
  // Without inter-procedural analysis, we conservatively assume that arguments
  // to __device__ functions are divergent.
  if (const Argument *Arg = dyn_cast<Argument>(V))
    return !isKernelFunction(*Arg->getParent());

  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Without pointer analysis, we conservatively assume values loaded from
    // generic or local address space are divergent.
    if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
      unsigned AS = LI->getPointerAddressSpace();
      return AS == ADDRESS_SPACE_GENERIC || AS == ADDRESS_SPACE_LOCAL;
    }
    // Atomic instructions may cause divergence. Atomic instructions are
    // executed sequentially across all threads in a warp. Therefore, an
    // earlier executed thread may see different memory inputs than a later
    // executed thread. For example, suppose *a = 0 initially.
    //
    //   atom.global.add.s32 d, [a], 1
    //
    // returns 0 for the first thread that enters the critical region, and 1
    // for the second thread.
    if (I->isAtomic())
      return true;
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      // Instructions that read threadIdx are obviously divergent.
      if (readsThreadIndex(II) || readsLaneId(II))
        return true;
      // Handle the NVPTX atomic intrinsics that cannot be represented as an
      // atomic IR instruction.
      if (isNVVMAtomic(II))
        return true;
    }
    // Conservatively consider the return value of function calls as divergent.
    // We could analyze callees with bodies more precisely using
    // inter-procedural analysis. (An illustrative sketch of querying this hook
    // through TargetTransformInfo appears at the end of this file.)
    if (isa<CallInst>(I))
      return true;
  }

  return false;
}

int NVPTXTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // The machine code (SASS) simulates an i64 with two i32. Therefore, we
    // estimate that arithmetic operations on i64 are twice as expensive as
    // those on types that can fit into one machine register. (See the
    // illustrative cost-query sketch at the end of this file.)
    if (LT.second.SimpleTy == MVT::i64)
      return 2 * LT.first;
    // Delegate other cases to the basic TTI.
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo, Opd2PropInfo);
  }
}

void NVPTXTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::UnrollingPreferences &UP) {
  BaseT::getUnrollingPreferences(L, SE, UP);

  // Enable partial unrolling and runtime unrolling, but reduce the partial
  // unrolling threshold. This partially unrolls small loops, which are often
  // unrolled by the PTX-to-SASS compiler, and unrolling them earlier can be
  // beneficial.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.Threshold / 4;
}

void NVPTXTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}
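
//===----------------------------------------------------------------------===//
// Illustrative sketch (an editorial assumption, not part of the file above):
// how the divergence hook implemented above is typically consumed through the
// generic TargetTransformInfo interface. The helper name
// `exampleCountDivergenceSources` is hypothetical; it only demonstrates the
// query pattern and is kept out of the build.
//===----------------------------------------------------------------------===//
#if 0
static unsigned exampleCountDivergenceSources(Function &F,
                                              TargetTransformInfo &TTI) {
  unsigned NumSources = 0;
  // Kernel arguments are uniform; arguments to __device__ functions are
  // conservatively reported as divergent by isSourceOfDivergence above.
  for (Argument &Arg : F.args())
    NumSources += TTI.isSourceOfDivergence(&Arg);
  // Loads from the generic/local address spaces, atomics, threadIdx/laneid
  // reads, and call results are the instruction-level sources recognized
  // above.
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      NumSources += TTI.isSourceOfDivergence(&I);
  return NumSources;
}
#endif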
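
//===----------------------------------------------------------------------===//
// Illustrative cost-query sketch (an editorial assumption, not part of the
// file above): querying the arithmetic cost heuristic above through
// TargetTransformInfo. The helper name `exampleI64AddCost` is hypothetical.
// Per the heuristic in getArithmeticInstrCost, an i64 ADD is expected to be
// reported as roughly twice the cost of an i32 ADD, because SASS emulates an
// i64 with two i32 registers. Kept out of the build.
//===----------------------------------------------------------------------===//
#if 0
static int exampleI64AddCost(LLVMContext &Ctx, TargetTransformInfo &TTI) {
  Type *I64Ty = Type::getInt64Ty(Ctx);
  // For a legal scalar i64, the NVPTX implementation above returns
  // 2 * LT.first instead of delegating to the base implementation.
  return TTI.getArithmeticInstrCost(Instruction::Add, I64Ty,
                                    TargetTransformInfo::TCK_RecipThroughput);
}
#endif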