//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "NVPTXTargetTransformInfo.h"
#include "NVPTXUtilities.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "NVPTXtti"

// Whether the given intrinsic reads threadIdx.x/y/z.
static bool readsThreadIndex(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::nvvm_read_ptx_sreg_tid_x:
    case Intrinsic::nvvm_read_ptx_sreg_tid_y:
    case Intrinsic::nvvm_read_ptx_sreg_tid_z:
      return true;
  }
}

// Whether the given intrinsic reads the lane ID within the warp.
static bool readsLaneId(const IntrinsicInst *II) {
  return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid;
}

// Whether the given intrinsic is an atomic instruction in PTX.
static bool isNVVMAtomic(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::nvvm_atomic_load_inc_32:
    case Intrinsic::nvvm_atomic_load_dec_32:

    case Intrinsic::nvvm_atomic_add_gen_f_cta:
    case Intrinsic::nvvm_atomic_add_gen_f_sys:
    case Intrinsic::nvvm_atomic_add_gen_i_cta:
    case Intrinsic::nvvm_atomic_add_gen_i_sys:
    case Intrinsic::nvvm_atomic_and_gen_i_cta:
    case Intrinsic::nvvm_atomic_and_gen_i_sys:
    case Intrinsic::nvvm_atomic_cas_gen_i_cta:
    case Intrinsic::nvvm_atomic_cas_gen_i_sys:
    case Intrinsic::nvvm_atomic_dec_gen_i_cta:
    case Intrinsic::nvvm_atomic_dec_gen_i_sys:
    case Intrinsic::nvvm_atomic_inc_gen_i_cta:
    case Intrinsic::nvvm_atomic_inc_gen_i_sys:
    case Intrinsic::nvvm_atomic_max_gen_i_cta:
    case Intrinsic::nvvm_atomic_max_gen_i_sys:
    case Intrinsic::nvvm_atomic_min_gen_i_cta:
    case Intrinsic::nvvm_atomic_min_gen_i_sys:
    case Intrinsic::nvvm_atomic_or_gen_i_cta:
    case Intrinsic::nvvm_atomic_or_gen_i_sys:
    case Intrinsic::nvvm_atomic_exch_gen_i_cta:
    case Intrinsic::nvvm_atomic_exch_gen_i_sys:
    case Intrinsic::nvvm_atomic_xor_gen_i_cta:
    case Intrinsic::nvvm_atomic_xor_gen_i_sys:
      return true;
  }
}

bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
  // Without inter-procedural analysis, we conservatively assume that arguments
  // to __device__ functions are divergent.
  if (const Argument *Arg = dyn_cast<Argument>(V))
    return !isKernelFunction(*Arg->getParent());

  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Without pointer analysis, we conservatively assume values loaded from
    // generic or local address space are divergent.
    if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
      unsigned AS = LI->getPointerAddressSpace();
      return AS == ADDRESS_SPACE_GENERIC || AS == ADDRESS_SPACE_LOCAL;
    }
    // Atomic instructions may cause divergence. Atomic instructions are
    // executed sequentially across all threads in a warp. Therefore, an
    // earlier executed thread may see different memory inputs than a later
    // executed thread. For example, suppose *a = 0 initially.
    //
    //   atom.global.add.s32  d, [a], 1
    //
    // returns 0 for the first thread that enters the critical region, and 1
    // for the second thread.
    if (I->isAtomic())
      return true;
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      // Instructions that read threadIdx are obviously divergent.
      if (readsThreadIndex(II) || readsLaneId(II))
        return true;
      // Handle the NVPTX atomic intrinsics that cannot be represented as
      // atomic IR instructions.
      if (isNVVMAtomic(II))
        return true;
    }
    // Conservatively consider the return value of function calls as divergent.
    // We could analyze callees with bodies more precisely using
    // inter-procedural analysis.
    if (isa<CallInst>(I))
      return true;
  }

  return false;
}

int NVPTXTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // The machine code (SASS) simulates an i64 with two i32. Therefore, we
    // estimate that arithmetic operations on i64 are twice as expensive as
    // those on types that can fit into one machine register.
    if (LT.second.SimpleTy == MVT::i64)
      return 2 * LT.first;
    // Delegate other cases to the basic TTI.
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }
}
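
// Illustration for the 2x i64 estimate above (a sketch of typical codegen,
// not a guarantee; the final SASS depends on ptxas and the target
// architecture): a 64-bit add is usually split into a 32-bit carry chain,
// in PTX terms something like
//
//   add.cc.u32  %r_lo, %ra_lo, %rb_lo;  // low word, sets the carry flag
//   addc.u32    %r_hi, %ra_hi, %rb_hi;  // high word, consumes the carry
//
// i.e. two machine-level operations for one IR-level i64 operation, which
// is where the factor of two comes from.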

void NVPTXTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::UnrollingPreferences &UP) {
  BaseT::getUnrollingPreferences(L, SE, UP);

  // Enable partial unrolling and runtime unrolling, but reduce the
  // threshold. This partially unrolls small loops, which are often
  // unrolled anyway by the PTX-to-SASS compiler; unrolling earlier
  // can be beneficial.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.Threshold / 4;
}
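
// Note on the division above: with the default thresholds in LLVM's loop
// unroller at the time of writing (150 at -O2 and 300 at -O3; these values
// are an assumption and may drift across LLVM versions), PartialThreshold
// comes out to 37 or 75, so only fairly small loop bodies are partially
// unrolled before ptxas sees them.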