1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// Custom DAG lowering for SI 11 // 12 //===----------------------------------------------------------------------===// 13 14 #if defined(_MSC_VER) || defined(__MINGW32__) 15 // Provide M_PI. 16 #define _USE_MATH_DEFINES 17 #endif 18 19 #include "SIISelLowering.h" 20 #include "AMDGPU.h" 21 #include "AMDGPUSubtarget.h" 22 #include "AMDGPUTargetMachine.h" 23 #include "SIDefines.h" 24 #include "SIInstrInfo.h" 25 #include "SIMachineFunctionInfo.h" 26 #include "SIRegisterInfo.h" 27 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 28 #include "Utils/AMDGPUBaseInfo.h" 29 #include "llvm/ADT/APFloat.h" 30 #include "llvm/ADT/APInt.h" 31 #include "llvm/ADT/ArrayRef.h" 32 #include "llvm/ADT/BitVector.h" 33 #include "llvm/ADT/SmallVector.h" 34 #include "llvm/ADT/Statistic.h" 35 #include "llvm/ADT/StringRef.h" 36 #include "llvm/ADT/StringSwitch.h" 37 #include "llvm/ADT/Twine.h" 38 #include "llvm/CodeGen/Analysis.h" 39 #include "llvm/CodeGen/CallingConvLower.h" 40 #include "llvm/CodeGen/DAGCombine.h" 41 #include "llvm/CodeGen/ISDOpcodes.h" 42 #include "llvm/CodeGen/MachineBasicBlock.h" 43 #include "llvm/CodeGen/MachineFrameInfo.h" 44 #include "llvm/CodeGen/MachineFunction.h" 45 #include "llvm/CodeGen/MachineInstr.h" 46 #include "llvm/CodeGen/MachineInstrBuilder.h" 47 #include "llvm/CodeGen/MachineMemOperand.h" 48 #include "llvm/CodeGen/MachineModuleInfo.h" 49 #include "llvm/CodeGen/MachineOperand.h" 50 #include "llvm/CodeGen/MachineRegisterInfo.h" 51 #include "llvm/CodeGen/SelectionDAG.h" 52 #include "llvm/CodeGen/SelectionDAGNodes.h" 53 #include "llvm/CodeGen/TargetCallingConv.h" 54 #include "llvm/CodeGen/TargetRegisterInfo.h" 55 #include "llvm/CodeGen/ValueTypes.h" 56 #include "llvm/IR/Constants.h" 57 #include "llvm/IR/DataLayout.h" 58 #include "llvm/IR/DebugLoc.h" 59 #include "llvm/IR/DerivedTypes.h" 60 #include "llvm/IR/DiagnosticInfo.h" 61 #include "llvm/IR/Function.h" 62 #include "llvm/IR/GlobalValue.h" 63 #include "llvm/IR/InstrTypes.h" 64 #include "llvm/IR/Instruction.h" 65 #include "llvm/IR/Instructions.h" 66 #include "llvm/IR/IntrinsicInst.h" 67 #include "llvm/IR/Type.h" 68 #include "llvm/Support/Casting.h" 69 #include "llvm/Support/CodeGen.h" 70 #include "llvm/Support/CommandLine.h" 71 #include "llvm/Support/Compiler.h" 72 #include "llvm/Support/ErrorHandling.h" 73 #include "llvm/Support/KnownBits.h" 74 #include "llvm/Support/MachineValueType.h" 75 #include "llvm/Support/MathExtras.h" 76 #include "llvm/Target/TargetOptions.h" 77 #include <cassert> 78 #include <cmath> 79 #include <cstdint> 80 #include <iterator> 81 #include <tuple> 82 #include <utility> 83 #include <vector> 84 85 using namespace llvm; 86 87 #define DEBUG_TYPE "si-lower" 88 89 STATISTIC(NumTailCalls, "Number of tail calls"); 90 91 static cl::opt<bool> EnableVGPRIndexMode( 92 "amdgpu-vgpr-index-mode", 93 cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), 94 cl::init(false)); 95 96 static cl::opt<bool> DisableLoopAlignment( 97 "amdgpu-disable-loop-alignment", 98 cl::desc("Do not align and prefetch loops"), 99 cl::init(false)); 100 101 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 102 unsigned NumSGPRs = 
    AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, none of these operations are
    // really legal.
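    // The packed 16-bit vector types are kept in 32-bit and 64-bit scalar
    // registers; most operations on them are expanded or custom-lowered later
    // in this constructor.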
148 addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass); 149 addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass); 150 addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass); 151 addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass); 152 } 153 154 if (Subtarget->hasMAIInsts()) { 155 addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass); 156 addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass); 157 } 158 159 computeRegisterProperties(Subtarget->getRegisterInfo()); 160 161 // We need to custom lower vector stores from local memory 162 setOperationAction(ISD::LOAD, MVT::v2i32, Custom); 163 setOperationAction(ISD::LOAD, MVT::v3i32, Custom); 164 setOperationAction(ISD::LOAD, MVT::v4i32, Custom); 165 setOperationAction(ISD::LOAD, MVT::v5i32, Custom); 166 setOperationAction(ISD::LOAD, MVT::v8i32, Custom); 167 setOperationAction(ISD::LOAD, MVT::v16i32, Custom); 168 setOperationAction(ISD::LOAD, MVT::i1, Custom); 169 setOperationAction(ISD::LOAD, MVT::v32i32, Custom); 170 171 setOperationAction(ISD::STORE, MVT::v2i32, Custom); 172 setOperationAction(ISD::STORE, MVT::v3i32, Custom); 173 setOperationAction(ISD::STORE, MVT::v4i32, Custom); 174 setOperationAction(ISD::STORE, MVT::v5i32, Custom); 175 setOperationAction(ISD::STORE, MVT::v8i32, Custom); 176 setOperationAction(ISD::STORE, MVT::v16i32, Custom); 177 setOperationAction(ISD::STORE, MVT::i1, Custom); 178 setOperationAction(ISD::STORE, MVT::v32i32, Custom); 179 180 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); 181 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); 182 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); 183 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); 184 setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand); 185 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand); 186 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand); 187 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand); 188 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand); 189 setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand); 190 191 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 192 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 193 194 setOperationAction(ISD::SELECT, MVT::i1, Promote); 195 setOperationAction(ISD::SELECT, MVT::i64, Custom); 196 setOperationAction(ISD::SELECT, MVT::f64, Promote); 197 AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); 198 199 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 200 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); 201 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 202 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 203 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); 204 205 setOperationAction(ISD::SETCC, MVT::i1, Promote); 206 setOperationAction(ISD::SETCC, MVT::v2i1, Expand); 207 setOperationAction(ISD::SETCC, MVT::v4i1, Expand); 208 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); 209 210 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); 211 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 212 213 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); 214 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); 215 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 216 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); 217 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 218 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); 219 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); 220 221 setOperationAction(ISD::INTRINSIC_WO_CHAIN, 
MVT::Other, Custom); 222 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); 223 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); 224 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); 225 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom); 226 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom); 227 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom); 228 229 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom); 230 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom); 231 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom); 232 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 233 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); 234 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); 235 236 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); 237 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); 238 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom); 239 setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom); 240 setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); 241 setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); 242 243 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 244 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 245 setOperationAction(ISD::BR_CC, MVT::i32, Expand); 246 setOperationAction(ISD::BR_CC, MVT::i64, Expand); 247 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 248 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 249 250 setOperationAction(ISD::UADDO, MVT::i32, Legal); 251 setOperationAction(ISD::USUBO, MVT::i32, Legal); 252 253 setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); 254 setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); 255 256 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 257 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 258 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); 259 260 #if 0 261 setOperationAction(ISD::ADDCARRY, MVT::i64, Legal); 262 setOperationAction(ISD::SUBCARRY, MVT::i64, Legal); 263 #endif 264 265 // We only support LOAD/STORE and vector manipulation ops for vectors 266 // with > 4 elements. 267 for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, 268 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, 269 MVT::v32i32, MVT::v32f32 }) { 270 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 271 switch (Op) { 272 case ISD::LOAD: 273 case ISD::STORE: 274 case ISD::BUILD_VECTOR: 275 case ISD::BITCAST: 276 case ISD::EXTRACT_VECTOR_ELT: 277 case ISD::INSERT_VECTOR_ELT: 278 case ISD::INSERT_SUBVECTOR: 279 case ISD::EXTRACT_SUBVECTOR: 280 case ISD::SCALAR_TO_VECTOR: 281 break; 282 case ISD::CONCAT_VECTORS: 283 setOperationAction(Op, VT, Custom); 284 break; 285 default: 286 setOperationAction(Op, VT, Expand); 287 break; 288 } 289 } 290 } 291 292 setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand); 293 294 // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that 295 // is expanded to avoid having two separate loops in case the index is a VGPR. 296 297 // Most operations are naturally 32-bit vector operations. We only support 298 // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. 
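  // The loop below reinterprets each 64-bit element as a pair of 32-bit
  // registers: BUILD_VECTOR, EXTRACT/INSERT_VECTOR_ELT and SCALAR_TO_VECTOR on
  // v2i64/v2f64 are performed as the equivalent v4i32 operations.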
299 for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { 300 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 301 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); 302 303 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 304 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); 305 306 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 307 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); 308 309 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 310 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); 311 } 312 313 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); 314 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); 315 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); 316 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); 317 318 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom); 319 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); 320 321 // Avoid stack access for these. 322 // TODO: Generalize to more vector types. 323 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom); 324 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom); 325 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); 326 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); 327 328 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); 329 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); 330 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom); 331 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom); 332 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom); 333 334 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom); 335 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); 336 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom); 337 338 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom); 339 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom); 340 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); 341 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); 342 343 // Deal with vec3 vector operations when widened to vec4. 344 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom); 345 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom); 346 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom); 347 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom); 348 349 // Deal with vec5 vector operations when widened to vec8. 
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling and
  // output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value, so let LLVM add the
  // comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
426 setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); 427 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); 428 setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); 429 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); 430 431 432 if (Subtarget->haveRoundOpsF64()) { 433 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 434 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 435 setOperationAction(ISD::FRINT, MVT::f64, Legal); 436 } else { 437 setOperationAction(ISD::FCEIL, MVT::f64, Custom); 438 setOperationAction(ISD::FTRUNC, MVT::f64, Custom); 439 setOperationAction(ISD::FRINT, MVT::f64, Custom); 440 setOperationAction(ISD::FFLOOR, MVT::f64, Custom); 441 } 442 443 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 444 445 setOperationAction(ISD::FSIN, MVT::f32, Custom); 446 setOperationAction(ISD::FCOS, MVT::f32, Custom); 447 setOperationAction(ISD::FDIV, MVT::f32, Custom); 448 setOperationAction(ISD::FDIV, MVT::f64, Custom); 449 450 if (Subtarget->has16BitInsts()) { 451 setOperationAction(ISD::Constant, MVT::i16, Legal); 452 453 setOperationAction(ISD::SMIN, MVT::i16, Legal); 454 setOperationAction(ISD::SMAX, MVT::i16, Legal); 455 456 setOperationAction(ISD::UMIN, MVT::i16, Legal); 457 setOperationAction(ISD::UMAX, MVT::i16, Legal); 458 459 setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); 460 AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); 461 462 setOperationAction(ISD::ROTR, MVT::i16, Promote); 463 setOperationAction(ISD::ROTL, MVT::i16, Promote); 464 465 setOperationAction(ISD::SDIV, MVT::i16, Promote); 466 setOperationAction(ISD::UDIV, MVT::i16, Promote); 467 setOperationAction(ISD::SREM, MVT::i16, Promote); 468 setOperationAction(ISD::UREM, MVT::i16, Promote); 469 470 setOperationAction(ISD::BSWAP, MVT::i16, Promote); 471 setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); 472 473 setOperationAction(ISD::CTTZ, MVT::i16, Promote); 474 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); 475 setOperationAction(ISD::CTLZ, MVT::i16, Promote); 476 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); 477 setOperationAction(ISD::CTPOP, MVT::i16, Promote); 478 479 setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); 480 481 setOperationAction(ISD::BR_CC, MVT::i16, Expand); 482 483 setOperationAction(ISD::LOAD, MVT::i16, Custom); 484 485 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 486 487 setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); 488 AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); 489 setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); 490 AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); 491 492 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); 493 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); 494 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); 495 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); 496 497 // F16 - Constant Actions. 498 setOperationAction(ISD::ConstantFP, MVT::f16, Legal); 499 500 // F16 - Load/Store Actions. 501 setOperationAction(ISD::LOAD, MVT::f16, Promote); 502 AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); 503 setOperationAction(ISD::STORE, MVT::f16, Promote); 504 AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); 505 506 // F16 - VOP1 Actions. 
507 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); 508 setOperationAction(ISD::FCOS, MVT::f16, Promote); 509 setOperationAction(ISD::FSIN, MVT::f16, Promote); 510 setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); 511 setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); 512 setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); 513 setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); 514 setOperationAction(ISD::FROUND, MVT::f16, Custom); 515 516 // F16 - VOP2 Actions. 517 setOperationAction(ISD::BR_CC, MVT::f16, Expand); 518 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); 519 520 setOperationAction(ISD::FDIV, MVT::f16, Custom); 521 522 // F16 - VOP3 Actions. 523 setOperationAction(ISD::FMA, MVT::f16, Legal); 524 if (!Subtarget->hasFP16Denormals() && STI.hasMadF16()) 525 setOperationAction(ISD::FMAD, MVT::f16, Legal); 526 527 for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) { 528 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 529 switch (Op) { 530 case ISD::LOAD: 531 case ISD::STORE: 532 case ISD::BUILD_VECTOR: 533 case ISD::BITCAST: 534 case ISD::EXTRACT_VECTOR_ELT: 535 case ISD::INSERT_VECTOR_ELT: 536 case ISD::INSERT_SUBVECTOR: 537 case ISD::EXTRACT_SUBVECTOR: 538 case ISD::SCALAR_TO_VECTOR: 539 break; 540 case ISD::CONCAT_VECTORS: 541 setOperationAction(Op, VT, Custom); 542 break; 543 default: 544 setOperationAction(Op, VT, Expand); 545 break; 546 } 547 } 548 } 549 550 // XXX - Do these do anything? Vector constants turn into build_vector. 551 setOperationAction(ISD::Constant, MVT::v2i16, Legal); 552 setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal); 553 554 setOperationAction(ISD::UNDEF, MVT::v2i16, Legal); 555 setOperationAction(ISD::UNDEF, MVT::v2f16, Legal); 556 557 setOperationAction(ISD::STORE, MVT::v2i16, Promote); 558 AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); 559 setOperationAction(ISD::STORE, MVT::v2f16, Promote); 560 AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32); 561 562 setOperationAction(ISD::LOAD, MVT::v2i16, Promote); 563 AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32); 564 setOperationAction(ISD::LOAD, MVT::v2f16, Promote); 565 AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32); 566 567 setOperationAction(ISD::AND, MVT::v2i16, Promote); 568 AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32); 569 setOperationAction(ISD::OR, MVT::v2i16, Promote); 570 AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32); 571 setOperationAction(ISD::XOR, MVT::v2i16, Promote); 572 AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32); 573 574 setOperationAction(ISD::LOAD, MVT::v4i16, Promote); 575 AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32); 576 setOperationAction(ISD::LOAD, MVT::v4f16, Promote); 577 AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32); 578 579 setOperationAction(ISD::STORE, MVT::v4i16, Promote); 580 AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); 581 setOperationAction(ISD::STORE, MVT::v4f16, Promote); 582 AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); 583 584 setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand); 585 setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); 586 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); 587 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); 588 589 setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand); 590 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand); 591 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand); 592 593 if (!Subtarget->hasVOP3PInsts()) { 594 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom); 595 
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); 596 } 597 598 setOperationAction(ISD::FNEG, MVT::v2f16, Legal); 599 // This isn't really legal, but this avoids the legalizer unrolling it (and 600 // allows matching fneg (fabs x) patterns) 601 setOperationAction(ISD::FABS, MVT::v2f16, Legal); 602 603 setOperationAction(ISD::FMAXNUM, MVT::f16, Custom); 604 setOperationAction(ISD::FMINNUM, MVT::f16, Custom); 605 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal); 606 setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal); 607 608 setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom); 609 setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom); 610 611 setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand); 612 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand); 613 } 614 615 if (Subtarget->hasVOP3PInsts()) { 616 setOperationAction(ISD::ADD, MVT::v2i16, Legal); 617 setOperationAction(ISD::SUB, MVT::v2i16, Legal); 618 setOperationAction(ISD::MUL, MVT::v2i16, Legal); 619 setOperationAction(ISD::SHL, MVT::v2i16, Legal); 620 setOperationAction(ISD::SRL, MVT::v2i16, Legal); 621 setOperationAction(ISD::SRA, MVT::v2i16, Legal); 622 setOperationAction(ISD::SMIN, MVT::v2i16, Legal); 623 setOperationAction(ISD::UMIN, MVT::v2i16, Legal); 624 setOperationAction(ISD::SMAX, MVT::v2i16, Legal); 625 setOperationAction(ISD::UMAX, MVT::v2i16, Legal); 626 627 setOperationAction(ISD::FADD, MVT::v2f16, Legal); 628 setOperationAction(ISD::FMUL, MVT::v2f16, Legal); 629 setOperationAction(ISD::FMA, MVT::v2f16, Legal); 630 631 setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal); 632 setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal); 633 634 setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal); 635 636 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); 637 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); 638 639 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom); 640 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); 641 642 setOperationAction(ISD::SHL, MVT::v4i16, Custom); 643 setOperationAction(ISD::SRA, MVT::v4i16, Custom); 644 setOperationAction(ISD::SRL, MVT::v4i16, Custom); 645 setOperationAction(ISD::ADD, MVT::v4i16, Custom); 646 setOperationAction(ISD::SUB, MVT::v4i16, Custom); 647 setOperationAction(ISD::MUL, MVT::v4i16, Custom); 648 649 setOperationAction(ISD::SMIN, MVT::v4i16, Custom); 650 setOperationAction(ISD::SMAX, MVT::v4i16, Custom); 651 setOperationAction(ISD::UMIN, MVT::v4i16, Custom); 652 setOperationAction(ISD::UMAX, MVT::v4i16, Custom); 653 654 setOperationAction(ISD::FADD, MVT::v4f16, Custom); 655 setOperationAction(ISD::FMUL, MVT::v4f16, Custom); 656 657 setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom); 658 setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom); 659 660 setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom); 661 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); 662 setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom); 663 664 setOperationAction(ISD::FEXP, MVT::v2f16, Custom); 665 setOperationAction(ISD::SELECT, MVT::v4i16, Custom); 666 setOperationAction(ISD::SELECT, MVT::v4f16, Custom); 667 } 668 669 setOperationAction(ISD::FNEG, MVT::v4f16, Custom); 670 setOperationAction(ISD::FABS, MVT::v4f16, Custom); 671 672 if (Subtarget->has16BitInsts()) { 673 setOperationAction(ISD::SELECT, MVT::v2i16, Promote); 674 AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32); 675 setOperationAction(ISD::SELECT, MVT::v2f16, Promote); 676 AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); 677 } 
else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // match the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this is OK to use when denormals are
// enabled, but we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    if (Size == 16 && Subtarget->has16BitInsts())
      return (VT.getVectorNumElements() + 1) / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type currently expected.
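  // Specifically, a two-member struct whose first member is a scalar or vector
  // with integer, half or float elements and whose second member is an i32.
  // The trailing i32 is folded into the memory type by adding one dword's
  // worth of elements before rounding the count up to a power of two.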
848 assert(Ty->isStructTy() && "Expected struct type"); 849 850 851 Type *ElementType = nullptr; 852 unsigned NumElts; 853 if (Ty->getContainedType(0)->isVectorTy()) { 854 VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0)); 855 ElementType = VecComponent->getElementType(); 856 NumElts = VecComponent->getNumElements(); 857 } else { 858 ElementType = Ty->getContainedType(0); 859 NumElts = 1; 860 } 861 862 assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type"); 863 864 // Calculate the size of the memVT type from the aggregate 865 unsigned Pow2Elts = 0; 866 unsigned ElementSize; 867 switch (ElementType->getTypeID()) { 868 default: 869 llvm_unreachable("Unknown type!"); 870 case Type::IntegerTyID: 871 ElementSize = cast<IntegerType>(ElementType)->getBitWidth(); 872 break; 873 case Type::HalfTyID: 874 ElementSize = 16; 875 break; 876 case Type::FloatTyID: 877 ElementSize = 32; 878 break; 879 } 880 unsigned AdditionalElts = ElementSize == 16 ? 2 : 1; 881 Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts); 882 883 return MVT::getVectorVT(MVT::getVT(ElementType, false), 884 Pow2Elts); 885 } 886 887 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 888 const CallInst &CI, 889 MachineFunction &MF, 890 unsigned IntrID) const { 891 if (const AMDGPU::RsrcIntrinsic *RsrcIntr = 892 AMDGPU::lookupRsrcIntrinsic(IntrID)) { 893 AttributeList Attr = Intrinsic::getAttributes(CI.getContext(), 894 (Intrinsic::ID)IntrID); 895 if (Attr.hasFnAttribute(Attribute::ReadNone)) 896 return false; 897 898 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 899 900 if (RsrcIntr->IsImage) { 901 Info.ptrVal = MFI->getImagePSV( 902 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(), 903 CI.getArgOperand(RsrcIntr->RsrcArg)); 904 Info.align = 0; 905 } else { 906 Info.ptrVal = MFI->getBufferPSV( 907 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(), 908 CI.getArgOperand(RsrcIntr->RsrcArg)); 909 } 910 911 Info.flags = MachineMemOperand::MODereferenceable; 912 if (Attr.hasFnAttribute(Attribute::ReadOnly)) { 913 Info.opc = ISD::INTRINSIC_W_CHAIN; 914 Info.memVT = MVT::getVT(CI.getType(), true); 915 if (Info.memVT == MVT::Other) { 916 // Some intrinsics return an aggregate type - special case to work out 917 // the correct memVT 918 Info.memVT = memVTFromAggregate(CI.getType()); 919 } 920 Info.flags |= MachineMemOperand::MOLoad; 921 } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) { 922 Info.opc = ISD::INTRINSIC_VOID; 923 Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType()); 924 Info.flags |= MachineMemOperand::MOStore; 925 } else { 926 // Atomic 927 Info.opc = ISD::INTRINSIC_W_CHAIN; 928 Info.memVT = MVT::getVT(CI.getType()); 929 Info.flags = MachineMemOperand::MOLoad | 930 MachineMemOperand::MOStore | 931 MachineMemOperand::MODereferenceable; 932 933 // XXX - Should this be volatile without known ordering? 
934 Info.flags |= MachineMemOperand::MOVolatile; 935 } 936 return true; 937 } 938 939 switch (IntrID) { 940 case Intrinsic::amdgcn_atomic_inc: 941 case Intrinsic::amdgcn_atomic_dec: 942 case Intrinsic::amdgcn_ds_ordered_add: 943 case Intrinsic::amdgcn_ds_ordered_swap: 944 case Intrinsic::amdgcn_ds_fadd: 945 case Intrinsic::amdgcn_ds_fmin: 946 case Intrinsic::amdgcn_ds_fmax: { 947 Info.opc = ISD::INTRINSIC_W_CHAIN; 948 Info.memVT = MVT::getVT(CI.getType()); 949 Info.ptrVal = CI.getOperand(0); 950 Info.align = 0; 951 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 952 953 const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); 954 if (!Vol->isZero()) 955 Info.flags |= MachineMemOperand::MOVolatile; 956 957 return true; 958 } 959 case Intrinsic::amdgcn_buffer_atomic_fadd: { 960 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 961 962 Info.opc = ISD::INTRINSIC_VOID; 963 Info.memVT = MVT::getVT(CI.getOperand(0)->getType()); 964 Info.ptrVal = MFI->getBufferPSV( 965 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(), 966 CI.getArgOperand(1)); 967 Info.align = 0; 968 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 969 970 const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); 971 if (!Vol || !Vol->isZero()) 972 Info.flags |= MachineMemOperand::MOVolatile; 973 974 return true; 975 } 976 case Intrinsic::amdgcn_global_atomic_fadd: { 977 Info.opc = ISD::INTRINSIC_VOID; 978 Info.memVT = MVT::getVT(CI.getOperand(0)->getType() 979 ->getPointerElementType()); 980 Info.ptrVal = CI.getOperand(0); 981 Info.align = 0; 982 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 983 984 return true; 985 } 986 case Intrinsic::amdgcn_ds_append: 987 case Intrinsic::amdgcn_ds_consume: { 988 Info.opc = ISD::INTRINSIC_W_CHAIN; 989 Info.memVT = MVT::getVT(CI.getType()); 990 Info.ptrVal = CI.getOperand(0); 991 Info.align = 0; 992 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 993 994 const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); 995 if (!Vol->isZero()) 996 Info.flags |= MachineMemOperand::MOVolatile; 997 998 return true; 999 } 1000 case Intrinsic::amdgcn_ds_gws_init: 1001 case Intrinsic::amdgcn_ds_gws_barrier: 1002 case Intrinsic::amdgcn_ds_gws_sema_v: 1003 case Intrinsic::amdgcn_ds_gws_sema_br: 1004 case Intrinsic::amdgcn_ds_gws_sema_p: 1005 case Intrinsic::amdgcn_ds_gws_sema_release_all: { 1006 Info.opc = ISD::INTRINSIC_VOID; 1007 1008 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1009 Info.ptrVal = 1010 MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); 1011 1012 // This is an abstract access, but we need to specify a type and size. 
    Info.memVT = MVT::i32;
    Info.size = 4;
    Info.align = 4;

    Info.flags = MachineMemOperand::MOStore;
    if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
      Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // GFX10 shrank the signed offset to 12 bits. When using regular flat
  // instructions, the sign bit is also ignored and is treated as an 11-bit
  // unsigned offset.

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
    return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffer < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?
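  // The scale cases below mirror what MUBUF addressing can actually encode:
  // scale 0 and 1 are the plain offset and register + offset forms, and a
  // doubled base register is only accepted when it can be rewritten as r + r.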
1104 1105 switch (AM.Scale) { 1106 case 0: // r + i or just i, depending on HasBaseReg. 1107 return true; 1108 case 1: 1109 return true; // We have r + r or r + i. 1110 case 2: 1111 if (AM.HasBaseReg) { 1112 // Reject 2 * r + r. 1113 return false; 1114 } 1115 1116 // Allow 2 * r as r + r 1117 // Or 2 * r + i is allowed as r + r + i. 1118 return true; 1119 default: // Don't allow n * r 1120 return false; 1121 } 1122 } 1123 1124 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, 1125 const AddrMode &AM, Type *Ty, 1126 unsigned AS, Instruction *I) const { 1127 // No global is ever allowed as a base. 1128 if (AM.BaseGV) 1129 return false; 1130 1131 if (AS == AMDGPUAS::GLOBAL_ADDRESS) 1132 return isLegalGlobalAddressingMode(AM); 1133 1134 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 1135 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 1136 AS == AMDGPUAS::BUFFER_FAT_POINTER) { 1137 // If the offset isn't a multiple of 4, it probably isn't going to be 1138 // correctly aligned. 1139 // FIXME: Can we get the real alignment here? 1140 if (AM.BaseOffs % 4 != 0) 1141 return isLegalMUBUFAddressingMode(AM); 1142 1143 // There are no SMRD extloads, so if we have to do a small type access we 1144 // will use a MUBUF load. 1145 // FIXME?: We also need to do this if unaligned, but we don't know the 1146 // alignment here. 1147 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) 1148 return isLegalGlobalAddressingMode(AM); 1149 1150 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 1151 // SMRD instructions have an 8-bit, dword offset on SI. 1152 if (!isUInt<8>(AM.BaseOffs / 4)) 1153 return false; 1154 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { 1155 // On CI+, this can also be a 32-bit literal constant offset. If it fits 1156 // in 8-bits, it can use a smaller encoding. 1157 if (!isUInt<32>(AM.BaseOffs / 4)) 1158 return false; 1159 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 1160 // On VI, these use the SMEM format and the offset is 20-bit in bytes. 1161 if (!isUInt<20>(AM.BaseOffs)) 1162 return false; 1163 } else 1164 llvm_unreachable("unhandled generation"); 1165 1166 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 1167 return true; 1168 1169 if (AM.Scale == 1 && AM.HasBaseReg) 1170 return true; 1171 1172 return false; 1173 1174 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 1175 return isLegalMUBUFAddressingMode(AM); 1176 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || 1177 AS == AMDGPUAS::REGION_ADDRESS) { 1178 // Basic, single offset DS instructions allow a 16-bit unsigned immediate 1179 // field. 1180 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have 1181 // an 8-bit dword offset but we don't know the alignment here. 1182 if (!isUInt<16>(AM.BaseOffs)) 1183 return false; 1184 1185 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 1186 return true; 1187 1188 if (AM.Scale == 1 && AM.HasBaseReg) 1189 return true; 1190 1191 return false; 1192 } else if (AS == AMDGPUAS::FLAT_ADDRESS || 1193 AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) { 1194 // For an unknown address space, this usually means that this is for some 1195 // reason being used for pure arithmetic, and not based on some addressing 1196 // computation. We don't have instructions that compute pointers with any 1197 // addressing modes, so treat them as having no offset like flat 1198 // instructions. 
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
    bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4-byte
    // aligned, 8-byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    bool AlignedBy4 = Align >= 4;
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Values smaller than a dword must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.
1291 1292 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global 1293 return MVT::v4i32; 1294 1295 if (Size >= 8 && DstAlign >= 4) 1296 return MVT::v2i32; 1297 1298 // Use the default. 1299 return MVT::Other; 1300 } 1301 1302 static bool isFlatGlobalAddrSpace(unsigned AS) { 1303 return AS == AMDGPUAS::GLOBAL_ADDRESS || 1304 AS == AMDGPUAS::FLAT_ADDRESS || 1305 AS == AMDGPUAS::CONSTANT_ADDRESS || 1306 AS > AMDGPUAS::MAX_AMDGPU_ADDRESS; 1307 } 1308 1309 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, 1310 unsigned DestAS) const { 1311 return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS); 1312 } 1313 1314 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { 1315 const MemSDNode *MemNode = cast<MemSDNode>(N); 1316 const Value *Ptr = MemNode->getMemOperand()->getValue(); 1317 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); 1318 return I && I->getMetadata("amdgpu.noclobber"); 1319 } 1320 1321 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, 1322 unsigned DestAS) const { 1323 // Flat -> private/local is a simple truncate. 1324 // Flat -> global is no-op 1325 if (SrcAS == AMDGPUAS::FLAT_ADDRESS) 1326 return true; 1327 1328 return isNoopAddrSpaceCast(SrcAS, DestAS); 1329 } 1330 1331 bool SITargetLowering::isMemOpUniform(const SDNode *N) const { 1332 const MemSDNode *MemNode = cast<MemSDNode>(N); 1333 1334 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); 1335 } 1336 1337 TargetLoweringBase::LegalizeTypeAction 1338 SITargetLowering::getPreferredVectorAction(MVT VT) const { 1339 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) 1340 return TypeSplitVector; 1341 1342 return TargetLoweringBase::getPreferredVectorAction(VT); 1343 } 1344 1345 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 1346 Type *Ty) const { 1347 // FIXME: Could be smarter if called for vector constants. 1348 return true; 1349 } 1350 1351 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { 1352 if (Subtarget->has16BitInsts() && VT == MVT::i16) { 1353 switch (Op) { 1354 case ISD::LOAD: 1355 case ISD::STORE: 1356 1357 // These operations are done with 32-bit instructions anyway. 1358 case ISD::AND: 1359 case ISD::OR: 1360 case ISD::XOR: 1361 case ISD::SELECT: 1362 // TODO: Extensions? 1363 return true; 1364 default: 1365 return false; 1366 } 1367 } 1368 1369 // SimplifySetCC uses this function to determine whether or not it should 1370 // create setcc with i1 operands. We don't have instructions for i1 setcc. 
1371 if (VT == MVT::i1 && Op == ISD::SETCC) 1372 return false; 1373 1374 return TargetLowering::isTypeDesirableForOp(Op, VT); 1375 } 1376 1377 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, 1378 const SDLoc &SL, 1379 SDValue Chain, 1380 uint64_t Offset) const { 1381 const DataLayout &DL = DAG.getDataLayout(); 1382 MachineFunction &MF = DAG.getMachineFunction(); 1383 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1384 1385 const ArgDescriptor *InputPtrReg; 1386 const TargetRegisterClass *RC; 1387 1388 std::tie(InputPtrReg, RC) 1389 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 1390 1391 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1392 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); 1393 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 1394 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); 1395 1396 return DAG.getObjectPtrOffset(SL, BasePtr, Offset); 1397 } 1398 1399 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, 1400 const SDLoc &SL) const { 1401 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), 1402 FIRST_IMPLICIT); 1403 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); 1404 } 1405 1406 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, 1407 const SDLoc &SL, SDValue Val, 1408 bool Signed, 1409 const ISD::InputArg *Arg) const { 1410 // First, if it is a widened vector, narrow it. 1411 if (VT.isVector() && 1412 VT.getVectorNumElements() != MemVT.getVectorNumElements()) { 1413 EVT NarrowedVT = 1414 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 1415 VT.getVectorNumElements()); 1416 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, 1417 DAG.getConstant(0, SL, MVT::i32)); 1418 } 1419 1420 // Then convert the vector elements or scalar value. 1421 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && 1422 VT.bitsLT(MemVT)) { 1423 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; 1424 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); 1425 } 1426 1427 if (MemVT.isFloatingPoint()) 1428 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT); 1429 else if (Signed) 1430 Val = DAG.getSExtOrTrunc(Val, SL, VT); 1431 else 1432 Val = DAG.getZExtOrTrunc(Val, SL, VT); 1433 1434 return Val; 1435 } 1436 1437 SDValue SITargetLowering::lowerKernargMemParameter( 1438 SelectionDAG &DAG, EVT VT, EVT MemVT, 1439 const SDLoc &SL, SDValue Chain, 1440 uint64_t Offset, unsigned Align, bool Signed, 1441 const ISD::InputArg *Arg) const { 1442 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 1443 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 1444 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 1445 1446 // Try to avoid using an extload by loading earlier than the argument address, 1447 // and extracting the relevant bits. The load should hopefully be merged with 1448 // the previous argument. 1449 if (MemVT.getStoreSize() < 4 && Align < 4) { 1450 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). 1451 int64_t AlignDownOffset = alignDown(Offset, 4); 1452 int64_t OffsetDiff = Offset - AlignDownOffset; 1453 1454 EVT IntVT = MemVT.changeTypeToInteger(); 1455 1456 // TODO: If we passed in the base kernel offset we could have a better 1457 // alignment than 4, but we don't really need it. 
1458 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); 1459 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4, 1460 MachineMemOperand::MODereferenceable | 1461 MachineMemOperand::MOInvariant); 1462 1463 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); 1464 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); 1465 1466 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); 1467 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); 1468 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); 1469 1470 1471 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); 1472 } 1473 1474 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); 1475 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, 1476 MachineMemOperand::MODereferenceable | 1477 MachineMemOperand::MOInvariant); 1478 1479 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); 1480 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); 1481 } 1482 1483 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, 1484 const SDLoc &SL, SDValue Chain, 1485 const ISD::InputArg &Arg) const { 1486 MachineFunction &MF = DAG.getMachineFunction(); 1487 MachineFrameInfo &MFI = MF.getFrameInfo(); 1488 1489 if (Arg.Flags.isByVal()) { 1490 unsigned Size = Arg.Flags.getByValSize(); 1491 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); 1492 return DAG.getFrameIndex(FrameIdx, MVT::i32); 1493 } 1494 1495 unsigned ArgOffset = VA.getLocMemOffset(); 1496 unsigned ArgSize = VA.getValVT().getStoreSize(); 1497 1498 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); 1499 1500 // Create load nodes to retrieve arguments from the stack. 1501 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1502 SDValue ArgValue; 1503 1504 // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT) 1505 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 1506 MVT MemVT = VA.getValVT(); 1507 1508 switch (VA.getLocInfo()) { 1509 default: 1510 break; 1511 case CCValAssign::BCvt: 1512 MemVT = VA.getLocVT(); 1513 break; 1514 case CCValAssign::SExt: 1515 ExtType = ISD::SEXTLOAD; 1516 break; 1517 case CCValAssign::ZExt: 1518 ExtType = ISD::ZEXTLOAD; 1519 break; 1520 case CCValAssign::AExt: 1521 ExtType = ISD::EXTLOAD; 1522 break; 1523 } 1524 1525 ArgValue = DAG.getExtLoad( 1526 ExtType, SL, VA.getLocVT(), Chain, FIN, 1527 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 1528 MemVT); 1529 return ArgValue; 1530 } 1531 1532 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, 1533 const SIMachineFunctionInfo &MFI, 1534 EVT VT, 1535 AMDGPUFunctionArgInfo::PreloadedValue PVID) const { 1536 const ArgDescriptor *Reg; 1537 const TargetRegisterClass *RC; 1538 1539 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID); 1540 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); 1541 } 1542 1543 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, 1544 CallingConv::ID CallConv, 1545 ArrayRef<ISD::InputArg> Ins, 1546 BitVector &Skipped, 1547 FunctionType *FType, 1548 SIMachineFunctionInfo *Info) { 1549 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { 1550 const ISD::InputArg *Arg = &Ins[I]; 1551 1552 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) && 1553 "vector type argument should have been split"); 1554 1555 // First check if it's a PS input addr. 
1556 if (CallConv == CallingConv::AMDGPU_PS &&
1557 !Arg->Flags.isInReg() && PSInputNum <= 15) {
1558 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1559
1560 // Inconveniently only the first part of the split is marked as isSplit,
1561 // so skip to the end. We only want to increment PSInputNum once for the
1562 // entire split argument.
1563 if (Arg->Flags.isSplit()) {
1564 while (!Arg->Flags.isSplitEnd()) {
1565 assert(!Arg->VT.isVector() &&
1566 "unexpected vector split in ps argument type");
1567 if (!SkipArg)
1568 Splits.push_back(*Arg);
1569 Arg = &Ins[++I];
1570 }
1571 }
1572
1573 if (SkipArg) {
1574 // We can safely skip PS inputs.
1575 Skipped.set(Arg->getOrigArgIndex());
1576 ++PSInputNum;
1577 continue;
1578 }
1579
1580 Info->markPSInputAllocated(PSInputNum);
1581 if (Arg->Used)
1582 Info->markPSInputEnabled(PSInputNum);
1583
1584 ++PSInputNum;
1585 }
1586
1587 Splits.push_back(*Arg);
1588 }
1589 }
1590
1591 // Allocate special inputs passed in VGPRs.
1592 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1593 MachineFunction &MF,
1594 const SIRegisterInfo &TRI,
1595 SIMachineFunctionInfo &Info) {
1596 if (Info.hasWorkItemIDX()) {
1597 unsigned Reg = AMDGPU::VGPR0;
1598 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1599
1600 CCInfo.AllocateReg(Reg);
1601 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1602 }
1603
1604 if (Info.hasWorkItemIDY()) {
1605 unsigned Reg = AMDGPU::VGPR1;
1606 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1607
1608 CCInfo.AllocateReg(Reg);
1609 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1610 }
1611
1612 if (Info.hasWorkItemIDZ()) {
1613 unsigned Reg = AMDGPU::VGPR2;
1614 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1615
1616 CCInfo.AllocateReg(Reg);
1617 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1618 }
1619 }
1620
1621 // Try to allocate a VGPR at the end of the argument list, or if no argument
1622 // VGPRs are left, allocate a stack slot instead.
1623 // If \p Mask is given, it indicates the bitfield position in the register.
1624 // If \p Arg is given, use it with the new \p Mask instead of allocating a new one.
1625 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1626 ArgDescriptor Arg = ArgDescriptor()) {
1627 if (Arg.isSet())
1628 return ArgDescriptor::createArg(Arg, Mask);
1629
1630 ArrayRef<MCPhysReg> ArgVGPRs
1631 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1632 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1633 if (RegIdx == ArgVGPRs.size()) {
1634 // Spill to stack required.
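// No argument VGPRs are left: reserve a 4-byte, 4-byte-aligned stack slot and
// describe the input as a stack location carrying the same mask.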
1635 int64_t Offset = CCInfo.AllocateStack(4, 4); 1636 1637 return ArgDescriptor::createStack(Offset, Mask); 1638 } 1639 1640 unsigned Reg = ArgVGPRs[RegIdx]; 1641 Reg = CCInfo.AllocateReg(Reg); 1642 assert(Reg != AMDGPU::NoRegister); 1643 1644 MachineFunction &MF = CCInfo.getMachineFunction(); 1645 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1646 return ArgDescriptor::createRegister(Reg, Mask); 1647 } 1648 1649 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, 1650 const TargetRegisterClass *RC, 1651 unsigned NumArgRegs) { 1652 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); 1653 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); 1654 if (RegIdx == ArgSGPRs.size()) 1655 report_fatal_error("ran out of SGPRs for arguments"); 1656 1657 unsigned Reg = ArgSGPRs[RegIdx]; 1658 Reg = CCInfo.AllocateReg(Reg); 1659 assert(Reg != AMDGPU::NoRegister); 1660 1661 MachineFunction &MF = CCInfo.getMachineFunction(); 1662 MF.addLiveIn(Reg, RC); 1663 return ArgDescriptor::createRegister(Reg); 1664 } 1665 1666 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) { 1667 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); 1668 } 1669 1670 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) { 1671 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); 1672 } 1673 1674 static void allocateSpecialInputVGPRs(CCState &CCInfo, 1675 MachineFunction &MF, 1676 const SIRegisterInfo &TRI, 1677 SIMachineFunctionInfo &Info) { 1678 const unsigned Mask = 0x3ff; 1679 ArgDescriptor Arg; 1680 1681 if (Info.hasWorkItemIDX()) { 1682 Arg = allocateVGPR32Input(CCInfo, Mask); 1683 Info.setWorkItemIDX(Arg); 1684 } 1685 1686 if (Info.hasWorkItemIDY()) { 1687 Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg); 1688 Info.setWorkItemIDY(Arg); 1689 } 1690 1691 if (Info.hasWorkItemIDZ()) 1692 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg)); 1693 } 1694 1695 static void allocateSpecialInputSGPRs(CCState &CCInfo, 1696 MachineFunction &MF, 1697 const SIRegisterInfo &TRI, 1698 SIMachineFunctionInfo &Info) { 1699 auto &ArgInfo = Info.getArgInfo(); 1700 1701 // TODO: Unify handling with private memory pointers. 1702 1703 if (Info.hasDispatchPtr()) 1704 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo); 1705 1706 if (Info.hasQueuePtr()) 1707 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo); 1708 1709 if (Info.hasKernargSegmentPtr()) 1710 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo); 1711 1712 if (Info.hasDispatchID()) 1713 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo); 1714 1715 // flat_scratch_init is not applicable for non-kernel functions. 1716 1717 if (Info.hasWorkGroupIDX()) 1718 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo); 1719 1720 if (Info.hasWorkGroupIDY()) 1721 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo); 1722 1723 if (Info.hasWorkGroupIDZ()) 1724 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo); 1725 1726 if (Info.hasImplicitArgPtr()) 1727 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo); 1728 } 1729 1730 // Allocate special inputs passed in user SGPRs. 1731 static void allocateHSAUserSGPRs(CCState &CCInfo, 1732 MachineFunction &MF, 1733 const SIRegisterInfo &TRI, 1734 SIMachineFunctionInfo &Info) { 1735 if (Info.hasImplicitBufferPtr()) { 1736 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); 1737 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); 1738 CCInfo.AllocateReg(ImplicitBufferPtrReg); 1739 } 1740 1741 // FIXME: How should these inputs interact with inreg / custom SGPR inputs? 
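// Each add*() call below reserves the next unused user SGPR(s), so the order
// of these checks determines the user SGPR layout seen by the kernel.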
1742 if (Info.hasPrivateSegmentBuffer()) { 1743 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); 1744 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); 1745 CCInfo.AllocateReg(PrivateSegmentBufferReg); 1746 } 1747 1748 if (Info.hasDispatchPtr()) { 1749 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI); 1750 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); 1751 CCInfo.AllocateReg(DispatchPtrReg); 1752 } 1753 1754 if (Info.hasQueuePtr()) { 1755 unsigned QueuePtrReg = Info.addQueuePtr(TRI); 1756 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); 1757 CCInfo.AllocateReg(QueuePtrReg); 1758 } 1759 1760 if (Info.hasKernargSegmentPtr()) { 1761 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI); 1762 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); 1763 CCInfo.AllocateReg(InputPtrReg); 1764 } 1765 1766 if (Info.hasDispatchID()) { 1767 unsigned DispatchIDReg = Info.addDispatchID(TRI); 1768 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); 1769 CCInfo.AllocateReg(DispatchIDReg); 1770 } 1771 1772 if (Info.hasFlatScratchInit()) { 1773 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI); 1774 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); 1775 CCInfo.AllocateReg(FlatScratchInitReg); 1776 } 1777 1778 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read 1779 // these from the dispatch pointer. 1780 } 1781 1782 // Allocate special input registers that are initialized per-wave. 1783 static void allocateSystemSGPRs(CCState &CCInfo, 1784 MachineFunction &MF, 1785 SIMachineFunctionInfo &Info, 1786 CallingConv::ID CallConv, 1787 bool IsShader) { 1788 if (Info.hasWorkGroupIDX()) { 1789 unsigned Reg = Info.addWorkGroupIDX(); 1790 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1791 CCInfo.AllocateReg(Reg); 1792 } 1793 1794 if (Info.hasWorkGroupIDY()) { 1795 unsigned Reg = Info.addWorkGroupIDY(); 1796 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1797 CCInfo.AllocateReg(Reg); 1798 } 1799 1800 if (Info.hasWorkGroupIDZ()) { 1801 unsigned Reg = Info.addWorkGroupIDZ(); 1802 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1803 CCInfo.AllocateReg(Reg); 1804 } 1805 1806 if (Info.hasWorkGroupInfo()) { 1807 unsigned Reg = Info.addWorkGroupInfo(); 1808 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1809 CCInfo.AllocateReg(Reg); 1810 } 1811 1812 if (Info.hasPrivateSegmentWaveByteOffset()) { 1813 // Scratch wave offset passed in system SGPR. 1814 unsigned PrivateSegmentWaveByteOffsetReg; 1815 1816 if (IsShader) { 1817 PrivateSegmentWaveByteOffsetReg = 1818 Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); 1819 1820 // This is true if the scratch wave byte offset doesn't have a fixed 1821 // location. 1822 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { 1823 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); 1824 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); 1825 } 1826 } else 1827 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); 1828 1829 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); 1830 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); 1831 } 1832 } 1833 1834 static void reservePrivateMemoryRegs(const TargetMachine &TM, 1835 MachineFunction &MF, 1836 const SIRegisterInfo &TRI, 1837 SIMachineFunctionInfo &Info) { 1838 // Now that we've figured out where the scratch register inputs are, see if 1839 // should reserve the arguments and use them directly. 
1840 MachineFrameInfo &MFI = MF.getFrameInfo(); 1841 bool HasStackObjects = MFI.hasStackObjects(); 1842 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1843 1844 // Record that we know we have non-spill stack objects so we don't need to 1845 // check all stack objects later. 1846 if (HasStackObjects) 1847 Info.setHasNonSpillStackObjects(true); 1848 1849 // Everything live out of a block is spilled with fast regalloc, so it's 1850 // almost certain that spilling will be required. 1851 if (TM.getOptLevel() == CodeGenOpt::None) 1852 HasStackObjects = true; 1853 1854 // For now assume stack access is needed in any callee functions, so we need 1855 // the scratch registers to pass in. 1856 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); 1857 1858 if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { 1859 // If we have stack objects, we unquestionably need the private buffer 1860 // resource. For the Code Object V2 ABI, this will be the first 4 user 1861 // SGPR inputs. We can reserve those and use them directly. 1862 1863 unsigned PrivateSegmentBufferReg = 1864 Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); 1865 Info.setScratchRSrcReg(PrivateSegmentBufferReg); 1866 } else { 1867 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); 1868 // We tentatively reserve the last registers (skipping the last registers 1869 // which may contain VCC, FLAT_SCR, and XNACK). After register allocation, 1870 // we'll replace these with the ones immediately after those which were 1871 // really allocated. In the prologue copies will be inserted from the 1872 // argument to these reserved registers. 1873 1874 // Without HSA, relocations are used for the scratch pointer and the 1875 // buffer resource setup is always inserted in the prologue. Scratch wave 1876 // offset is still in an input SGPR. 1877 Info.setScratchRSrcReg(ReservedBufferReg); 1878 } 1879 1880 // hasFP should be accurate for kernels even before the frame is finalized. 1881 if (ST.getFrameLowering()->hasFP(MF)) { 1882 MachineRegisterInfo &MRI = MF.getRegInfo(); 1883 1884 // Try to use s32 as the SP, but move it if it would interfere with input 1885 // arguments. This won't work with calls though. 1886 // 1887 // FIXME: Move SP to avoid any possible inputs, or find a way to spill input 1888 // registers. 1889 if (!MRI.isLiveIn(AMDGPU::SGPR32)) { 1890 Info.setStackPtrOffsetReg(AMDGPU::SGPR32); 1891 } else { 1892 assert(AMDGPU::isShader(MF.getFunction().getCallingConv())); 1893 1894 if (MFI.hasCalls()) 1895 report_fatal_error("call in graphics shader with too many input SGPRs"); 1896 1897 for (unsigned Reg : AMDGPU::SGPR_32RegClass) { 1898 if (!MRI.isLiveIn(Reg)) { 1899 Info.setStackPtrOffsetReg(Reg); 1900 break; 1901 } 1902 } 1903 1904 if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) 1905 report_fatal_error("failed to find register for SP"); 1906 } 1907 1908 if (MFI.hasCalls()) { 1909 Info.setScratchWaveOffsetReg(AMDGPU::SGPR33); 1910 Info.setFrameOffsetReg(AMDGPU::SGPR33); 1911 } else { 1912 unsigned ReservedOffsetReg = 1913 TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1914 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1915 Info.setFrameOffsetReg(ReservedOffsetReg); 1916 } 1917 } else if (RequiresStackAccess) { 1918 assert(!MFI.hasCalls()); 1919 // We know there are accesses and they will be done relative to SP, so just 1920 // pin it to the input. 1921 // 1922 // FIXME: Should not do this if inline asm is reading/writing these 1923 // registers. 
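// Use the preloaded scratch wave offset register directly as the stack
// pointer, scratch wave offset, and frame offset.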
1924 unsigned PreloadedSP = Info.getPreloadedReg( 1925 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1926 1927 Info.setStackPtrOffsetReg(PreloadedSP); 1928 Info.setScratchWaveOffsetReg(PreloadedSP); 1929 Info.setFrameOffsetReg(PreloadedSP); 1930 } else { 1931 assert(!MFI.hasCalls()); 1932 1933 // There may not be stack access at all. There may still be spills, or 1934 // access of a constant pointer (in which cases an extra copy will be 1935 // emitted in the prolog). 1936 unsigned ReservedOffsetReg 1937 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1938 Info.setStackPtrOffsetReg(ReservedOffsetReg); 1939 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1940 Info.setFrameOffsetReg(ReservedOffsetReg); 1941 } 1942 } 1943 1944 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { 1945 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1946 return !Info->isEntryFunction(); 1947 } 1948 1949 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 1950 1951 } 1952 1953 void SITargetLowering::insertCopiesSplitCSR( 1954 MachineBasicBlock *Entry, 1955 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 1956 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1957 1958 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 1959 if (!IStart) 1960 return; 1961 1962 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1963 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 1964 MachineBasicBlock::iterator MBBI = Entry->begin(); 1965 for (const MCPhysReg *I = IStart; *I; ++I) { 1966 const TargetRegisterClass *RC = nullptr; 1967 if (AMDGPU::SReg_64RegClass.contains(*I)) 1968 RC = &AMDGPU::SGPR_64RegClass; 1969 else if (AMDGPU::SReg_32RegClass.contains(*I)) 1970 RC = &AMDGPU::SGPR_32RegClass; 1971 else 1972 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 1973 1974 unsigned NewVR = MRI->createVirtualRegister(RC); 1975 // Create copy from CSR to a virtual register. 1976 Entry->addLiveIn(*I); 1977 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 1978 .addReg(*I); 1979 1980 // Insert the copy-back instructions right before the terminator. 
1981 for (auto *Exit : Exits) 1982 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 1983 TII->get(TargetOpcode::COPY), *I) 1984 .addReg(NewVR); 1985 } 1986 } 1987 1988 SDValue SITargetLowering::LowerFormalArguments( 1989 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 1990 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 1991 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 1992 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1993 1994 MachineFunction &MF = DAG.getMachineFunction(); 1995 const Function &Fn = MF.getFunction(); 1996 FunctionType *FType = MF.getFunction().getFunctionType(); 1997 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1998 1999 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 2000 DiagnosticInfoUnsupported NoGraphicsHSA( 2001 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 2002 DAG.getContext()->diagnose(NoGraphicsHSA); 2003 return DAG.getEntryNode(); 2004 } 2005 2006 SmallVector<ISD::InputArg, 16> Splits; 2007 SmallVector<CCValAssign, 16> ArgLocs; 2008 BitVector Skipped(Ins.size()); 2009 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2010 *DAG.getContext()); 2011 2012 bool IsShader = AMDGPU::isShader(CallConv); 2013 bool IsKernel = AMDGPU::isKernel(CallConv); 2014 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); 2015 2016 if (IsShader) { 2017 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); 2018 2019 // At least one interpolation mode must be enabled or else the GPU will 2020 // hang. 2021 // 2022 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user 2023 // set PSInputAddr, the user wants to enable some bits after the compilation 2024 // based on run-time states. Since we can't know what the final PSInputEna 2025 // will look like, so we shouldn't do anything here and the user should take 2026 // responsibility for the correct programming. 2027 // 2028 // Otherwise, the following restrictions apply: 2029 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. 2030 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be 2031 // enabled too. 2032 if (CallConv == CallingConv::AMDGPU_PS) { 2033 if ((Info->getPSInputAddr() & 0x7F) == 0 || 2034 ((Info->getPSInputAddr() & 0xF) == 0 && 2035 Info->isPSInputAllocated(11))) { 2036 CCInfo.AllocateReg(AMDGPU::VGPR0); 2037 CCInfo.AllocateReg(AMDGPU::VGPR1); 2038 Info->markPSInputAllocated(0); 2039 Info->markPSInputEnabled(0); 2040 } 2041 if (Subtarget->isAmdPalOS()) { 2042 // For isAmdPalOS, the user does not enable some bits after compilation 2043 // based on run-time states; the register values being generated here are 2044 // the final ones set in hardware. Therefore we need to apply the 2045 // workaround to PSInputAddr and PSInputEnable together. (The case where 2046 // a bit is set in PSInputAddr but not PSInputEnable is where the 2047 // frontend set up an input arg for a particular interpolation mode, but 2048 // nothing uses that input arg. Really we should have an earlier pass 2049 // that removes such an arg.) 
2050 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); 2051 if ((PsInputBits & 0x7F) == 0 || 2052 ((PsInputBits & 0xF) == 0 && 2053 (PsInputBits >> 11 & 1))) 2054 Info->markPSInputEnabled( 2055 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); 2056 } 2057 } 2058 2059 assert(!Info->hasDispatchPtr() && 2060 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 2061 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 2062 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 2063 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 2064 !Info->hasWorkItemIDZ()); 2065 } else if (IsKernel) { 2066 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 2067 } else { 2068 Splits.append(Ins.begin(), Ins.end()); 2069 } 2070 2071 if (IsEntryFunc) { 2072 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); 2073 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); 2074 } 2075 2076 if (IsKernel) { 2077 analyzeFormalArgumentsCompute(CCInfo, Ins); 2078 } else { 2079 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); 2080 CCInfo.AnalyzeFormalArguments(Splits, AssignFn); 2081 } 2082 2083 SmallVector<SDValue, 16> Chains; 2084 2085 // FIXME: This is the minimum kernel argument alignment. We should improve 2086 // this to the maximum alignment of the arguments. 2087 // 2088 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit 2089 // kern arg offset. 2090 const unsigned KernelArgBaseAlign = 16; 2091 2092 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 2093 const ISD::InputArg &Arg = Ins[i]; 2094 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { 2095 InVals.push_back(DAG.getUNDEF(Arg.VT)); 2096 continue; 2097 } 2098 2099 CCValAssign &VA = ArgLocs[ArgIdx++]; 2100 MVT VT = VA.getLocVT(); 2101 2102 if (IsEntryFunc && VA.isMemLoc()) { 2103 VT = Ins[i].VT; 2104 EVT MemVT = VA.getLocVT(); 2105 2106 const uint64_t Offset = VA.getLocMemOffset(); 2107 unsigned Align = MinAlign(KernelArgBaseAlign, Offset); 2108 2109 SDValue Arg = lowerKernargMemParameter( 2110 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]); 2111 Chains.push_back(Arg.getValue(1)); 2112 2113 auto *ParamTy = 2114 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 2115 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 2116 ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2117 ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { 2118 // On SI local pointers are just offsets into LDS, so they are always 2119 // less than 16-bits. On CI and newer they could potentially be 2120 // real pointers, so we can't guarantee their size. 2121 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 2122 DAG.getValueType(MVT::i16)); 2123 } 2124 2125 InVals.push_back(Arg); 2126 continue; 2127 } else if (!IsEntryFunc && VA.isMemLoc()) { 2128 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); 2129 InVals.push_back(Val); 2130 if (!Arg.Flags.isByVal()) 2131 Chains.push_back(Val.getValue(1)); 2132 continue; 2133 } 2134 2135 assert(VA.isRegLoc() && "Parameter must be in a register!"); 2136 2137 unsigned Reg = VA.getLocReg(); 2138 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2139 EVT ValVT = VA.getValVT(); 2140 2141 Reg = MF.addLiveIn(Reg, RC); 2142 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 2143 2144 if (Arg.Flags.isSRet()) { 2145 // The return object should be reasonably addressable. 
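// The high bits of a frame-index-based pointer are known to be zero, so an
// AssertZext is emitted below to expose that to later combines.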
2146
2147 // FIXME: This helps when the return is a real sret. If it is an
2148 // automatically inserted sret (i.e. CanLowerReturn returns false), an
2149 // extra copy is inserted in SelectionDAGBuilder which obscures this.
2150 unsigned NumBits
2151 = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2152 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2153 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2154 }
2155
2156 // If this is an 8 or 16-bit value, it is really passed promoted
2157 // to 32 bits. Insert an assert[sz]ext to capture this, then
2158 // truncate to the right size.
2159 switch (VA.getLocInfo()) {
2160 case CCValAssign::Full:
2161 break;
2162 case CCValAssign::BCvt:
2163 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2164 break;
2165 case CCValAssign::SExt:
2166 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2167 DAG.getValueType(ValVT));
2168 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2169 break;
2170 case CCValAssign::ZExt:
2171 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2172 DAG.getValueType(ValVT));
2173 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2174 break;
2175 case CCValAssign::AExt:
2176 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2177 break;
2178 default:
2179 llvm_unreachable("Unknown loc info!");
2180 }
2181
2182 InVals.push_back(Val);
2183 }
2184
2185 if (!IsEntryFunc) {
2186 // Special inputs come after user arguments.
2187 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2188 }
2189
2190 // Start adding system SGPRs.
2191 if (IsEntryFunc) {
2192 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2193 } else {
2194 CCInfo.AllocateReg(Info->getScratchRSrcReg());
2195 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2196 CCInfo.AllocateReg(Info->getFrameOffsetReg());
2197 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2198 }
2199
2200 auto &ArgUsageInfo =
2201 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2202 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2203
2204 unsigned StackArgSize = CCInfo.getNextStackOffset();
2205 Info->setBytesInStackArgArea(StackArgSize);
2206
2207 return Chains.empty() ? Chain :
2208 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2209 }
2210
2211 // TODO: If return values can't fit in registers, we should return as many as
2212 // possible in registers before passing on stack.
2213 bool SITargetLowering::CanLowerReturn(
2214 CallingConv::ID CallConv,
2215 MachineFunction &MF, bool IsVarArg,
2216 const SmallVectorImpl<ISD::OutputArg> &Outs,
2217 LLVMContext &Context) const {
2218 // Replacing returns with sret/stack usage doesn't make sense for shaders.
2219 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2220 // for shaders. Vector types should be explicitly handled by CC.
2221 if (AMDGPU::isEntryFunctionCC(CallConv)) 2222 return true; 2223 2224 SmallVector<CCValAssign, 16> RVLocs; 2225 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 2226 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); 2227 } 2228 2229 SDValue 2230 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2231 bool isVarArg, 2232 const SmallVectorImpl<ISD::OutputArg> &Outs, 2233 const SmallVectorImpl<SDValue> &OutVals, 2234 const SDLoc &DL, SelectionDAG &DAG) const { 2235 MachineFunction &MF = DAG.getMachineFunction(); 2236 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2237 2238 if (AMDGPU::isKernel(CallConv)) { 2239 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 2240 OutVals, DL, DAG); 2241 } 2242 2243 bool IsShader = AMDGPU::isShader(CallConv); 2244 2245 Info->setIfReturnsVoid(Outs.empty()); 2246 bool IsWaveEnd = Info->returnsVoid() && IsShader; 2247 2248 // CCValAssign - represent the assignment of the return value to a location. 2249 SmallVector<CCValAssign, 48> RVLocs; 2250 SmallVector<ISD::OutputArg, 48> Splits; 2251 2252 // CCState - Info about the registers and stack slots. 2253 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2254 *DAG.getContext()); 2255 2256 // Analyze outgoing return values. 2257 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); 2258 2259 SDValue Flag; 2260 SmallVector<SDValue, 48> RetOps; 2261 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2262 2263 // Add return address for callable functions. 2264 if (!Info->isEntryFunction()) { 2265 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2266 SDValue ReturnAddrReg = CreateLiveInRegister( 2267 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2268 2269 SDValue ReturnAddrVirtualReg = DAG.getRegister( 2270 MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass), 2271 MVT::i64); 2272 Chain = 2273 DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag); 2274 Flag = Chain.getValue(1); 2275 RetOps.push_back(ReturnAddrVirtualReg); 2276 } 2277 2278 // Copy the result values into the output registers. 2279 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; 2280 ++I, ++RealRVLocIdx) { 2281 CCValAssign &VA = RVLocs[I]; 2282 assert(VA.isRegLoc() && "Can only return in registers!"); 2283 // TODO: Partially return in registers if return values don't fit. 2284 SDValue Arg = OutVals[RealRVLocIdx]; 2285 2286 // Copied from other backends. 2287 switch (VA.getLocInfo()) { 2288 case CCValAssign::Full: 2289 break; 2290 case CCValAssign::BCvt: 2291 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2292 break; 2293 case CCValAssign::SExt: 2294 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2295 break; 2296 case CCValAssign::ZExt: 2297 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2298 break; 2299 case CCValAssign::AExt: 2300 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2301 break; 2302 default: 2303 llvm_unreachable("Unknown loc info!"); 2304 } 2305 2306 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 2307 Flag = Chain.getValue(1); 2308 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2309 } 2310 2311 // FIXME: Does sret work properly? 
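// For callable functions, also add the callee-saved registers preserved via
// copies (see insertCopiesSplitCSR) as return operands so they are treated
// as live out of the return.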
2312 if (!Info->isEntryFunction()) { 2313 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2314 const MCPhysReg *I = 2315 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2316 if (I) { 2317 for (; *I; ++I) { 2318 if (AMDGPU::SReg_64RegClass.contains(*I)) 2319 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 2320 else if (AMDGPU::SReg_32RegClass.contains(*I)) 2321 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2322 else 2323 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2324 } 2325 } 2326 } 2327 2328 // Update chain and glue. 2329 RetOps[0] = Chain; 2330 if (Flag.getNode()) 2331 RetOps.push_back(Flag); 2332 2333 unsigned Opc = AMDGPUISD::ENDPGM; 2334 if (!IsWaveEnd) 2335 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; 2336 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 2337 } 2338 2339 SDValue SITargetLowering::LowerCallResult( 2340 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, 2341 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2342 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, 2343 SDValue ThisVal) const { 2344 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); 2345 2346 // Assign locations to each value returned by this call. 2347 SmallVector<CCValAssign, 16> RVLocs; 2348 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2349 *DAG.getContext()); 2350 CCInfo.AnalyzeCallResult(Ins, RetCC); 2351 2352 // Copy all of the result registers out of their specified physreg. 2353 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2354 CCValAssign VA = RVLocs[i]; 2355 SDValue Val; 2356 2357 if (VA.isRegLoc()) { 2358 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); 2359 Chain = Val.getValue(1); 2360 InFlag = Val.getValue(2); 2361 } else if (VA.isMemLoc()) { 2362 report_fatal_error("TODO: return values in memory"); 2363 } else 2364 llvm_unreachable("unknown argument location type"); 2365 2366 switch (VA.getLocInfo()) { 2367 case CCValAssign::Full: 2368 break; 2369 case CCValAssign::BCvt: 2370 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2371 break; 2372 case CCValAssign::ZExt: 2373 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, 2374 DAG.getValueType(VA.getValVT())); 2375 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2376 break; 2377 case CCValAssign::SExt: 2378 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, 2379 DAG.getValueType(VA.getValVT())); 2380 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2381 break; 2382 case CCValAssign::AExt: 2383 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2384 break; 2385 default: 2386 llvm_unreachable("Unknown loc info!"); 2387 } 2388 2389 InVals.push_back(Val); 2390 } 2391 2392 return Chain; 2393 } 2394 2395 // Add code to pass special inputs required depending on used features separate 2396 // from the explicit user arguments present in the IR. 2397 void SITargetLowering::passSpecialInputs( 2398 CallLoweringInfo &CLI, 2399 CCState &CCInfo, 2400 const SIMachineFunctionInfo &Info, 2401 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 2402 SmallVectorImpl<SDValue> &MemOpChains, 2403 SDValue Chain) const { 2404 // If we don't have a call site, this was a call inserted by 2405 // legalization. These can never use special inputs. 
2406 if (!CLI.CS) 2407 return; 2408 2409 const Function *CalleeFunc = CLI.CS.getCalledFunction(); 2410 assert(CalleeFunc); 2411 2412 SelectionDAG &DAG = CLI.DAG; 2413 const SDLoc &DL = CLI.DL; 2414 2415 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2416 2417 auto &ArgUsageInfo = 2418 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2419 const AMDGPUFunctionArgInfo &CalleeArgInfo 2420 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); 2421 2422 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); 2423 2424 // TODO: Unify with private memory register handling. This is complicated by 2425 // the fact that at least in kernels, the input argument is not necessarily 2426 // in the same location as the input. 2427 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { 2428 AMDGPUFunctionArgInfo::DISPATCH_PTR, 2429 AMDGPUFunctionArgInfo::QUEUE_PTR, 2430 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR, 2431 AMDGPUFunctionArgInfo::DISPATCH_ID, 2432 AMDGPUFunctionArgInfo::WORKGROUP_ID_X, 2433 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, 2434 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, 2435 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR 2436 }; 2437 2438 for (auto InputID : InputRegs) { 2439 const ArgDescriptor *OutgoingArg; 2440 const TargetRegisterClass *ArgRC; 2441 2442 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID); 2443 if (!OutgoingArg) 2444 continue; 2445 2446 const ArgDescriptor *IncomingArg; 2447 const TargetRegisterClass *IncomingArgRC; 2448 std::tie(IncomingArg, IncomingArgRC) 2449 = CallerArgInfo.getPreloadedValue(InputID); 2450 assert(IncomingArgRC == ArgRC); 2451 2452 // All special arguments are ints for now. 2453 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; 2454 SDValue InputReg; 2455 2456 if (IncomingArg) { 2457 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); 2458 } else { 2459 // The implicit arg ptr is special because it doesn't have a corresponding 2460 // input for kernels, and is computed from the kernarg segment pointer. 2461 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 2462 InputReg = getImplicitArgPtr(DAG, DL); 2463 } 2464 2465 if (OutgoingArg->isRegister()) { 2466 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2467 } else { 2468 unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4); 2469 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, 2470 SpecialArgOffset); 2471 MemOpChains.push_back(ArgStore); 2472 } 2473 } 2474 2475 // Pack workitem IDs into a single register or pass it as is if already 2476 // packed. 
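// The packed layout matches allocateSpecialInputVGPRs: X in bits [9:0],
// Y in bits [19:10], and Z in bits [29:20] of a single 32-bit register.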
2477 const ArgDescriptor *OutgoingArg;
2478 const TargetRegisterClass *ArgRC;
2479
2480 std::tie(OutgoingArg, ArgRC) =
2481 CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2482 if (!OutgoingArg)
2483 std::tie(OutgoingArg, ArgRC) =
2484 CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2485 if (!OutgoingArg)
2486 std::tie(OutgoingArg, ArgRC) =
2487 CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2488 if (!OutgoingArg)
2489 return;
2490
2491 const ArgDescriptor *IncomingArgX
2492 = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2493 const ArgDescriptor *IncomingArgY
2494 = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2495 const ArgDescriptor *IncomingArgZ
2496 = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2497
2498 SDValue InputReg;
2499 SDLoc SL;
2500
2501 // If the incoming IDs are not packed we need to pack them.
2502 if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2503 InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2504
2505 if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2506 SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2507 Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2508 DAG.getShiftAmountConstant(10, MVT::i32, SL));
2509 InputReg = InputReg.getNode() ?
2510 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2511 }
2512
2513 if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2514 SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2515 Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2516 DAG.getShiftAmountConstant(20, MVT::i32, SL));
2517 InputReg = InputReg.getNode() ?
2518 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2519 }
2520
2521 if (!InputReg.getNode()) {
2522 // Workitem IDs are already packed; any of the present incoming arguments
2523 // will carry all required fields.
2524 ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2525 IncomingArgX ? *IncomingArgX :
2526 IncomingArgY ? *IncomingArgY :
2527 *IncomingArgZ, ~0u);
2528 InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2529 }
2530
2531 if (OutgoingArg->isRegister()) {
2532 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2533 } else {
2534 unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2535 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2536 SpecialArgOffset);
2537 MemOpChains.push_back(ArgStore);
2538 }
2539 }
2540
2541 static bool canGuaranteeTCO(CallingConv::ID CC) {
2542 return CC == CallingConv::Fast;
2543 }
2544
2545 /// Return true if we might ever do TCO for calls with this calling convention.
2546 static bool mayTailCallThisCC(CallingConv::ID CC) { 2547 switch (CC) { 2548 case CallingConv::C: 2549 return true; 2550 default: 2551 return canGuaranteeTCO(CC); 2552 } 2553 } 2554 2555 bool SITargetLowering::isEligibleForTailCallOptimization( 2556 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, 2557 const SmallVectorImpl<ISD::OutputArg> &Outs, 2558 const SmallVectorImpl<SDValue> &OutVals, 2559 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { 2560 if (!mayTailCallThisCC(CalleeCC)) 2561 return false; 2562 2563 MachineFunction &MF = DAG.getMachineFunction(); 2564 const Function &CallerF = MF.getFunction(); 2565 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2566 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2567 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2568 2569 // Kernels aren't callable, and don't have a live in return address so it 2570 // doesn't make sense to do a tail call with entry functions. 2571 if (!CallerPreserved) 2572 return false; 2573 2574 bool CCMatch = CallerCC == CalleeCC; 2575 2576 if (DAG.getTarget().Options.GuaranteedTailCallOpt) { 2577 if (canGuaranteeTCO(CalleeCC) && CCMatch) 2578 return true; 2579 return false; 2580 } 2581 2582 // TODO: Can we handle var args? 2583 if (IsVarArg) 2584 return false; 2585 2586 for (const Argument &Arg : CallerF.args()) { 2587 if (Arg.hasByValAttr()) 2588 return false; 2589 } 2590 2591 LLVMContext &Ctx = *DAG.getContext(); 2592 2593 // Check that the call results are passed in the same way. 2594 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, 2595 CCAssignFnForCall(CalleeCC, IsVarArg), 2596 CCAssignFnForCall(CallerCC, IsVarArg))) 2597 return false; 2598 2599 // The callee has to preserve all registers the caller needs to preserve. 2600 if (!CCMatch) { 2601 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2602 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2603 return false; 2604 } 2605 2606 // Nothing more to check if the callee is taking no arguments. 2607 if (Outs.empty()) 2608 return true; 2609 2610 SmallVector<CCValAssign, 16> ArgLocs; 2611 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); 2612 2613 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); 2614 2615 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 2616 // If the stack arguments for this call do not fit into our own save area then 2617 // the call cannot be made tail. 2618 // TODO: Is this really necessary? 2619 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) 2620 return false; 2621 2622 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2623 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); 2624 } 2625 2626 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 2627 if (!CI->isTailCall()) 2628 return false; 2629 2630 const Function *ParentFn = CI->getParent()->getParent(); 2631 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) 2632 return false; 2633 2634 auto Attr = ParentFn->getFnAttribute("disable-tail-calls"); 2635 return (Attr.getValueAsString() != "true"); 2636 } 2637 2638 // The wave scratch offset register is used as the global base pointer. 
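// Only direct, non-variadic calls from non-shader callers are handled here;
// variadic, indirect, and graphics-shader call sites are diagnosed below.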
2639 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, 2640 SmallVectorImpl<SDValue> &InVals) const { 2641 SelectionDAG &DAG = CLI.DAG; 2642 const SDLoc &DL = CLI.DL; 2643 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2644 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2645 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2646 SDValue Chain = CLI.Chain; 2647 SDValue Callee = CLI.Callee; 2648 bool &IsTailCall = CLI.IsTailCall; 2649 CallingConv::ID CallConv = CLI.CallConv; 2650 bool IsVarArg = CLI.IsVarArg; 2651 bool IsSibCall = false; 2652 bool IsThisReturn = false; 2653 MachineFunction &MF = DAG.getMachineFunction(); 2654 2655 if (IsVarArg) { 2656 return lowerUnhandledCall(CLI, InVals, 2657 "unsupported call to variadic function "); 2658 } 2659 2660 if (!CLI.CS.getInstruction()) 2661 report_fatal_error("unsupported libcall legalization"); 2662 2663 if (!CLI.CS.getCalledFunction()) { 2664 return lowerUnhandledCall(CLI, InVals, 2665 "unsupported indirect call to function "); 2666 } 2667 2668 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { 2669 return lowerUnhandledCall(CLI, InVals, 2670 "unsupported required tail call to function "); 2671 } 2672 2673 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) { 2674 // Note the issue is with the CC of the calling function, not of the call 2675 // itself. 2676 return lowerUnhandledCall(CLI, InVals, 2677 "unsupported call from graphics shader of function "); 2678 } 2679 2680 if (IsTailCall) { 2681 IsTailCall = isEligibleForTailCallOptimization( 2682 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); 2683 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) { 2684 report_fatal_error("failed to perform tail call elimination on a call " 2685 "site marked musttail"); 2686 } 2687 2688 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 2689 2690 // A sibling call is one where we're under the usual C ABI and not planning 2691 // to change that but can still do a tail call: 2692 if (!TailCallOpt && IsTailCall) 2693 IsSibCall = true; 2694 2695 if (IsTailCall) 2696 ++NumTailCalls; 2697 } 2698 2699 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2700 2701 // Analyze operands of the call, assigning locations to each operand. 2702 SmallVector<CCValAssign, 16> ArgLocs; 2703 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 2704 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); 2705 2706 CCInfo.AnalyzeCallOperands(Outs, AssignFn); 2707 2708 // Get a count of how many bytes are to be pushed on the stack. 2709 unsigned NumBytes = CCInfo.getNextStackOffset(); 2710 2711 if (IsSibCall) { 2712 // Since we're not changing the ABI to make this a tail call, the memory 2713 // operands are already available in the caller's incoming argument space. 2714 NumBytes = 0; 2715 } 2716 2717 // FPDiff is the byte offset of the call's argument area from the callee's. 2718 // Stores to callee stack arguments will be placed in FixedStackSlots offset 2719 // by this amount for a tail call. In a sibling call it must be 0 because the 2720 // caller will deallocate the entire stack and the callee still expects its 2721 // arguments to begin at SP+0. Completely unused for non-tail calls. 2722 int32_t FPDiff = 0; 2723 MachineFrameInfo &MFI = MF.getFrameInfo(); 2724 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2725 2726 // Adjust the stack pointer for the new arguments... 
2727 // These operations are automatically eliminated by the prolog/epilog pass 2728 if (!IsSibCall) { 2729 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); 2730 2731 SmallVector<SDValue, 4> CopyFromChains; 2732 2733 // In the HSA case, this should be an identity copy. 2734 SDValue ScratchRSrcReg 2735 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); 2736 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 2737 CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); 2738 Chain = DAG.getTokenFactor(DL, CopyFromChains); 2739 } 2740 2741 SmallVector<SDValue, 8> MemOpChains; 2742 MVT PtrVT = MVT::i32; 2743 2744 // Walk the register/memloc assignments, inserting copies/loads. 2745 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; 2746 ++i, ++realArgIdx) { 2747 CCValAssign &VA = ArgLocs[i]; 2748 SDValue Arg = OutVals[realArgIdx]; 2749 2750 // Promote the value if needed. 2751 switch (VA.getLocInfo()) { 2752 case CCValAssign::Full: 2753 break; 2754 case CCValAssign::BCvt: 2755 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2756 break; 2757 case CCValAssign::ZExt: 2758 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2759 break; 2760 case CCValAssign::SExt: 2761 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2762 break; 2763 case CCValAssign::AExt: 2764 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2765 break; 2766 case CCValAssign::FPExt: 2767 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); 2768 break; 2769 default: 2770 llvm_unreachable("Unknown loc info!"); 2771 } 2772 2773 if (VA.isRegLoc()) { 2774 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2775 } else { 2776 assert(VA.isMemLoc()); 2777 2778 SDValue DstAddr; 2779 MachinePointerInfo DstInfo; 2780 2781 unsigned LocMemOffset = VA.getLocMemOffset(); 2782 int32_t Offset = LocMemOffset; 2783 2784 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); 2785 unsigned Align = 0; 2786 2787 if (IsTailCall) { 2788 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2789 unsigned OpSize = Flags.isByVal() ? 2790 Flags.getByValSize() : VA.getValVT().getStoreSize(); 2791 2792 // FIXME: We can have better than the minimum byval required alignment. 2793 Align = Flags.isByVal() ? Flags.getByValAlign() : 2794 MinAlign(Subtarget->getStackAlignment(), Offset); 2795 2796 Offset = Offset + FPDiff; 2797 int FI = MFI.CreateFixedObject(OpSize, Offset, true); 2798 2799 DstAddr = DAG.getFrameIndex(FI, PtrVT); 2800 DstInfo = MachinePointerInfo::getFixedStack(MF, FI); 2801 2802 // Make sure any stack arguments overlapping with where we're storing 2803 // are loaded before this eventual operation. Otherwise they'll be 2804 // clobbered. 2805 2806 // FIXME: Why is this really necessary? This seems to just result in a 2807 // lot of code to copy the stack and write them back to the same 2808 // locations, which are supposed to be immutable? 
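// addTokenForArgument orders this store after any loads already issued from
// the same fixed stack slot, so those values are read before being
// overwritten.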
2809 Chain = addTokenForArgument(Chain, DAG, MFI, FI); 2810 } else { 2811 DstAddr = PtrOff; 2812 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); 2813 Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset); 2814 } 2815 2816 if (Outs[i].Flags.isByVal()) { 2817 SDValue SizeNode = 2818 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); 2819 SDValue Cpy = DAG.getMemcpy( 2820 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), 2821 /*isVol = */ false, /*AlwaysInline = */ true, 2822 /*isTailCall = */ false, DstInfo, 2823 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy( 2824 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS)))); 2825 2826 MemOpChains.push_back(Cpy); 2827 } else { 2828 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align); 2829 MemOpChains.push_back(Store); 2830 } 2831 } 2832 } 2833 2834 // Copy special input registers after user input arguments. 2835 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); 2836 2837 if (!MemOpChains.empty()) 2838 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 2839 2840 // Build a sequence of copy-to-reg nodes chained together with token chain 2841 // and flag operands which copy the outgoing args into the appropriate regs. 2842 SDValue InFlag; 2843 for (auto &RegToPass : RegsToPass) { 2844 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, 2845 RegToPass.second, InFlag); 2846 InFlag = Chain.getValue(1); 2847 } 2848 2849 2850 SDValue PhysReturnAddrReg; 2851 if (IsTailCall) { 2852 // Since the return is being combined with the call, we need to pass on the 2853 // return address. 2854 2855 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2856 SDValue ReturnAddrReg = CreateLiveInRegister( 2857 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2858 2859 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), 2860 MVT::i64); 2861 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag); 2862 InFlag = Chain.getValue(1); 2863 } 2864 2865 // We don't usually want to end the call-sequence here because we would tidy 2866 // the frame up *after* the call, however in the ABI-changing tail-call case 2867 // we've carefully laid out the parameters so that when sp is reset they'll be 2868 // in the correct location. 2869 if (IsTailCall && !IsSibCall) { 2870 Chain = DAG.getCALLSEQ_END(Chain, 2871 DAG.getTargetConstant(NumBytes, DL, MVT::i32), 2872 DAG.getTargetConstant(0, DL, MVT::i32), 2873 InFlag, DL); 2874 InFlag = Chain.getValue(1); 2875 } 2876 2877 std::vector<SDValue> Ops; 2878 Ops.push_back(Chain); 2879 Ops.push_back(Callee); 2880 // Add a redundant copy of the callee global which will not be legalized, as 2881 // we need direct access to the callee later. 2882 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee); 2883 const GlobalValue *GV = GSD->getGlobal(); 2884 Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); 2885 2886 if (IsTailCall) { 2887 // Each tail call may have to adjust the stack by a different amount, so 2888 // this information must travel along with the operation for eventual 2889 // consumption by emitEpilogue. 2890 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); 2891 2892 Ops.push_back(PhysReturnAddrReg); 2893 } 2894 2895 // Add argument registers to the end of the list so that they are known live 2896 // into the call. 
2897 for (auto &RegToPass : RegsToPass) {
2898 Ops.push_back(DAG.getRegister(RegToPass.first,
2899 RegToPass.second.getValueType()));
2900 }
2901
2902 // Add a register mask operand representing the call-preserved registers.
2903
2904 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2905 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2906 assert(Mask && "Missing call preserved mask for calling convention");
2907 Ops.push_back(DAG.getRegisterMask(Mask));
2908
2909 if (InFlag.getNode())
2910 Ops.push_back(InFlag);
2911
2912 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2913
2914 // If we're doing a tail call, use a TC_RETURN here rather than an
2915 // actual call instruction.
2916 if (IsTailCall) {
2917 MFI.setHasTailCall();
2918 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2919 }
2920
2921 // Returns a chain and a flag for retval copy to use.
2922 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2923 Chain = Call.getValue(0);
2924 InFlag = Call.getValue(1);
2925
2926 uint64_t CalleePopBytes = NumBytes;
2927 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2928 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2929 InFlag, DL);
2930 if (!Ins.empty())
2931 InFlag = Chain.getValue(1);
2932
2933 // Handle result values, copying them out of physregs into vregs that we
2934 // return.
2935 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2936 InVals, IsThisReturn,
2937 IsThisReturn ? OutVals[0] : SDValue());
2938 }
2939
2940 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2941 SelectionDAG &DAG) const {
2942 unsigned Reg = StringSwitch<unsigned>(RegName)
2943 .Case("m0", AMDGPU::M0)
2944 .Case("exec", AMDGPU::EXEC)
2945 .Case("exec_lo", AMDGPU::EXEC_LO)
2946 .Case("exec_hi", AMDGPU::EXEC_HI)
2947 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2948 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2949 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2950 .Default(AMDGPU::NoRegister);
2951
2952 if (Reg == AMDGPU::NoRegister) {
2953 report_fatal_error(Twine("invalid register name \""
2954 + StringRef(RegName) + "\"."));
2955
2956 }
2957
2958 if (!Subtarget->hasFlatScrRegister() &&
2959 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2960 report_fatal_error(Twine("invalid register \""
2961 + StringRef(RegName) + "\" for subtarget."));
2962 }
2963
2964 switch (Reg) {
2965 case AMDGPU::M0:
2966 case AMDGPU::EXEC_LO:
2967 case AMDGPU::EXEC_HI:
2968 case AMDGPU::FLAT_SCR_LO:
2969 case AMDGPU::FLAT_SCR_HI:
2970 if (VT.getSizeInBits() == 32)
2971 return Reg;
2972 break;
2973 case AMDGPU::EXEC:
2974 case AMDGPU::FLAT_SCR:
2975 if (VT.getSizeInBits() == 64)
2976 return Reg;
2977 break;
2978 default:
2979 llvm_unreachable("missing register type checking");
2980 }
2981
2982 report_fatal_error(Twine("invalid type for register \""
2983 + StringRef(RegName) + "\"."));
2984 }
2985
2986 // If kill is not the last instruction, split the block so kill is always a
2987 // proper terminator.
2988 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2989 MachineBasicBlock *BB) const {
2990 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2991
2992 MachineBasicBlock::iterator SplitPoint(&MI);
2993 ++SplitPoint;
2994
2995 if (SplitPoint == BB->end()) {
2996 // Don't bother with a new block.
2997 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2998 return BB; 2999 } 3000 3001 MachineFunction *MF = BB->getParent(); 3002 MachineBasicBlock *SplitBB 3003 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 3004 3005 MF->insert(++MachineFunction::iterator(BB), SplitBB); 3006 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 3007 3008 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 3009 BB->addSuccessor(SplitBB); 3010 3011 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 3012 return SplitBB; 3013 } 3014 3015 // Split block \p MBB at \p MI, as to insert a loop. If \p InstInLoop is true, 3016 // \p MI will be the only instruction in the loop body block. Otherwise, it will 3017 // be the first instruction in the remainder block. 3018 // 3019 /// \returns { LoopBody, Remainder } 3020 static std::pair<MachineBasicBlock *, MachineBasicBlock *> 3021 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { 3022 MachineFunction *MF = MBB.getParent(); 3023 MachineBasicBlock::iterator I(&MI); 3024 3025 // To insert the loop we need to split the block. Move everything after this 3026 // point to a new block, and insert a new empty block between the two. 3027 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 3028 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 3029 MachineFunction::iterator MBBI(MBB); 3030 ++MBBI; 3031 3032 MF->insert(MBBI, LoopBB); 3033 MF->insert(MBBI, RemainderBB); 3034 3035 LoopBB->addSuccessor(LoopBB); 3036 LoopBB->addSuccessor(RemainderBB); 3037 3038 // Move the rest of the block into a new block. 3039 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 3040 3041 if (InstInLoop) { 3042 auto Next = std::next(I); 3043 3044 // Move instruction to loop body. 3045 LoopBB->splice(LoopBB->begin(), &MBB, I, Next); 3046 3047 // Move the rest of the block. 3048 RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); 3049 } else { 3050 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 3051 } 3052 3053 MBB.addSuccessor(LoopBB); 3054 3055 return std::make_pair(LoopBB, RemainderBB); 3056 } 3057 3058 MachineBasicBlock * 3059 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, 3060 MachineBasicBlock *BB) const { 3061 const DebugLoc &DL = MI.getDebugLoc(); 3062 3063 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3064 3065 MachineBasicBlock *LoopBB; 3066 MachineBasicBlock *RemainderBB; 3067 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3068 3069 MachineBasicBlock::iterator Prev = std::prev(MI.getIterator()); 3070 3071 std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); 3072 3073 MachineBasicBlock::iterator I = LoopBB->end(); 3074 MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0); 3075 3076 const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( 3077 AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); 3078 3079 // Clear TRAP_STS.MEM_VIOL 3080 BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) 3081 .addImm(0) 3082 .addImm(EncodedReg); 3083 3084 // This is a pain, but we're not allowed to have physical register live-ins 3085 // yet. Insert a pair of copies if the VGPR0 hack is necessary. 
3086 if (Src && TargetRegisterInfo::isPhysicalRegister(Src->getReg())) { 3087 unsigned Data0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3088 BuildMI(*BB, std::next(Prev), DL, TII->get(AMDGPU::COPY), Data0) 3089 .add(*Src); 3090 3091 BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::COPY), Src->getReg()) 3092 .addReg(Data0); 3093 3094 MRI.setSimpleHint(Data0, Src->getReg()); 3095 } 3096 3097 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_WAITCNT)) 3098 .addImm(0); 3099 3100 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3101 3102 // Load and check TRAP_STS.MEM_VIOL 3103 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) 3104 .addImm(EncodedReg); 3105 3106 // FIXME: Do we need to use an isel pseudo that may clobber scc? 3107 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) 3108 .addReg(Reg, RegState::Kill) 3109 .addImm(0); 3110 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 3111 .addMBB(LoopBB); 3112 3113 return RemainderBB; 3114 } 3115 3116 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 3117 // wavefront. If the value is uniform and just happens to be in a VGPR, this 3118 // will only do one iteration. In the worst case, this will loop 64 times. 3119 // 3120 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 3121 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 3122 const SIInstrInfo *TII, 3123 MachineRegisterInfo &MRI, 3124 MachineBasicBlock &OrigBB, 3125 MachineBasicBlock &LoopBB, 3126 const DebugLoc &DL, 3127 const MachineOperand &IdxReg, 3128 unsigned InitReg, 3129 unsigned ResultReg, 3130 unsigned PhiReg, 3131 unsigned InitSaveExecReg, 3132 int Offset, 3133 bool UseGPRIdxMode, 3134 bool IsIndirectSrc) { 3135 MachineFunction *MF = OrigBB.getParent(); 3136 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3137 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3138 MachineBasicBlock::iterator I = LoopBB.begin(); 3139 3140 const TargetRegisterClass *BoolRC = TRI->getBoolRC(); 3141 unsigned PhiExec = MRI.createVirtualRegister(BoolRC); 3142 unsigned NewExec = MRI.createVirtualRegister(BoolRC); 3143 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3144 unsigned CondReg = MRI.createVirtualRegister(BoolRC); 3145 3146 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 3147 .addReg(InitReg) 3148 .addMBB(&OrigBB) 3149 .addReg(ResultReg) 3150 .addMBB(&LoopBB); 3151 3152 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 3153 .addReg(InitSaveExecReg) 3154 .addMBB(&OrigBB) 3155 .addReg(NewExec) 3156 .addMBB(&LoopBB); 3157 3158 // Read the next variant <- also loop target. 3159 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 3160 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 3161 3162 // Compare the just read M0 value to all possible Idx values. 3163 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 3164 .addReg(CurrentIdxReg) 3165 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 3166 3167 // Update EXEC, save the original EXEC value to VCC. 3168 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? 
AMDGPU::S_AND_SAVEEXEC_B32 3169 : AMDGPU::S_AND_SAVEEXEC_B64), 3170 NewExec) 3171 .addReg(CondReg, RegState::Kill); 3172 3173 MRI.setSimpleHint(NewExec, CondReg); 3174 3175 if (UseGPRIdxMode) { 3176 unsigned IdxReg; 3177 if (Offset == 0) { 3178 IdxReg = CurrentIdxReg; 3179 } else { 3180 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3181 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 3182 .addReg(CurrentIdxReg, RegState::Kill) 3183 .addImm(Offset); 3184 } 3185 unsigned IdxMode = IsIndirectSrc ? 3186 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE; 3187 MachineInstr *SetOn = 3188 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3189 .addReg(IdxReg, RegState::Kill) 3190 .addImm(IdxMode); 3191 SetOn->getOperand(3).setIsUndef(); 3192 } else { 3193 // Move index from VCC into M0 3194 if (Offset == 0) { 3195 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3196 .addReg(CurrentIdxReg, RegState::Kill); 3197 } else { 3198 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 3199 .addReg(CurrentIdxReg, RegState::Kill) 3200 .addImm(Offset); 3201 } 3202 } 3203 3204 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 3205 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 3206 MachineInstr *InsertPt = 3207 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term 3208 : AMDGPU::S_XOR_B64_term), Exec) 3209 .addReg(Exec) 3210 .addReg(NewExec); 3211 3212 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 3213 // s_cbranch_scc0? 3214 3215 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 3216 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 3217 .addMBB(&LoopBB); 3218 3219 return InsertPt->getIterator(); 3220 } 3221 3222 // This has slightly sub-optimal regalloc when the source vector is killed by 3223 // the read. The register allocator does not understand that the kill is 3224 // per-workitem, so is kept alive for the whole loop so we end up not re-using a 3225 // subregister from it, using 1 more VGPR than necessary. This was saved when 3226 // this was expanded after register allocation. 3227 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 3228 MachineBasicBlock &MBB, 3229 MachineInstr &MI, 3230 unsigned InitResultReg, 3231 unsigned PhiReg, 3232 int Offset, 3233 bool UseGPRIdxMode, 3234 bool IsIndirectSrc) { 3235 MachineFunction *MF = MBB.getParent(); 3236 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3237 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3238 MachineRegisterInfo &MRI = MF->getRegInfo(); 3239 const DebugLoc &DL = MI.getDebugLoc(); 3240 MachineBasicBlock::iterator I(&MI); 3241 3242 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 3243 unsigned DstReg = MI.getOperand(0).getReg(); 3244 unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); 3245 unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC); 3246 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 3247 unsigned MovExecOpc = ST.isWave32() ? 
AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 3248 3249 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 3250 3251 // Save the EXEC mask 3252 BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) 3253 .addReg(Exec); 3254 3255 MachineBasicBlock *LoopBB; 3256 MachineBasicBlock *RemainderBB; 3257 std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false); 3258 3259 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3260 3261 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 3262 InitResultReg, DstReg, PhiReg, TmpExec, 3263 Offset, UseGPRIdxMode, IsIndirectSrc); 3264 3265 MachineBasicBlock::iterator First = RemainderBB->begin(); 3266 BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec) 3267 .addReg(SaveExec); 3268 3269 return InsPt; 3270 } 3271 3272 // Returns subreg index, offset 3273 static std::pair<unsigned, int> 3274 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 3275 const TargetRegisterClass *SuperRC, 3276 unsigned VecReg, 3277 int Offset) { 3278 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; 3279 3280 // Skip out of bounds offsets, or else we would end up using an undefined 3281 // register. 3282 if (Offset >= NumElts || Offset < 0) 3283 return std::make_pair(AMDGPU::sub0, Offset); 3284 3285 return std::make_pair(AMDGPU::sub0 + Offset, 0); 3286 } 3287 3288 // Return true if the index is an SGPR and was set. 3289 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 3290 MachineRegisterInfo &MRI, 3291 MachineInstr &MI, 3292 int Offset, 3293 bool UseGPRIdxMode, 3294 bool IsIndirectSrc) { 3295 MachineBasicBlock *MBB = MI.getParent(); 3296 const DebugLoc &DL = MI.getDebugLoc(); 3297 MachineBasicBlock::iterator I(&MI); 3298 3299 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3300 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 3301 3302 assert(Idx->getReg() != AMDGPU::NoRegister); 3303 3304 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 3305 return false; 3306 3307 if (UseGPRIdxMode) { 3308 unsigned IdxMode = IsIndirectSrc ? 3309 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE; 3310 if (Offset == 0) { 3311 MachineInstr *SetOn = 3312 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3313 .add(*Idx) 3314 .addImm(IdxMode); 3315 3316 SetOn->getOperand(3).setIsUndef(); 3317 } else { 3318 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3319 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 3320 .add(*Idx) 3321 .addImm(Offset); 3322 MachineInstr *SetOn = 3323 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3324 .addReg(Tmp, RegState::Kill) 3325 .addImm(IdxMode); 3326 3327 SetOn->getOperand(3).setIsUndef(); 3328 } 3329 3330 return true; 3331 } 3332 3333 if (Offset == 0) { 3334 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3335 .add(*Idx); 3336 } else { 3337 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 3338 .add(*Idx) 3339 .addImm(Offset); 3340 } 3341 3342 return true; 3343 } 3344 3345 // Control flow needs to be inserted if indexing with a VGPR. 
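// When the index is uniform (held in an SGPR) this reduces to a single M0
// write (or an S_SET_GPR_IDX_ON in GPR indexing mode) followed by the
// indirect move. When the index lives in a VGPR, loadM0FromVGPR builds a
// waterfall loop that handles one unique index value per iteration.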
3346 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 3347 MachineBasicBlock &MBB, 3348 const GCNSubtarget &ST) { 3349 const SIInstrInfo *TII = ST.getInstrInfo(); 3350 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3351 MachineFunction *MF = MBB.getParent(); 3352 MachineRegisterInfo &MRI = MF->getRegInfo(); 3353 3354 unsigned Dst = MI.getOperand(0).getReg(); 3355 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 3356 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3357 3358 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 3359 3360 unsigned SubReg; 3361 std::tie(SubReg, Offset) 3362 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 3363 3364 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3365 3366 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 3367 MachineBasicBlock::iterator I(&MI); 3368 const DebugLoc &DL = MI.getDebugLoc(); 3369 3370 if (UseGPRIdxMode) { 3371 // TODO: Look at the uses to avoid the copy. This may require rescheduling 3372 // to avoid interfering with other uses, so probably requires a new 3373 // optimization pass. 3374 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3375 .addReg(SrcReg, RegState::Undef, SubReg) 3376 .addReg(SrcReg, RegState::Implicit) 3377 .addReg(AMDGPU::M0, RegState::Implicit); 3378 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3379 } else { 3380 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3381 .addReg(SrcReg, RegState::Undef, SubReg) 3382 .addReg(SrcReg, RegState::Implicit); 3383 } 3384 3385 MI.eraseFromParent(); 3386 3387 return &MBB; 3388 } 3389 3390 const DebugLoc &DL = MI.getDebugLoc(); 3391 MachineBasicBlock::iterator I(&MI); 3392 3393 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3394 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3395 3396 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 3397 3398 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, 3399 Offset, UseGPRIdxMode, true); 3400 MachineBasicBlock *LoopBB = InsPt->getParent(); 3401 3402 if (UseGPRIdxMode) { 3403 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3404 .addReg(SrcReg, RegState::Undef, SubReg) 3405 .addReg(SrcReg, RegState::Implicit) 3406 .addReg(AMDGPU::M0, RegState::Implicit); 3407 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3408 } else { 3409 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3410 .addReg(SrcReg, RegState::Undef, SubReg) 3411 .addReg(SrcReg, RegState::Implicit); 3412 } 3413 3414 MI.eraseFromParent(); 3415 3416 return LoopBB; 3417 } 3418 3419 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI, 3420 const TargetRegisterClass *VecRC) { 3421 switch (TRI.getRegSizeInBits(*VecRC)) { 3422 case 32: // 4 bytes 3423 return AMDGPU::V_MOVRELD_B32_V1; 3424 case 64: // 8 bytes 3425 return AMDGPU::V_MOVRELD_B32_V2; 3426 case 128: // 16 bytes 3427 return AMDGPU::V_MOVRELD_B32_V4; 3428 case 256: // 32 bytes 3429 return AMDGPU::V_MOVRELD_B32_V8; 3430 case 512: // 64 bytes 3431 return AMDGPU::V_MOVRELD_B32_V16; 3432 default: 3433 llvm_unreachable("unsupported size for MOVRELD pseudos"); 3434 } 3435 } 3436 3437 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 3438 MachineBasicBlock &MBB, 3439 const GCNSubtarget &ST) { 3440 const SIInstrInfo *TII = ST.getInstrInfo(); 3441 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3442 MachineFunction *MF = 
MBB.getParent(); 3443 MachineRegisterInfo &MRI = MF->getRegInfo(); 3444 3445 unsigned Dst = MI.getOperand(0).getReg(); 3446 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 3447 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3448 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 3449 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3450 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 3451 3452 // This can be an immediate, but will be folded later. 3453 assert(Val->getReg()); 3454 3455 unsigned SubReg; 3456 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 3457 SrcVec->getReg(), 3458 Offset); 3459 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3460 3461 if (Idx->getReg() == AMDGPU::NoRegister) { 3462 MachineBasicBlock::iterator I(&MI); 3463 const DebugLoc &DL = MI.getDebugLoc(); 3464 3465 assert(Offset == 0); 3466 3467 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 3468 .add(*SrcVec) 3469 .add(*Val) 3470 .addImm(SubReg); 3471 3472 MI.eraseFromParent(); 3473 return &MBB; 3474 } 3475 3476 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 3477 MachineBasicBlock::iterator I(&MI); 3478 const DebugLoc &DL = MI.getDebugLoc(); 3479 3480 if (UseGPRIdxMode) { 3481 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3482 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 3483 .add(*Val) 3484 .addReg(Dst, RegState::ImplicitDefine) 3485 .addReg(SrcVec->getReg(), RegState::Implicit) 3486 .addReg(AMDGPU::M0, RegState::Implicit); 3487 3488 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3489 } else { 3490 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3491 3492 BuildMI(MBB, I, DL, MovRelDesc) 3493 .addReg(Dst, RegState::Define) 3494 .addReg(SrcVec->getReg()) 3495 .add(*Val) 3496 .addImm(SubReg - AMDGPU::sub0); 3497 } 3498 3499 MI.eraseFromParent(); 3500 return &MBB; 3501 } 3502 3503 if (Val->isReg()) 3504 MRI.clearKillFlags(Val->getReg()); 3505 3506 const DebugLoc &DL = MI.getDebugLoc(); 3507 3508 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 3509 3510 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 3511 Offset, UseGPRIdxMode, false); 3512 MachineBasicBlock *LoopBB = InsPt->getParent(); 3513 3514 if (UseGPRIdxMode) { 3515 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3516 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 3517 .add(*Val) // src0 3518 .addReg(Dst, RegState::ImplicitDefine) 3519 .addReg(PhiReg, RegState::Implicit) 3520 .addReg(AMDGPU::M0, RegState::Implicit); 3521 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3522 } else { 3523 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3524 3525 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 3526 .addReg(Dst, RegState::Define) 3527 .addReg(PhiReg) 3528 .add(*Val) 3529 .addImm(SubReg - AMDGPU::sub0); 3530 } 3531 3532 MI.eraseFromParent(); 3533 3534 return LoopBB; 3535 } 3536 3537 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 3538 MachineInstr &MI, MachineBasicBlock *BB) const { 3539 3540 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3541 MachineFunction *MF = BB->getParent(); 3542 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 3543 3544 if (TII->isMIMG(MI)) { 3545 if (MI.memoperands_empty() && MI.mayLoadOrStore()) { 3546 report_fatal_error("missing mem operand from MIMG 
instruction"); 3547 } 3548 // Add a memoperand for mimg instructions so that they aren't assumed to 3549 // be ordered memory instuctions. 3550 3551 return BB; 3552 } 3553 3554 switch (MI.getOpcode()) { 3555 case AMDGPU::S_ADD_U64_PSEUDO: 3556 case AMDGPU::S_SUB_U64_PSEUDO: { 3557 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3558 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3559 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3560 const TargetRegisterClass *BoolRC = TRI->getBoolRC(); 3561 const DebugLoc &DL = MI.getDebugLoc(); 3562 3563 MachineOperand &Dest = MI.getOperand(0); 3564 MachineOperand &Src0 = MI.getOperand(1); 3565 MachineOperand &Src1 = MI.getOperand(2); 3566 3567 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3568 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3569 3570 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3571 Src0, BoolRC, AMDGPU::sub0, 3572 &AMDGPU::SReg_32_XM0RegClass); 3573 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3574 Src0, BoolRC, AMDGPU::sub1, 3575 &AMDGPU::SReg_32_XM0RegClass); 3576 3577 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3578 Src1, BoolRC, AMDGPU::sub0, 3579 &AMDGPU::SReg_32_XM0RegClass); 3580 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3581 Src1, BoolRC, AMDGPU::sub1, 3582 &AMDGPU::SReg_32_XM0RegClass); 3583 3584 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 3585 3586 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 3587 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 3588 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) 3589 .add(Src0Sub0) 3590 .add(Src1Sub0); 3591 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) 3592 .add(Src0Sub1) 3593 .add(Src1Sub1); 3594 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) 3595 .addReg(DestSub0) 3596 .addImm(AMDGPU::sub0) 3597 .addReg(DestSub1) 3598 .addImm(AMDGPU::sub1); 3599 MI.eraseFromParent(); 3600 return BB; 3601 } 3602 case AMDGPU::SI_INIT_M0: { 3603 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 3604 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3605 .add(MI.getOperand(0)); 3606 MI.eraseFromParent(); 3607 return BB; 3608 } 3609 case AMDGPU::SI_INIT_EXEC: 3610 // This should be before all vector instructions. 3611 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), 3612 AMDGPU::EXEC) 3613 .addImm(MI.getOperand(0).getImm()); 3614 MI.eraseFromParent(); 3615 return BB; 3616 3617 case AMDGPU::SI_INIT_EXEC_LO: 3618 // This should be before all vector instructions. 3619 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), 3620 AMDGPU::EXEC_LO) 3621 .addImm(MI.getOperand(0).getImm()); 3622 MI.eraseFromParent(); 3623 return BB; 3624 3625 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { 3626 // Extract the thread count from an SGPR input and set EXEC accordingly. 3627 // Since BFM can't shift by 64, handle that case with CMP + CMOV. 3628 // 3629 // S_BFE_U32 count, input, {shift, 7} 3630 // S_BFM_B64 exec, count, 0 3631 // S_CMP_EQ_U32 count, 64 3632 // S_CMOV_B64 exec, -1 3633 MachineInstr *FirstMI = &*BB->begin(); 3634 MachineRegisterInfo &MRI = MF->getRegInfo(); 3635 unsigned InputReg = MI.getOperand(0).getReg(); 3636 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3637 bool Found = false; 3638 3639 // Move the COPY of the input reg to the beginning, so that we can use it. 
3640 for (auto I = BB->begin(); I != &MI; I++) { 3641 if (I->getOpcode() != TargetOpcode::COPY || 3642 I->getOperand(0).getReg() != InputReg) 3643 continue; 3644 3645 if (I == FirstMI) { 3646 FirstMI = &*++BB->begin(); 3647 } else { 3648 I->removeFromParent(); 3649 BB->insert(FirstMI, &*I); 3650 } 3651 Found = true; 3652 break; 3653 } 3654 assert(Found); 3655 (void)Found; 3656 3657 // This should be before all vector instructions. 3658 unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1; 3659 bool isWave32 = getSubtarget()->isWave32(); 3660 unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 3661 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg) 3662 .addReg(InputReg) 3663 .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000); 3664 BuildMI(*BB, FirstMI, DebugLoc(), 3665 TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), 3666 Exec) 3667 .addReg(CountReg) 3668 .addImm(0); 3669 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32)) 3670 .addReg(CountReg, RegState::Kill) 3671 .addImm(getSubtarget()->getWavefrontSize()); 3672 BuildMI(*BB, FirstMI, DebugLoc(), 3673 TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64), 3674 Exec) 3675 .addImm(-1); 3676 MI.eraseFromParent(); 3677 return BB; 3678 } 3679 3680 case AMDGPU::GET_GROUPSTATICSIZE: { 3681 assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || 3682 getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); 3683 DebugLoc DL = MI.getDebugLoc(); 3684 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 3685 .add(MI.getOperand(0)) 3686 .addImm(MFI->getLDSSize()); 3687 MI.eraseFromParent(); 3688 return BB; 3689 } 3690 case AMDGPU::SI_INDIRECT_SRC_V1: 3691 case AMDGPU::SI_INDIRECT_SRC_V2: 3692 case AMDGPU::SI_INDIRECT_SRC_V4: 3693 case AMDGPU::SI_INDIRECT_SRC_V8: 3694 case AMDGPU::SI_INDIRECT_SRC_V16: 3695 return emitIndirectSrc(MI, *BB, *getSubtarget()); 3696 case AMDGPU::SI_INDIRECT_DST_V1: 3697 case AMDGPU::SI_INDIRECT_DST_V2: 3698 case AMDGPU::SI_INDIRECT_DST_V4: 3699 case AMDGPU::SI_INDIRECT_DST_V8: 3700 case AMDGPU::SI_INDIRECT_DST_V16: 3701 return emitIndirectDst(MI, *BB, *getSubtarget()); 3702 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 3703 case AMDGPU::SI_KILL_I1_PSEUDO: 3704 return splitKillBlock(MI, BB); 3705 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 3706 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3707 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3708 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3709 3710 unsigned Dst = MI.getOperand(0).getReg(); 3711 unsigned Src0 = MI.getOperand(1).getReg(); 3712 unsigned Src1 = MI.getOperand(2).getReg(); 3713 const DebugLoc &DL = MI.getDebugLoc(); 3714 unsigned SrcCond = MI.getOperand(3).getReg(); 3715 3716 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3717 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3718 const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 3719 unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC); 3720 3721 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) 3722 .addReg(SrcCond); 3723 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 3724 .addImm(0) 3725 .addReg(Src0, 0, AMDGPU::sub0) 3726 .addImm(0) 3727 .addReg(Src1, 0, AMDGPU::sub0) 3728 .addReg(SrcCondCopy); 3729 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 3730 .addImm(0) 3731 .addReg(Src0, 0, AMDGPU::sub1) 3732 .addImm(0) 3733 .addReg(Src1, 0, AMDGPU::sub1) 3734 .addReg(SrcCondCopy); 3735 3736 
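    // Reassemble the two selected 32-bit halves into the 64-bit result
    // register.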
BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 3737 .addReg(DstLo) 3738 .addImm(AMDGPU::sub0) 3739 .addReg(DstHi) 3740 .addImm(AMDGPU::sub1); 3741 MI.eraseFromParent(); 3742 return BB; 3743 } 3744 case AMDGPU::SI_BR_UNDEF: { 3745 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3746 const DebugLoc &DL = MI.getDebugLoc(); 3747 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 3748 .add(MI.getOperand(0)); 3749 Br->getOperand(1).setIsUndef(true); // read undef SCC 3750 MI.eraseFromParent(); 3751 return BB; 3752 } 3753 case AMDGPU::ADJCALLSTACKUP: 3754 case AMDGPU::ADJCALLSTACKDOWN: { 3755 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3756 MachineInstrBuilder MIB(*MF, &MI); 3757 3758 // Add an implicit use of the frame offset reg to prevent the restore copy 3759 // inserted after the call from being reordered after stack operations in 3760 // the caller's frame. 3761 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) 3762 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit) 3763 .addReg(Info->getFrameOffsetReg(), RegState::Implicit); 3764 return BB; 3765 } 3766 case AMDGPU::SI_CALL_ISEL: { 3767 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3768 const DebugLoc &DL = MI.getDebugLoc(); 3769 3770 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); 3771 3772 MachineInstrBuilder MIB; 3773 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); 3774 3775 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) 3776 MIB.add(MI.getOperand(I)); 3777 3778 MIB.cloneMemRefs(MI); 3779 MI.eraseFromParent(); 3780 return BB; 3781 } 3782 case AMDGPU::V_ADD_I32_e32: 3783 case AMDGPU::V_SUB_I32_e32: 3784 case AMDGPU::V_SUBREV_I32_e32: { 3785 // TODO: Define distinct V_*_I32_Pseudo instructions instead. 3786 const DebugLoc &DL = MI.getDebugLoc(); 3787 unsigned Opc = MI.getOpcode(); 3788 3789 bool NeedClampOperand = false; 3790 if (TII->pseudoToMCOpcode(Opc) == -1) { 3791 Opc = AMDGPU::getVOPe64(Opc); 3792 NeedClampOperand = true; 3793 } 3794 3795 auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); 3796 if (TII->isVOP3(*I)) { 3797 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3798 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3799 I.addReg(TRI->getVCC(), RegState::Define); 3800 } 3801 I.add(MI.getOperand(1)) 3802 .add(MI.getOperand(2)); 3803 if (NeedClampOperand) 3804 I.addImm(0); // clamp bit for e64 encoding 3805 3806 TII->legalizeOperands(*I); 3807 3808 MI.eraseFromParent(); 3809 return BB; 3810 } 3811 case AMDGPU::DS_GWS_INIT: 3812 case AMDGPU::DS_GWS_SEMA_V: 3813 case AMDGPU::DS_GWS_SEMA_BR: 3814 case AMDGPU::DS_GWS_SEMA_P: 3815 case AMDGPU::DS_GWS_SEMA_RELEASE_ALL: 3816 case AMDGPU::DS_GWS_BARRIER: 3817 if (getSubtarget()->hasGWSAutoReplay()) 3818 return BB; 3819 return emitGWSMemViolTestLoop(MI, BB); 3820 default: 3821 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 3822 } 3823 } 3824 3825 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { 3826 return isTypeLegal(VT.getScalarType()); 3827 } 3828 3829 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 3830 // This currently forces unfolding various combinations of fsub into fma with 3831 // free fneg'd operands. As long as we have fast FMA (controlled by 3832 // isFMAFasterThanFMulAndFAdd), we should perform these.
3833 3834 // When fma is quarter rate, for f64 where add / sub are at best half rate, 3835 // most of these combines appear to be cycle neutral but save on instruction 3836 // count / code size. 3837 return true; 3838 } 3839 3840 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 3841 EVT VT) const { 3842 if (!VT.isVector()) { 3843 return MVT::i1; 3844 } 3845 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 3846 } 3847 3848 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { 3849 // TODO: Should i16 be used always if legal? For now it would force VALU 3850 // shifts. 3851 return (VT == MVT::i16) ? MVT::i16 : MVT::i32; 3852 } 3853 3854 // Answering this is somewhat tricky and depends on the specific device which 3855 // have different rates for fma or all f64 operations. 3856 // 3857 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 3858 // regardless of which device (although the number of cycles differs between 3859 // devices), so it is always profitable for f64. 3860 // 3861 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 3862 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 3863 // which we can always do even without fused FP ops since it returns the same 3864 // result as the separate operations and since it is always full 3865 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 3866 // however does not support denormals, so we do report fma as faster if we have 3867 // a fast fma device and require denormals. 3868 // 3869 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 3870 VT = VT.getScalarType(); 3871 3872 switch (VT.getSimpleVT().SimpleTy) { 3873 case MVT::f32: { 3874 // This is as fast on some subtargets. However, we always have full rate f32 3875 // mad available which returns the same result as the separate operations 3876 // which we should prefer over fma. We can't use this if we want to support 3877 // denormals, so only report this in these cases. 3878 if (Subtarget->hasFP32Denormals()) 3879 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); 3880 3881 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. 3882 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); 3883 } 3884 case MVT::f64: 3885 return true; 3886 case MVT::f16: 3887 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 3888 default: 3889 break; 3890 } 3891 3892 return false; 3893 } 3894 3895 //===----------------------------------------------------------------------===// 3896 // Custom DAG Lowering Operations 3897 //===----------------------------------------------------------------------===// 3898 3899 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 3900 // wider vector type is legal. 
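// For example, an fneg of a v4f16 operand is lowered here as two v2f16 fnegs
// whose results are concatenated back together, rather than four scalar f16
// operations plus a build_vector.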
3901 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, 3902 SelectionDAG &DAG) const { 3903 unsigned Opc = Op.getOpcode(); 3904 EVT VT = Op.getValueType(); 3905 assert(VT == MVT::v4f16); 3906 3907 SDValue Lo, Hi; 3908 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); 3909 3910 SDLoc SL(Op); 3911 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, 3912 Op->getFlags()); 3913 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, 3914 Op->getFlags()); 3915 3916 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 3917 } 3918 3919 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 3920 // wider vector type is legal. 3921 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op, 3922 SelectionDAG &DAG) const { 3923 unsigned Opc = Op.getOpcode(); 3924 EVT VT = Op.getValueType(); 3925 assert(VT == MVT::v4i16 || VT == MVT::v4f16); 3926 3927 SDValue Lo0, Hi0; 3928 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); 3929 SDValue Lo1, Hi1; 3930 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); 3931 3932 SDLoc SL(Op); 3933 3934 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, 3935 Op->getFlags()); 3936 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, 3937 Op->getFlags()); 3938 3939 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 3940 } 3941 3942 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3943 switch (Op.getOpcode()) { 3944 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 3945 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 3946 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3947 case ISD::LOAD: { 3948 SDValue Result = LowerLOAD(Op, DAG); 3949 assert((!Result.getNode() || 3950 Result.getNode()->getNumValues() == 2) && 3951 "Load should return a value and a chain"); 3952 return Result; 3953 } 3954 3955 case ISD::FSIN: 3956 case ISD::FCOS: 3957 return LowerTrig(Op, DAG); 3958 case ISD::SELECT: return LowerSELECT(Op, DAG); 3959 case ISD::FDIV: return LowerFDIV(Op, DAG); 3960 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 3961 case ISD::STORE: return LowerSTORE(Op, DAG); 3962 case ISD::GlobalAddress: { 3963 MachineFunction &MF = DAG.getMachineFunction(); 3964 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 3965 return LowerGlobalAddress(MFI, Op, DAG); 3966 } 3967 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3968 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 3969 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 3970 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 3971 case ISD::INSERT_SUBVECTOR: 3972 return lowerINSERT_SUBVECTOR(Op, DAG); 3973 case ISD::INSERT_VECTOR_ELT: 3974 return lowerINSERT_VECTOR_ELT(Op, DAG); 3975 case ISD::EXTRACT_VECTOR_ELT: 3976 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 3977 case ISD::VECTOR_SHUFFLE: 3978 return lowerVECTOR_SHUFFLE(Op, DAG); 3979 case ISD::BUILD_VECTOR: 3980 return lowerBUILD_VECTOR(Op, DAG); 3981 case ISD::FP_ROUND: 3982 return lowerFP_ROUND(Op, DAG); 3983 case ISD::TRAP: 3984 return lowerTRAP(Op, DAG); 3985 case ISD::DEBUGTRAP: 3986 return lowerDEBUGTRAP(Op, DAG); 3987 case ISD::FABS: 3988 case ISD::FNEG: 3989 case ISD::FCANONICALIZE: 3990 return splitUnaryVectorOp(Op, DAG); 3991 case ISD::FMINNUM: 3992 case ISD::FMAXNUM: 3993 return lowerFMINNUM_FMAXNUM(Op, DAG); 3994 case ISD::SHL: 3995 case ISD::SRA: 3996 case ISD::SRL: 3997 case ISD::ADD: 3998 case 
ISD::SUB: 3999 case ISD::MUL: 4000 case ISD::SMIN: 4001 case ISD::SMAX: 4002 case ISD::UMIN: 4003 case ISD::UMAX: 4004 case ISD::FADD: 4005 case ISD::FMUL: 4006 case ISD::FMINNUM_IEEE: 4007 case ISD::FMAXNUM_IEEE: 4008 return splitBinaryVectorOp(Op, DAG); 4009 } 4010 return SDValue(); 4011 } 4012 4013 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT, 4014 const SDLoc &DL, 4015 SelectionDAG &DAG, bool Unpacked) { 4016 if (!LoadVT.isVector()) 4017 return Result; 4018 4019 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16. 4020 // Truncate to v2i16/v4i16. 4021 EVT IntLoadVT = LoadVT.changeTypeToInteger(); 4022 4023 // Workaround legalizer not scalarizing truncate after vector op 4024 // legalization byt not creating intermediate vector trunc. 4025 SmallVector<SDValue, 4> Elts; 4026 DAG.ExtractVectorElements(Result, Elts); 4027 for (SDValue &Elt : Elts) 4028 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt); 4029 4030 Result = DAG.getBuildVector(IntLoadVT, DL, Elts); 4031 4032 // Bitcast to original type (v2f16/v4f16). 4033 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 4034 } 4035 4036 // Cast back to the original packed type. 4037 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 4038 } 4039 4040 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode, 4041 MemSDNode *M, 4042 SelectionDAG &DAG, 4043 ArrayRef<SDValue> Ops, 4044 bool IsIntrinsic) const { 4045 SDLoc DL(M); 4046 4047 bool Unpacked = Subtarget->hasUnpackedD16VMem(); 4048 EVT LoadVT = M->getValueType(0); 4049 4050 EVT EquivLoadVT = LoadVT; 4051 if (Unpacked && LoadVT.isVector()) { 4052 EquivLoadVT = LoadVT.isVector() ? 4053 EVT::getVectorVT(*DAG.getContext(), MVT::i32, 4054 LoadVT.getVectorNumElements()) : LoadVT; 4055 } 4056 4057 // Change from v4f16/v2f16 to EquivLoadVT. 4058 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); 4059 4060 SDValue Load 4061 = DAG.getMemIntrinsicNode( 4062 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, 4063 VTList, Ops, M->getMemoryVT(), 4064 M->getMemOperand()); 4065 if (!Unpacked) // Just adjusted the opcode. 4066 return Load; 4067 4068 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); 4069 4070 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); 4071 } 4072 4073 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, 4074 SDNode *N, SelectionDAG &DAG) { 4075 EVT VT = N->getValueType(0); 4076 const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); 4077 int CondCode = CD->getSExtValue(); 4078 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 4079 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE) 4080 return DAG.getUNDEF(VT); 4081 4082 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 4083 4084 SDValue LHS = N->getOperand(1); 4085 SDValue RHS = N->getOperand(2); 4086 4087 SDLoc DL(N); 4088 4089 EVT CmpVT = LHS.getValueType(); 4090 if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) { 4091 unsigned PromoteOp = ICmpInst::isSigned(IcInput) ? 
4092 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4093 LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS); 4094 RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS); 4095 } 4096 4097 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 4098 4099 unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); 4100 EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); 4101 4102 SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS, 4103 DAG.getCondCode(CCOpcode)); 4104 if (VT.bitsEq(CCVT)) 4105 return SetCC; 4106 return DAG.getZExtOrTrunc(SetCC, DL, VT); 4107 } 4108 4109 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, 4110 SDNode *N, SelectionDAG &DAG) { 4111 EVT VT = N->getValueType(0); 4112 const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); 4113 4114 int CondCode = CD->getSExtValue(); 4115 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE || 4116 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) { 4117 return DAG.getUNDEF(VT); 4118 } 4119 4120 SDValue Src0 = N->getOperand(1); 4121 SDValue Src1 = N->getOperand(2); 4122 EVT CmpVT = Src0.getValueType(); 4123 SDLoc SL(N); 4124 4125 if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) { 4126 Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 4127 Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 4128 } 4129 4130 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 4131 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 4132 unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); 4133 EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); 4134 SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0, 4135 Src1, DAG.getCondCode(CCOpcode)); 4136 if (VT.bitsEq(CCVT)) 4137 return SetCC; 4138 return DAG.getZExtOrTrunc(SetCC, SL, VT); 4139 } 4140 4141 void SITargetLowering::ReplaceNodeResults(SDNode *N, 4142 SmallVectorImpl<SDValue> &Results, 4143 SelectionDAG &DAG) const { 4144 switch (N->getOpcode()) { 4145 case ISD::INSERT_VECTOR_ELT: { 4146 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 4147 Results.push_back(Res); 4148 return; 4149 } 4150 case ISD::EXTRACT_VECTOR_ELT: { 4151 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 4152 Results.push_back(Res); 4153 return; 4154 } 4155 case ISD::INTRINSIC_WO_CHAIN: { 4156 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 4157 switch (IID) { 4158 case Intrinsic::amdgcn_cvt_pkrtz: { 4159 SDValue Src0 = N->getOperand(1); 4160 SDValue Src1 = N->getOperand(2); 4161 SDLoc SL(N); 4162 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 4163 Src0, Src1); 4164 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 4165 return; 4166 } 4167 case Intrinsic::amdgcn_cvt_pknorm_i16: 4168 case Intrinsic::amdgcn_cvt_pknorm_u16: 4169 case Intrinsic::amdgcn_cvt_pk_i16: 4170 case Intrinsic::amdgcn_cvt_pk_u16: { 4171 SDValue Src0 = N->getOperand(1); 4172 SDValue Src1 = N->getOperand(2); 4173 SDLoc SL(N); 4174 unsigned Opcode; 4175 4176 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) 4177 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 4178 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) 4179 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 4180 else if (IID == Intrinsic::amdgcn_cvt_pk_i16) 4181 Opcode = AMDGPUISD::CVT_PK_I16_I32; 4182 else 4183 Opcode = AMDGPUISD::CVT_PK_U16_U32; 4184 4185 EVT VT = N->getValueType(0); 4186 if (isTypeLegal(VT)) 4187 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1)); 4188 else { 4189 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, 
Src1); 4190 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); 4191 } 4192 return; 4193 } 4194 } 4195 break; 4196 } 4197 case ISD::INTRINSIC_W_CHAIN: { 4198 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { 4199 Results.push_back(Res); 4200 Results.push_back(Res.getValue(1)); 4201 return; 4202 } 4203 4204 break; 4205 } 4206 case ISD::SELECT: { 4207 SDLoc SL(N); 4208 EVT VT = N->getValueType(0); 4209 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 4210 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 4211 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 4212 4213 EVT SelectVT = NewVT; 4214 if (NewVT.bitsLT(MVT::i32)) { 4215 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 4216 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 4217 SelectVT = MVT::i32; 4218 } 4219 4220 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 4221 N->getOperand(0), LHS, RHS); 4222 4223 if (NewVT != SelectVT) 4224 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 4225 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 4226 return; 4227 } 4228 case ISD::FNEG: { 4229 if (N->getValueType(0) != MVT::v2f16) 4230 break; 4231 4232 SDLoc SL(N); 4233 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 4234 4235 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, 4236 BC, 4237 DAG.getConstant(0x80008000, SL, MVT::i32)); 4238 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 4239 return; 4240 } 4241 case ISD::FABS: { 4242 if (N->getValueType(0) != MVT::v2f16) 4243 break; 4244 4245 SDLoc SL(N); 4246 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 4247 4248 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, 4249 BC, 4250 DAG.getConstant(0x7fff7fff, SL, MVT::i32)); 4251 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 4252 return; 4253 } 4254 default: 4255 break; 4256 } 4257 } 4258 4259 /// Helper function for LowerBRCOND 4260 static SDNode *findUser(SDValue Value, unsigned Opcode) { 4261 4262 SDNode *Parent = Value.getNode(); 4263 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 4264 I != E; ++I) { 4265 4266 if (I.getUse().get() != Value) 4267 continue; 4268 4269 if (I->getOpcode() == Opcode) 4270 return *I; 4271 } 4272 return nullptr; 4273 } 4274 4275 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 4276 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 4277 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 4278 case Intrinsic::amdgcn_if: 4279 return AMDGPUISD::IF; 4280 case Intrinsic::amdgcn_else: 4281 return AMDGPUISD::ELSE; 4282 case Intrinsic::amdgcn_loop: 4283 return AMDGPUISD::LOOP; 4284 case Intrinsic::amdgcn_end_cf: 4285 llvm_unreachable("should not occur"); 4286 default: 4287 return 0; 4288 } 4289 } 4290 4291 // break, if_break, else_break are all only used as inputs to loop, not 4292 // directly as branch conditions. 
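  // Returning 0 here tells LowerBRCOND to treat the branch as an ordinary
  // uniform branch that needs no legalization.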
4293 return 0; 4294 } 4295 4296 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 4297 const Triple &TT = getTargetMachine().getTargetTriple(); 4298 return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 4299 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 4300 AMDGPU::shouldEmitConstantsToTextSection(TT); 4301 } 4302 4303 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 4304 // FIXME: Either avoid relying on address space here or change the default 4305 // address space for functions to avoid the explicit check. 4306 return (GV->getValueType()->isFunctionTy() || 4307 GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 4308 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 4309 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 4310 !shouldEmitFixup(GV) && 4311 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 4312 } 4313 4314 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 4315 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 4316 } 4317 4318 /// This transforms the control flow intrinsics to get the branch destination as 4319 /// last parameter, also switches branch target with BR if the need arise 4320 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 4321 SelectionDAG &DAG) const { 4322 SDLoc DL(BRCOND); 4323 4324 SDNode *Intr = BRCOND.getOperand(1).getNode(); 4325 SDValue Target = BRCOND.getOperand(2); 4326 SDNode *BR = nullptr; 4327 SDNode *SetCC = nullptr; 4328 4329 if (Intr->getOpcode() == ISD::SETCC) { 4330 // As long as we negate the condition everything is fine 4331 SetCC = Intr; 4332 Intr = SetCC->getOperand(0).getNode(); 4333 4334 } else { 4335 // Get the target from BR if we don't negate the condition 4336 BR = findUser(BRCOND, ISD::BR); 4337 Target = BR->getOperand(1); 4338 } 4339 4340 // FIXME: This changes the types of the intrinsics instead of introducing new 4341 // nodes with the correct types. 4342 // e.g. llvm.amdgcn.loop 4343 4344 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 4345 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 4346 4347 unsigned CFNode = isCFIntrinsic(Intr); 4348 if (CFNode == 0) { 4349 // This is a uniform branch so we don't need to legalize. 4350 return BRCOND; 4351 } 4352 4353 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 4354 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 4355 4356 assert(!SetCC || 4357 (SetCC->getConstantOperandVal(1) == 1 && 4358 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 4359 ISD::SETNE)); 4360 4361 // operands of the new intrinsic call 4362 SmallVector<SDValue, 4> Ops; 4363 if (HaveChain) 4364 Ops.push_back(BRCOND.getOperand(0)); 4365 4366 Ops.append(Intr->op_begin() + (HaveChain ? 
2 : 1), Intr->op_end()); 4367 Ops.push_back(Target); 4368 4369 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 4370 4371 // build the new intrinsic call 4372 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 4373 4374 if (!HaveChain) { 4375 SDValue Ops[] = { 4376 SDValue(Result, 0), 4377 BRCOND.getOperand(0) 4378 }; 4379 4380 Result = DAG.getMergeValues(Ops, DL).getNode(); 4381 } 4382 4383 if (BR) { 4384 // Give the branch instruction our target 4385 SDValue Ops[] = { 4386 BR->getOperand(0), 4387 BRCOND.getOperand(2) 4388 }; 4389 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 4390 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 4391 BR = NewBR.getNode(); 4392 } 4393 4394 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 4395 4396 // Copy the intrinsic results to registers 4397 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 4398 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 4399 if (!CopyToReg) 4400 continue; 4401 4402 Chain = DAG.getCopyToReg( 4403 Chain, DL, 4404 CopyToReg->getOperand(1), 4405 SDValue(Result, i - 1), 4406 SDValue()); 4407 4408 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 4409 } 4410 4411 // Remove the old intrinsic from the chain 4412 DAG.ReplaceAllUsesOfValueWith( 4413 SDValue(Intr, Intr->getNumValues() - 1), 4414 Intr->getOperand(0)); 4415 4416 return Chain; 4417 } 4418 4419 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, 4420 SelectionDAG &DAG) const { 4421 MVT VT = Op.getSimpleValueType(); 4422 SDLoc DL(Op); 4423 // Checking the depth 4424 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) 4425 return DAG.getConstant(0, DL, VT); 4426 4427 MachineFunction &MF = DAG.getMachineFunction(); 4428 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4429 // Check for kernel and shader functions 4430 if (Info->isEntryFunction()) 4431 return DAG.getConstant(0, DL, VT); 4432 4433 MachineFrameInfo &MFI = MF.getFrameInfo(); 4434 // There is a call to @llvm.returnaddress in this function 4435 MFI.setReturnAddressIsTaken(true); 4436 4437 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 4438 // Get the return address reg and mark it as an implicit live-in 4439 unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent())); 4440 4441 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); 4442 } 4443 4444 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 4445 SDValue Op, 4446 const SDLoc &DL, 4447 EVT VT) const { 4448 return Op.getValueType().bitsLE(VT) ? 
4449 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 4450 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 4451 } 4452 4453 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 4454 assert(Op.getValueType() == MVT::f16 && 4455 "Do not know how to custom lower FP_ROUND for non-f16 type"); 4456 4457 SDValue Src = Op.getOperand(0); 4458 EVT SrcVT = Src.getValueType(); 4459 if (SrcVT != MVT::f64) 4460 return Op; 4461 4462 SDLoc DL(Op); 4463 4464 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 4465 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 4466 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 4467 } 4468 4469 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, 4470 SelectionDAG &DAG) const { 4471 EVT VT = Op.getValueType(); 4472 const MachineFunction &MF = DAG.getMachineFunction(); 4473 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4474 bool IsIEEEMode = Info->getMode().IEEE; 4475 4476 // FIXME: Assert during eslection that this is only selected for 4477 // ieee_mode. Currently a combine can produce the ieee version for non-ieee 4478 // mode functions, but this happens to be OK since it's only done in cases 4479 // where there is known no sNaN. 4480 if (IsIEEEMode) 4481 return expandFMINNUM_FMAXNUM(Op.getNode(), DAG); 4482 4483 if (VT == MVT::v4f16) 4484 return splitBinaryVectorOp(Op, DAG); 4485 return Op; 4486 } 4487 4488 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 4489 SDLoc SL(Op); 4490 SDValue Chain = Op.getOperand(0); 4491 4492 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || 4493 !Subtarget->isTrapHandlerEnabled()) 4494 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); 4495 4496 MachineFunction &MF = DAG.getMachineFunction(); 4497 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4498 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 4499 assert(UserSGPR != AMDGPU::NoRegister); 4500 SDValue QueuePtr = CreateLiveInRegister( 4501 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 4502 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); 4503 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, 4504 QueuePtr, SDValue()); 4505 SDValue Ops[] = { 4506 ToReg, 4507 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16), 4508 SGPR01, 4509 ToReg.getValue(1) 4510 }; 4511 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 4512 } 4513 4514 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { 4515 SDLoc SL(Op); 4516 SDValue Chain = Op.getOperand(0); 4517 MachineFunction &MF = DAG.getMachineFunction(); 4518 4519 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || 4520 !Subtarget->isTrapHandlerEnabled()) { 4521 DiagnosticInfoUnsupported NoTrap(MF.getFunction(), 4522 "debugtrap handler not supported", 4523 Op.getDebugLoc(), 4524 DS_Warning); 4525 LLVMContext &Ctx = MF.getFunction().getContext(); 4526 Ctx.diagnose(NoTrap); 4527 return Chain; 4528 } 4529 4530 SDValue Ops[] = { 4531 Chain, 4532 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16) 4533 }; 4534 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 4535 } 4536 4537 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, 4538 SelectionDAG &DAG) const { 4539 // FIXME: Use inline constants (src_{shared, private}_base) instead. 4540 if (Subtarget->hasApertureRegs()) { 4541 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ? 
4542 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : 4543 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; 4544 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ? 4545 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : 4546 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; 4547 unsigned Encoding = 4548 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | 4549 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | 4550 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; 4551 4552 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); 4553 SDValue ApertureReg = SDValue( 4554 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); 4555 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); 4556 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); 4557 } 4558 4559 MachineFunction &MF = DAG.getMachineFunction(); 4560 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4561 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 4562 assert(UserSGPR != AMDGPU::NoRegister); 4563 4564 SDValue QueuePtr = CreateLiveInRegister( 4565 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 4566 4567 // Offset into amd_queue_t for group_segment_aperture_base_hi / 4568 // private_segment_aperture_base_hi. 4569 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; 4570 4571 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset); 4572 4573 // TODO: Use custom target PseudoSourceValue. 4574 // TODO: We should use the value from the IR intrinsic call, but it might not 4575 // be available and how do we get it? 4576 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 4577 AMDGPUAS::CONSTANT_ADDRESS)); 4578 4579 MachinePointerInfo PtrInfo(V, StructOffset); 4580 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, 4581 MinAlign(64, StructOffset), 4582 MachineMemOperand::MODereferenceable | 4583 MachineMemOperand::MOInvariant); 4584 } 4585 4586 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 4587 SelectionDAG &DAG) const { 4588 SDLoc SL(Op); 4589 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 4590 4591 SDValue Src = ASC->getOperand(0); 4592 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 4593 4594 const AMDGPUTargetMachine &TM = 4595 static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); 4596 4597 // flat -> local/private 4598 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 4599 unsigned DestAS = ASC->getDestAddressSpace(); 4600 4601 if (DestAS == AMDGPUAS::LOCAL_ADDRESS || 4602 DestAS == AMDGPUAS::PRIVATE_ADDRESS) { 4603 unsigned NullVal = TM.getNullPointerValue(DestAS); 4604 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 4605 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 4606 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 4607 4608 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 4609 NonNull, Ptr, SegmentNullPtr); 4610 } 4611 } 4612 4613 // local/private -> flat 4614 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 4615 unsigned SrcAS = ASC->getSrcAddressSpace(); 4616 4617 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS || 4618 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) { 4619 unsigned NullVal = TM.getNullPointerValue(SrcAS); 4620 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 4621 4622 SDValue NonNull 4623 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 4624 4625 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); 4626 SDValue CvtPtr 4627 = 
DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 4628 4629 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 4630 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 4631 FlatNullPtr); 4632 } 4633 } 4634 4635 // global <-> flat are no-ops and never emitted. 4636 4637 const MachineFunction &MF = DAG.getMachineFunction(); 4638 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 4639 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 4640 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 4641 4642 return DAG.getUNDEF(ASC->getValueType(0)); 4643 } 4644 4645 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from 4646 // the small vector and inserting them into the big vector. That is better than 4647 // the default expansion of doing it via a stack slot. Even though the use of 4648 // the stack slot would be optimized away afterwards, the stack slot itself 4649 // remains. 4650 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, 4651 SelectionDAG &DAG) const { 4652 SDValue Vec = Op.getOperand(0); 4653 SDValue Ins = Op.getOperand(1); 4654 SDValue Idx = Op.getOperand(2); 4655 EVT VecVT = Vec.getValueType(); 4656 EVT InsVT = Ins.getValueType(); 4657 EVT EltVT = VecVT.getVectorElementType(); 4658 unsigned InsNumElts = InsVT.getVectorNumElements(); 4659 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 4660 SDLoc SL(Op); 4661 4662 for (unsigned I = 0; I != InsNumElts; ++I) { 4663 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins, 4664 DAG.getConstant(I, SL, MVT::i32)); 4665 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt, 4666 DAG.getConstant(IdxVal + I, SL, MVT::i32)); 4667 } 4668 return Vec; 4669 } 4670 4671 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 4672 SelectionDAG &DAG) const { 4673 SDValue Vec = Op.getOperand(0); 4674 SDValue InsVal = Op.getOperand(1); 4675 SDValue Idx = Op.getOperand(2); 4676 EVT VecVT = Vec.getValueType(); 4677 EVT EltVT = VecVT.getVectorElementType(); 4678 unsigned VecSize = VecVT.getSizeInBits(); 4679 unsigned EltSize = EltVT.getSizeInBits(); 4680 4681 4682 assert(VecSize <= 64); 4683 4684 unsigned NumElts = VecVT.getVectorNumElements(); 4685 SDLoc SL(Op); 4686 auto KIdx = dyn_cast<ConstantSDNode>(Idx); 4687 4688 if (NumElts == 4 && EltSize == 16 && KIdx) { 4689 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); 4690 4691 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 4692 DAG.getConstant(0, SL, MVT::i32)); 4693 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 4694 DAG.getConstant(1, SL, MVT::i32)); 4695 4696 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); 4697 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); 4698 4699 unsigned Idx = KIdx->getZExtValue(); 4700 bool InsertLo = Idx < 2; 4701 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, 4702 InsertLo ? LoVec : HiVec, 4703 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), 4704 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); 4705 4706 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); 4707 4708 SDValue Concat = InsertLo ? 
4709 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : 4710 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); 4711 4712 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); 4713 } 4714 4715 if (isa<ConstantSDNode>(Idx)) 4716 return SDValue(); 4717 4718 MVT IntVT = MVT::getIntegerVT(VecSize); 4719 4720 // Avoid stack access for dynamic indexing. 4721 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 4722 4723 // Create a congruent vector with the target value in each element so that 4724 // the required element can be masked and ORed into the target vector. 4725 SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, 4726 DAG.getSplatBuildVector(VecVT, SL, InsVal)); 4727 4728 assert(isPowerOf2_32(EltSize)); 4729 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 4730 4731 // Convert vector index to bit-index. 4732 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 4733 4734 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 4735 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, 4736 DAG.getConstant(0xffff, SL, IntVT), 4737 ScaledIdx); 4738 4739 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); 4740 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, 4741 DAG.getNOT(SL, BFM, IntVT), BCVec); 4742 4743 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); 4744 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); 4745 } 4746 4747 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 4748 SelectionDAG &DAG) const { 4749 SDLoc SL(Op); 4750 4751 EVT ResultVT = Op.getValueType(); 4752 SDValue Vec = Op.getOperand(0); 4753 SDValue Idx = Op.getOperand(1); 4754 EVT VecVT = Vec.getValueType(); 4755 unsigned VecSize = VecVT.getSizeInBits(); 4756 EVT EltVT = VecVT.getVectorElementType(); 4757 assert(VecSize <= 64); 4758 4759 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); 4760 4761 // Make sure we do any optimizations that will make it easier to fold 4762 // source modifiers before obscuring it with bit operations. 4763 4764 // XXX - Why doesn't this get called when vector_shuffle is expanded? 4765 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) 4766 return Combined; 4767 4768 unsigned EltSize = EltVT.getSizeInBits(); 4769 assert(isPowerOf2_32(EltSize)); 4770 4771 MVT IntVT = MVT::getIntegerVT(VecSize); 4772 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 4773 4774 // Convert vector index to bit-index (* EltSize) 4775 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 4776 4777 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 4778 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); 4779 4780 if (ResultVT == MVT::f16) { 4781 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); 4782 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 4783 } 4784 4785 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); 4786 } 4787 4788 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { 4789 assert(Elt % 2 == 0); 4790 return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0); 4791 } 4792 4793 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, 4794 SelectionDAG &DAG) const { 4795 SDLoc SL(Op); 4796 EVT ResultVT = Op.getValueType(); 4797 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 4798 4799 EVT PackVT = ResultVT.isInteger() ? 
MVT::v2i16 : MVT::v2f16; 4800 EVT EltVT = PackVT.getVectorElementType(); 4801 int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements(); 4802 4803 // vector_shuffle <0,1,6,7> lhs, rhs 4804 // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2) 4805 // 4806 // vector_shuffle <6,7,2,3> lhs, rhs 4807 // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2) 4808 // 4809 // vector_shuffle <6,7,0,1> lhs, rhs 4810 // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0) 4811 4812 // Avoid scalarizing when both halves are reading from consecutive elements. 4813 SmallVector<SDValue, 4> Pieces; 4814 for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) { 4815 if (elementPairIsContiguous(SVN->getMask(), I)) { 4816 const int Idx = SVN->getMaskElt(I); 4817 int VecIdx = Idx < SrcNumElts ? 0 : 1; 4818 int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts; 4819 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, 4820 PackVT, SVN->getOperand(VecIdx), 4821 DAG.getConstant(EltIdx, SL, MVT::i32)); 4822 Pieces.push_back(SubVec); 4823 } else { 4824 const int Idx0 = SVN->getMaskElt(I); 4825 const int Idx1 = SVN->getMaskElt(I + 1); 4826 int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1; 4827 int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1; 4828 int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts; 4829 int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts; 4830 4831 SDValue Vec0 = SVN->getOperand(VecIdx0); 4832 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 4833 Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32)); 4834 4835 SDValue Vec1 = SVN->getOperand(VecIdx1); 4836 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 4837 Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32)); 4838 Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 })); 4839 } 4840 } 4841 4842 return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces); 4843 } 4844 4845 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, 4846 SelectionDAG &DAG) const { 4847 SDLoc SL(Op); 4848 EVT VT = Op.getValueType(); 4849 4850 if (VT == MVT::v4i16 || VT == MVT::v4f16) { 4851 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2); 4852 4853 // Turn into pair of packed build_vectors. 4854 // TODO: Special case for constants that can be materialized with s_mov_b64. 4855 SDValue Lo = DAG.getBuildVector(HalfVT, SL, 4856 { Op.getOperand(0), Op.getOperand(1) }); 4857 SDValue Hi = DAG.getBuildVector(HalfVT, SL, 4858 { Op.getOperand(2), Op.getOperand(3) }); 4859 4860 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo); 4861 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi); 4862 4863 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi }); 4864 return DAG.getNode(ISD::BITCAST, SL, VT, Blend); 4865 } 4866 4867 assert(VT == MVT::v2f16 || VT == MVT::v2i16); 4868 assert(!Subtarget->hasVOP3PInsts() && "this should be legal"); 4869 4870 SDValue Lo = Op.getOperand(0); 4871 SDValue Hi = Op.getOperand(1); 4872 4873 // Avoid adding defined bits with the zero_extend. 
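  //
  // Illustrative sketch of the packing implemented below (a descriptive note,
  // not extra lowering): with both halves defined the result is roughly
  //   bitcast(VT, ((zext i32 Hi) << 16) | (zext i32 Lo))
  // and when one half is undef the corresponding zero_extend/or is skipped so
  // the undefined bits are not needlessly forced to zero.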
4874 if (Hi.isUndef()) { 4875 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 4876 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); 4877 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); 4878 } 4879 4880 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); 4881 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); 4882 4883 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, 4884 DAG.getConstant(16, SL, MVT::i32)); 4885 if (Lo.isUndef()) 4886 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); 4887 4888 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 4889 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); 4890 4891 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); 4892 return DAG.getNode(ISD::BITCAST, SL, VT, Or); 4893 } 4894 4895 bool 4896 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 4897 // We can fold offsets for anything that doesn't require a GOT relocation. 4898 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 4899 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 4900 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 4901 !shouldEmitGOTReloc(GA->getGlobal()); 4902 } 4903 4904 static SDValue 4905 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 4906 const SDLoc &DL, unsigned Offset, EVT PtrVT, 4907 unsigned GAFlags = SIInstrInfo::MO_NONE) { 4908 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 4909 // lowered to the following code sequence: 4910 // 4911 // For constant address space: 4912 // s_getpc_b64 s[0:1] 4913 // s_add_u32 s0, s0, $symbol 4914 // s_addc_u32 s1, s1, 0 4915 // 4916 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4917 // a fixup or relocation is emitted to replace $symbol with a literal 4918 // constant, which is a pc-relative offset from the encoding of the $symbol 4919 // operand to the global variable. 4920 // 4921 // For global address space: 4922 // s_getpc_b64 s[0:1] 4923 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 4924 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 4925 // 4926 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4927 // fixups or relocations are emitted to replace $symbol@*@lo and 4928 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 4929 // which is a 64-bit pc-relative offset from the encoding of the $symbol 4930 // operand to the global variable. 4931 // 4932 // What we want here is an offset from the value returned by s_getpc 4933 // (which is the address of the s_add_u32 instruction) to the global 4934 // variable, but since the encoding of $symbol starts 4 bytes after the start 4935 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 4936 // small. This requires us to add 4 to the global variable offset in order to 4937 // compute the correct address. 
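  //
  // A rough worked example for the rel32 case: s_getpc_b64 yields PC and the
  // $symbol literal is encoded at PC + 4, so the fixup computes
  //   (GV + Offset + 4) - (PC + 4) = GV + Offset - PC
  // once the extra +4 below is folded into the addend; adding that to PC
  // yields exactly GV + Offset.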
4938 unsigned LoFlags = GAFlags; 4939 if (LoFlags == SIInstrInfo::MO_NONE) 4940 LoFlags = SIInstrInfo::MO_REL32; 4941 SDValue PtrLo = 4942 DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, LoFlags); 4943 SDValue PtrHi; 4944 if (GAFlags == SIInstrInfo::MO_NONE) { 4945 PtrHi = DAG.getTargetConstant(0, DL, MVT::i32); 4946 } else { 4947 PtrHi = 4948 DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1); 4949 } 4950 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 4951 } 4952 4953 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 4954 SDValue Op, 4955 SelectionDAG &DAG) const { 4956 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 4957 const GlobalValue *GV = GSD->getGlobal(); 4958 if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && 4959 (!GV->hasExternalLinkage() || 4960 getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || 4961 getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL)) || 4962 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || 4963 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) 4964 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 4965 4966 SDLoc DL(GSD); 4967 EVT PtrVT = Op.getValueType(); 4968 4969 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 4970 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(), 4971 SIInstrInfo::MO_ABS32_LO); 4972 return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA); 4973 } 4974 4975 if (shouldEmitFixup(GV)) 4976 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 4977 else if (shouldEmitPCReloc(GV)) 4978 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 4979 SIInstrInfo::MO_REL32); 4980 4981 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 4982 SIInstrInfo::MO_GOTPCREL32); 4983 4984 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 4985 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 4986 const DataLayout &DataLayout = DAG.getDataLayout(); 4987 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 4988 MachinePointerInfo PtrInfo 4989 = MachinePointerInfo::getGOT(DAG.getMachineFunction()); 4990 4991 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 4992 MachineMemOperand::MODereferenceable | 4993 MachineMemOperand::MOInvariant); 4994 } 4995 4996 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 4997 const SDLoc &DL, SDValue V) const { 4998 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 4999 // the destination register. 5000 // 5001 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 5002 // so we will end up with redundant moves to m0. 5003 // 5004 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 5005 5006 // A Null SDValue creates a glue result. 5007 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 5008 V, Chain); 5009 return SDValue(M0, 0); 5010 } 5011 5012 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 5013 SDValue Op, 5014 MVT VT, 5015 unsigned Offset) const { 5016 SDLoc SL(Op); 5017 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, 5018 DAG.getEntryNode(), Offset, 4, false); 5019 // The local size values will have the hi 16-bits as zero. 
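  // Descriptive note: AssertZext with the narrower value type is how that
  // guarantee is recorded in the DAG, letting later combines treat the upper
  // bits as known zero and drop redundant masks.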
5020   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5021                      DAG.getValueType(VT));
5022 }
5023
5024 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5025                                         EVT VT) {
5026   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5027                                       "non-hsa intrinsic with hsa target",
5028                                       DL.getDebugLoc());
5029   DAG.getContext()->diagnose(BadIntrin);
5030   return DAG.getUNDEF(VT);
5031 }
5032
5033 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5034                                          EVT VT) {
5035   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5036                                       "intrinsic not supported on subtarget",
5037                                       DL.getDebugLoc());
5038   DAG.getContext()->diagnose(BadIntrin);
5039   return DAG.getUNDEF(VT);
5040 }
5041
5042 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5043                                     ArrayRef<SDValue> Elts) {
5044   assert(!Elts.empty());
5045   MVT Type;
5046   unsigned NumElts;
5047
5048   if (Elts.size() == 1) {
5049     Type = MVT::f32;
5050     NumElts = 1;
5051   } else if (Elts.size() == 2) {
5052     Type = MVT::v2f32;
5053     NumElts = 2;
5054   } else if (Elts.size() <= 4) {
5055     Type = MVT::v4f32;
5056     NumElts = 4;
5057   } else if (Elts.size() <= 8) {
5058     Type = MVT::v8f32;
5059     NumElts = 8;
5060   } else {
5061     assert(Elts.size() <= 16);
5062     Type = MVT::v16f32;
5063     NumElts = 16;
5064   }
5065
5066   SmallVector<SDValue, 16> VecElts(NumElts);
5067   for (unsigned i = 0; i < Elts.size(); ++i) {
5068     SDValue Elt = Elts[i];
5069     if (Elt.getValueType() != MVT::f32)
5070       Elt = DAG.getBitcast(MVT::f32, Elt);
5071     VecElts[i] = Elt;
5072   }
5073   for (unsigned i = Elts.size(); i < NumElts; ++i)
5074     VecElts[i] = DAG.getUNDEF(MVT::f32);
5075
5076   if (NumElts == 1)
5077     return VecElts[0];
5078   return DAG.getBuildVector(Type, DL, VecElts);
5079 }
5080
5081 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
5082                              SDValue *GLC, SDValue *SLC, SDValue *DLC) {
5083   auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
5084
5085   uint64_t Value = CachePolicyConst->getZExtValue();
5086   SDLoc DL(CachePolicy);
5087   if (GLC) {
5088     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5089     Value &= ~(uint64_t)0x1;
5090   }
5091   if (SLC) {
5092     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5093     Value &= ~(uint64_t)0x2;
5094   }
5095   if (DLC) {
5096     *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5097     Value &= ~(uint64_t)0x4;
5098   }
5099
5100   return Value == 0;
5101 }
5102
5103 // Re-construct the required return value for an image load intrinsic.
5104 // This is more complicated due to the optional use of TexFailCtrl, which
5105 // means the required return type is an aggregate.
5106 static SDValue constructRetValue(SelectionDAG &DAG,
5107                                  MachineSDNode *Result,
5108                                  ArrayRef<EVT> ResultTypes,
5109                                  bool IsTexFail, bool Unpacked, bool IsD16,
5110                                  int DMaskPop, int NumVDataDwords,
5111                                  const SDLoc &DL, LLVMContext &Context) {
5112   // Determine the required return type. This is the same regardless of the IsTexFail flag.
5113   EVT ReqRetVT = ResultTypes[0];
5114   EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
5115   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
5116   EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
5117   EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ?
EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts) 5118 : AdjEltVT 5119 : ReqRetVT; 5120 5121 // Extract data part of the result 5122 // Bitcast the result to the same type as the required return type 5123 int NumElts; 5124 if (IsD16 && !Unpacked) 5125 NumElts = NumVDataDwords << 1; 5126 else 5127 NumElts = NumVDataDwords; 5128 5129 EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts) 5130 : AdjEltVT; 5131 5132 // Special case for v6f16. Rather than add support for this, use v3i32 to 5133 // extract the data elements 5134 bool V6F16Special = false; 5135 if (NumElts == 6) { 5136 CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2); 5137 DMaskPop >>= 1; 5138 ReqRetNumElts >>= 1; 5139 V6F16Special = true; 5140 AdjVT = MVT::v2i32; 5141 } 5142 5143 SDValue N = SDValue(Result, 0); 5144 SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N); 5145 5146 // Iterate over the result 5147 SmallVector<SDValue, 4> BVElts; 5148 5149 if (CastVT.isVector()) { 5150 DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop); 5151 } else { 5152 BVElts.push_back(CastRes); 5153 } 5154 int ExtraElts = ReqRetNumElts - DMaskPop; 5155 while(ExtraElts--) 5156 BVElts.push_back(DAG.getUNDEF(AdjEltVT)); 5157 5158 SDValue PreTFCRes; 5159 if (ReqRetNumElts > 1) { 5160 SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts); 5161 if (IsD16 && Unpacked) 5162 PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked); 5163 else 5164 PreTFCRes = NewVec; 5165 } else { 5166 PreTFCRes = BVElts[0]; 5167 } 5168 5169 if (V6F16Special) 5170 PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes); 5171 5172 if (!IsTexFail) { 5173 if (Result->getNumValues() > 1) 5174 return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL); 5175 else 5176 return PreTFCRes; 5177 } 5178 5179 // Extract the TexFail result and insert into aggregate return 5180 SmallVector<SDValue, 1> TFCElt; 5181 DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1); 5182 SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]); 5183 return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL); 5184 } 5185 5186 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, 5187 SDValue *LWE, bool &IsTexFail) { 5188 auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); 5189 5190 uint64_t Value = TexFailCtrlConst->getZExtValue(); 5191 if (Value) { 5192 IsTexFail = true; 5193 } 5194 5195 SDLoc DL(TexFailCtrlConst); 5196 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); 5197 Value &= ~(uint64_t)0x1; 5198 *LWE = DAG.getTargetConstant((Value & 0x2) ? 
1 : 0, DL, MVT::i32); 5199 Value &= ~(uint64_t)0x2; 5200 5201 return Value == 0; 5202 } 5203 5204 SDValue SITargetLowering::lowerImage(SDValue Op, 5205 const AMDGPU::ImageDimIntrinsicInfo *Intr, 5206 SelectionDAG &DAG) const { 5207 SDLoc DL(Op); 5208 MachineFunction &MF = DAG.getMachineFunction(); 5209 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); 5210 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 5211 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 5212 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 5213 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = 5214 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); 5215 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = 5216 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); 5217 unsigned IntrOpcode = Intr->BaseOpcode; 5218 bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10; 5219 5220 SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end()); 5221 SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end()); 5222 bool IsD16 = false; 5223 bool IsA16 = false; 5224 SDValue VData; 5225 int NumVDataDwords; 5226 bool AdjustRetType = false; 5227 5228 unsigned AddrIdx; // Index of first address argument 5229 unsigned DMask; 5230 unsigned DMaskLanes = 0; 5231 5232 if (BaseOpcode->Atomic) { 5233 VData = Op.getOperand(2); 5234 5235 bool Is64Bit = VData.getValueType() == MVT::i64; 5236 if (BaseOpcode->AtomicX2) { 5237 SDValue VData2 = Op.getOperand(3); 5238 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, 5239 {VData, VData2}); 5240 if (Is64Bit) 5241 VData = DAG.getBitcast(MVT::v4i32, VData); 5242 5243 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; 5244 DMask = Is64Bit ? 0xf : 0x3; 5245 NumVDataDwords = Is64Bit ? 4 : 2; 5246 AddrIdx = 4; 5247 } else { 5248 DMask = Is64Bit ? 0x3 : 0x1; 5249 NumVDataDwords = Is64Bit ? 2 : 1; 5250 AddrIdx = 3; 5251 } 5252 } else { 5253 unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1; 5254 auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx)); 5255 DMask = DMaskConst->getZExtValue(); 5256 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); 5257 5258 if (BaseOpcode->Store) { 5259 VData = Op.getOperand(2); 5260 5261 MVT StoreVT = VData.getSimpleValueType(); 5262 if (StoreVT.getScalarType() == MVT::f16) { 5263 if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) 5264 return Op; // D16 is unsupported for this instruction 5265 5266 IsD16 = true; 5267 VData = handleD16VData(VData, DAG); 5268 } 5269 5270 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; 5271 } else { 5272 // Work out the num dwords based on the dmask popcount and underlying type 5273 // and whether packing is supported. 5274 MVT LoadVT = ResultTypes[0].getSimpleVT(); 5275 if (LoadVT.getScalarType() == MVT::f16) { 5276 if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) 5277 return Op; // D16 is unsupported for this instruction 5278 5279 IsD16 = true; 5280 } 5281 5282 // Confirm that the return type is large enough for the dmask specified 5283 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || 5284 (!LoadVT.isVector() && DMaskLanes > 1)) 5285 return Op; 5286 5287 if (IsD16 && !Subtarget->hasUnpackedD16VMem()) 5288 NumVDataDwords = (DMaskLanes + 1) / 2; 5289 else 5290 NumVDataDwords = DMaskLanes; 5291 5292 AdjustRetType = true; 5293 } 5294 5295 AddrIdx = DMaskIdx + 1; 5296 } 5297 5298 unsigned NumGradients = BaseOpcode->Gradients ? 
DimInfo->NumGradients : 0; 5299 unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0; 5300 unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0; 5301 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients + 5302 NumCoords + NumLCM; 5303 unsigned NumMIVAddrs = NumVAddrs; 5304 5305 SmallVector<SDValue, 4> VAddrs; 5306 5307 // Optimize _L to _LZ when _L is zero 5308 if (LZMappingInfo) { 5309 if (auto ConstantLod = 5310 dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) { 5311 if (ConstantLod->isZero() || ConstantLod->isNegative()) { 5312 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l 5313 NumMIVAddrs--; // remove 'lod' 5314 } 5315 } 5316 } 5317 5318 // Optimize _mip away, when 'lod' is zero 5319 if (MIPMappingInfo) { 5320 if (auto ConstantLod = 5321 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) { 5322 if (ConstantLod->isNullValue()) { 5323 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip 5324 NumMIVAddrs--; // remove 'lod' 5325 } 5326 } 5327 } 5328 5329 // Check for 16 bit addresses and pack if true. 5330 unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs; 5331 MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType(); 5332 const MVT VAddrScalarVT = VAddrVT.getScalarType(); 5333 if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) && 5334 ST->hasFeature(AMDGPU::FeatureR128A16)) { 5335 IsA16 = true; 5336 const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; 5337 for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) { 5338 SDValue AddrLo, AddrHi; 5339 // Push back extra arguments. 5340 if (i < DimIdx) { 5341 AddrLo = Op.getOperand(i); 5342 } else { 5343 AddrLo = Op.getOperand(i); 5344 // Dz/dh, dz/dv and the last odd coord are packed with undef. Also, 5345 // in 1D, derivatives dx/dh and dx/dv are packed with undef. 5346 if (((i + 1) >= (AddrIdx + NumMIVAddrs)) || 5347 ((NumGradients / 2) % 2 == 1 && 5348 (i == DimIdx + (NumGradients / 2) - 1 || 5349 i == DimIdx + NumGradients - 1))) { 5350 AddrHi = DAG.getUNDEF(MVT::f16); 5351 } else { 5352 AddrHi = Op.getOperand(i + 1); 5353 i++; 5354 } 5355 AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT, 5356 {AddrLo, AddrHi}); 5357 AddrLo = DAG.getBitcast(MVT::i32, AddrLo); 5358 } 5359 VAddrs.push_back(AddrLo); 5360 } 5361 } else { 5362 for (unsigned i = 0; i < NumMIVAddrs; ++i) 5363 VAddrs.push_back(Op.getOperand(AddrIdx + i)); 5364 } 5365 5366 // If the register allocator cannot place the address registers contiguously 5367 // without introducing moves, then using the non-sequential address encoding 5368 // is always preferable, since it saves VALU instructions and is usually a 5369 // wash in terms of code size or even better. 5370 // 5371 // However, we currently have no way of hinting to the register allocator that 5372 // MIMG addresses should be placed contiguously when it is possible to do so, 5373 // so force non-NSA for the common 2-address case as a heuristic. 5374 // 5375 // SIShrinkInstructions will convert NSA encodings to non-NSA after register 5376 // allocation when possible. 
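  //
  // Concretely (a descriptive note, not extra lowering): with only one or two
  // addresses, UseNSA stays false below and the addresses are packed into a
  // single contiguous vector by getBuildDwordsVector; with three or more
  // addresses on an NSA-capable subtarget they are kept as separate operands.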
5377 bool UseNSA = 5378 ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3; 5379 SDValue VAddr; 5380 if (!UseNSA) 5381 VAddr = getBuildDwordsVector(DAG, DL, VAddrs); 5382 5383 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); 5384 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); 5385 unsigned CtrlIdx; // Index of texfailctrl argument 5386 SDValue Unorm; 5387 if (!BaseOpcode->Sampler) { 5388 Unorm = True; 5389 CtrlIdx = AddrIdx + NumVAddrs + 1; 5390 } else { 5391 auto UnormConst = 5392 cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2)); 5393 5394 Unorm = UnormConst->getZExtValue() ? True : False; 5395 CtrlIdx = AddrIdx + NumVAddrs + 3; 5396 } 5397 5398 SDValue TFE; 5399 SDValue LWE; 5400 SDValue TexFail = Op.getOperand(CtrlIdx); 5401 bool IsTexFail = false; 5402 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) 5403 return Op; 5404 5405 if (IsTexFail) { 5406 if (!DMaskLanes) { 5407 // Expecting to get an error flag since TFC is on - and dmask is 0 5408 // Force dmask to be at least 1 otherwise the instruction will fail 5409 DMask = 0x1; 5410 DMaskLanes = 1; 5411 NumVDataDwords = 1; 5412 } 5413 NumVDataDwords += 1; 5414 AdjustRetType = true; 5415 } 5416 5417 // Has something earlier tagged that the return type needs adjusting 5418 // This happens if the instruction is a load or has set TexFailCtrl flags 5419 if (AdjustRetType) { 5420 // NumVDataDwords reflects the true number of dwords required in the return type 5421 if (DMaskLanes == 0 && !BaseOpcode->Store) { 5422 // This is a no-op load. This can be eliminated 5423 SDValue Undef = DAG.getUNDEF(Op.getValueType()); 5424 if (isa<MemSDNode>(Op)) 5425 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); 5426 return Undef; 5427 } 5428 5429 EVT NewVT = NumVDataDwords > 1 ? 5430 EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords) 5431 : MVT::f32; 5432 5433 ResultTypes[0] = NewVT; 5434 if (ResultTypes.size() == 3) { 5435 // Original result was aggregate type used for TexFailCtrl results 5436 // The actual instruction returns as a vector type which has now been 5437 // created. Remove the aggregate result. 5438 ResultTypes.erase(&ResultTypes[1]); 5439 } 5440 } 5441 5442 SDValue GLC; 5443 SDValue SLC; 5444 SDValue DLC; 5445 if (BaseOpcode->Atomic) { 5446 GLC = True; // TODO no-return optimization 5447 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC, 5448 IsGFX10 ? &DLC : nullptr)) 5449 return Op; 5450 } else { 5451 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC, 5452 IsGFX10 ? &DLC : nullptr)) 5453 return Op; 5454 } 5455 5456 SmallVector<SDValue, 26> Ops; 5457 if (BaseOpcode->Store || BaseOpcode->Atomic) 5458 Ops.push_back(VData); // vdata 5459 if (UseNSA) { 5460 for (const SDValue &Addr : VAddrs) 5461 Ops.push_back(Addr); 5462 } else { 5463 Ops.push_back(VAddr); 5464 } 5465 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc 5466 if (BaseOpcode->Sampler) 5467 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler 5468 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); 5469 if (IsGFX10) 5470 Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); 5471 Ops.push_back(Unorm); 5472 if (IsGFX10) 5473 Ops.push_back(DLC); 5474 Ops.push_back(GLC); 5475 Ops.push_back(SLC); 5476 Ops.push_back(IsA16 && // a16 or r128 5477 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); 5478 Ops.push_back(TFE); // tfe 5479 Ops.push_back(LWE); // lwe 5480 if (!IsGFX10) 5481 Ops.push_back(DimInfo->DA ? 
True : False); 5482 if (BaseOpcode->HasD16) 5483 Ops.push_back(IsD16 ? True : False); 5484 if (isa<MemSDNode>(Op)) 5485 Ops.push_back(Op.getOperand(0)); // chain 5486 5487 int NumVAddrDwords = 5488 UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; 5489 int Opcode = -1; 5490 5491 if (IsGFX10) { 5492 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 5493 UseNSA ? AMDGPU::MIMGEncGfx10NSA 5494 : AMDGPU::MIMGEncGfx10Default, 5495 NumVDataDwords, NumVAddrDwords); 5496 } else { 5497 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 5498 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 5499 NumVDataDwords, NumVAddrDwords); 5500 if (Opcode == -1) 5501 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 5502 NumVDataDwords, NumVAddrDwords); 5503 } 5504 assert(Opcode != -1); 5505 5506 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); 5507 if (auto MemOp = dyn_cast<MemSDNode>(Op)) { 5508 MachineMemOperand *MemRef = MemOp->getMemOperand(); 5509 DAG.setNodeMemRefs(NewNode, {MemRef}); 5510 } 5511 5512 if (BaseOpcode->AtomicX2) { 5513 SmallVector<SDValue, 1> Elt; 5514 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); 5515 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); 5516 } else if (!BaseOpcode->Store) { 5517 return constructRetValue(DAG, NewNode, 5518 OrigResultTypes, IsTexFail, 5519 Subtarget->hasUnpackedD16VMem(), IsD16, 5520 DMaskLanes, NumVDataDwords, DL, 5521 *DAG.getContext()); 5522 } 5523 5524 return SDValue(NewNode, 0); 5525 } 5526 5527 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, 5528 SDValue Offset, SDValue GLC, SDValue DLC, 5529 SelectionDAG &DAG) const { 5530 MachineFunction &MF = DAG.getMachineFunction(); 5531 MachineMemOperand *MMO = MF.getMachineMemOperand( 5532 MachinePointerInfo(), 5533 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 5534 MachineMemOperand::MOInvariant, 5535 VT.getStoreSize(), VT.getStoreSize()); 5536 5537 if (!Offset->isDivergent()) { 5538 SDValue Ops[] = { 5539 Rsrc, 5540 Offset, // Offset 5541 GLC, 5542 DLC, 5543 }; 5544 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, 5545 DAG.getVTList(VT), Ops, VT, MMO); 5546 } 5547 5548 // We have a divergent offset. Emit a MUBUF buffer load instead. We can 5549 // assume that the buffer is unswizzled. 5550 SmallVector<SDValue, 4> Loads; 5551 unsigned NumLoads = 1; 5552 MVT LoadVT = VT.getSimpleVT(); 5553 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; 5554 assert((LoadVT.getScalarType() == MVT::i32 || 5555 LoadVT.getScalarType() == MVT::f32) && 5556 isPowerOf2_32(NumElts)); 5557 5558 if (NumElts == 8 || NumElts == 16) { 5559 NumLoads = NumElts == 16 ? 4 : 2; 5560 LoadVT = MVT::v4i32; 5561 } 5562 5563 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); 5564 unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue(); 5565 SDValue Ops[] = { 5566 DAG.getEntryNode(), // Chain 5567 Rsrc, // rsrc 5568 DAG.getConstant(0, DL, MVT::i32), // vindex 5569 {}, // voffset 5570 {}, // soffset 5571 {}, // offset 5572 DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy 5573 DAG.getConstant(0, DL, MVT::i1), // idxen 5574 }; 5575 5576 // Use the alignment to ensure that the required offsets will fit into the 5577 // immediate offsets. 5578 setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 
16 * NumLoads : 4); 5579 5580 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); 5581 for (unsigned i = 0; i < NumLoads; ++i) { 5582 Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32); 5583 Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, 5584 Ops, LoadVT, MMO)); 5585 } 5586 5587 if (VT == MVT::v8i32 || VT == MVT::v16i32) 5588 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); 5589 5590 return Loads[0]; 5591 } 5592 5593 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5594 SelectionDAG &DAG) const { 5595 MachineFunction &MF = DAG.getMachineFunction(); 5596 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 5597 5598 EVT VT = Op.getValueType(); 5599 SDLoc DL(Op); 5600 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5601 5602 // TODO: Should this propagate fast-math-flags? 5603 5604 switch (IntrinsicID) { 5605 case Intrinsic::amdgcn_implicit_buffer_ptr: { 5606 if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) 5607 return emitNonHSAIntrinsicError(DAG, DL, VT); 5608 return getPreloadedValue(DAG, *MFI, VT, 5609 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); 5610 } 5611 case Intrinsic::amdgcn_dispatch_ptr: 5612 case Intrinsic::amdgcn_queue_ptr: { 5613 if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { 5614 DiagnosticInfoUnsupported BadIntrin( 5615 MF.getFunction(), "unsupported hsa intrinsic without hsa target", 5616 DL.getDebugLoc()); 5617 DAG.getContext()->diagnose(BadIntrin); 5618 return DAG.getUNDEF(VT); 5619 } 5620 5621 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 5622 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; 5623 return getPreloadedValue(DAG, *MFI, VT, RegID); 5624 } 5625 case Intrinsic::amdgcn_implicitarg_ptr: { 5626 if (MFI->isEntryFunction()) 5627 return getImplicitArgPtr(DAG, DL); 5628 return getPreloadedValue(DAG, *MFI, VT, 5629 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 5630 } 5631 case Intrinsic::amdgcn_kernarg_segment_ptr: { 5632 return getPreloadedValue(DAG, *MFI, VT, 5633 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 5634 } 5635 case Intrinsic::amdgcn_dispatch_id: { 5636 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); 5637 } 5638 case Intrinsic::amdgcn_rcp: 5639 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 5640 case Intrinsic::amdgcn_rsq: 5641 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 5642 case Intrinsic::amdgcn_rsq_legacy: 5643 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 5644 return emitRemovedIntrinsicError(DAG, DL, VT); 5645 5646 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 5647 case Intrinsic::amdgcn_rcp_legacy: 5648 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 5649 return emitRemovedIntrinsicError(DAG, DL, VT); 5650 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 5651 case Intrinsic::amdgcn_rsq_clamp: { 5652 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 5653 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 5654 5655 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 5656 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 5657 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 5658 5659 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 5660 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 5661 DAG.getConstantFP(Max, DL, VT)); 5662 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 5663 
DAG.getConstantFP(Min, DL, VT)); 5664 } 5665 case Intrinsic::r600_read_ngroups_x: 5666 if (Subtarget->isAmdHsaOS()) 5667 return emitNonHSAIntrinsicError(DAG, DL, VT); 5668 5669 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5670 SI::KernelInputOffsets::NGROUPS_X, 4, false); 5671 case Intrinsic::r600_read_ngroups_y: 5672 if (Subtarget->isAmdHsaOS()) 5673 return emitNonHSAIntrinsicError(DAG, DL, VT); 5674 5675 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5676 SI::KernelInputOffsets::NGROUPS_Y, 4, false); 5677 case Intrinsic::r600_read_ngroups_z: 5678 if (Subtarget->isAmdHsaOS()) 5679 return emitNonHSAIntrinsicError(DAG, DL, VT); 5680 5681 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5682 SI::KernelInputOffsets::NGROUPS_Z, 4, false); 5683 case Intrinsic::r600_read_global_size_x: 5684 if (Subtarget->isAmdHsaOS()) 5685 return emitNonHSAIntrinsicError(DAG, DL, VT); 5686 5687 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5688 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false); 5689 case Intrinsic::r600_read_global_size_y: 5690 if (Subtarget->isAmdHsaOS()) 5691 return emitNonHSAIntrinsicError(DAG, DL, VT); 5692 5693 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5694 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false); 5695 case Intrinsic::r600_read_global_size_z: 5696 if (Subtarget->isAmdHsaOS()) 5697 return emitNonHSAIntrinsicError(DAG, DL, VT); 5698 5699 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5700 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false); 5701 case Intrinsic::r600_read_local_size_x: 5702 if (Subtarget->isAmdHsaOS()) 5703 return emitNonHSAIntrinsicError(DAG, DL, VT); 5704 5705 return lowerImplicitZextParam(DAG, Op, MVT::i16, 5706 SI::KernelInputOffsets::LOCAL_SIZE_X); 5707 case Intrinsic::r600_read_local_size_y: 5708 if (Subtarget->isAmdHsaOS()) 5709 return emitNonHSAIntrinsicError(DAG, DL, VT); 5710 5711 return lowerImplicitZextParam(DAG, Op, MVT::i16, 5712 SI::KernelInputOffsets::LOCAL_SIZE_Y); 5713 case Intrinsic::r600_read_local_size_z: 5714 if (Subtarget->isAmdHsaOS()) 5715 return emitNonHSAIntrinsicError(DAG, DL, VT); 5716 5717 return lowerImplicitZextParam(DAG, Op, MVT::i16, 5718 SI::KernelInputOffsets::LOCAL_SIZE_Z); 5719 case Intrinsic::amdgcn_workgroup_id_x: 5720 case Intrinsic::r600_read_tgid_x: 5721 return getPreloadedValue(DAG, *MFI, VT, 5722 AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 5723 case Intrinsic::amdgcn_workgroup_id_y: 5724 case Intrinsic::r600_read_tgid_y: 5725 return getPreloadedValue(DAG, *MFI, VT, 5726 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 5727 case Intrinsic::amdgcn_workgroup_id_z: 5728 case Intrinsic::r600_read_tgid_z: 5729 return getPreloadedValue(DAG, *MFI, VT, 5730 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 5731 case Intrinsic::amdgcn_workitem_id_x: 5732 case Intrinsic::r600_read_tidig_x: 5733 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 5734 SDLoc(DAG.getEntryNode()), 5735 MFI->getArgInfo().WorkItemIDX); 5736 case Intrinsic::amdgcn_workitem_id_y: 5737 case Intrinsic::r600_read_tidig_y: 5738 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 5739 SDLoc(DAG.getEntryNode()), 5740 MFI->getArgInfo().WorkItemIDY); 5741 case Intrinsic::amdgcn_workitem_id_z: 5742 case Intrinsic::r600_read_tidig_z: 5743 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 5744 SDLoc(DAG.getEntryNode()), 5745 MFI->getArgInfo().WorkItemIDZ); 5746 case Intrinsic::amdgcn_wavefrontsize: 5747 return 
DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(), 5748 SDLoc(Op), MVT::i32); 5749 case Intrinsic::amdgcn_s_buffer_load: { 5750 bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10; 5751 SDValue GLC; 5752 SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1); 5753 if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr, 5754 IsGFX10 ? &DLC : nullptr)) 5755 return Op; 5756 return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), GLC, DLC, 5757 DAG); 5758 } 5759 case Intrinsic::amdgcn_fdiv_fast: 5760 return lowerFDIV_FAST(Op, DAG); 5761 case Intrinsic::amdgcn_interp_mov: { 5762 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 5763 SDValue Glue = M0.getValue(1); 5764 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), 5765 Op.getOperand(2), Op.getOperand(3), Glue); 5766 } 5767 case Intrinsic::amdgcn_interp_p1: { 5768 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 5769 SDValue Glue = M0.getValue(1); 5770 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 5771 Op.getOperand(2), Op.getOperand(3), Glue); 5772 } 5773 case Intrinsic::amdgcn_interp_p2: { 5774 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 5775 SDValue Glue = SDValue(M0.getNode(), 1); 5776 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 5777 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 5778 Glue); 5779 } 5780 case Intrinsic::amdgcn_interp_p1_f16: { 5781 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 5782 SDValue Glue = M0.getValue(1); 5783 if (getSubtarget()->getLDSBankCount() == 16) { 5784 // 16 bank LDS 5785 SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, 5786 DAG.getConstant(2, DL, MVT::i32), // P0 5787 Op.getOperand(2), // Attrchan 5788 Op.getOperand(3), // Attr 5789 Glue); 5790 SDValue Ops[] = { 5791 Op.getOperand(1), // Src0 5792 Op.getOperand(2), // Attrchan 5793 Op.getOperand(3), // Attr 5794 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers 5795 S, // Src2 - holds two f16 values selected by high 5796 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers 5797 Op.getOperand(4), // high 5798 DAG.getConstant(0, DL, MVT::i1), // $clamp 5799 DAG.getConstant(0, DL, MVT::i32) // $omod 5800 }; 5801 return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops); 5802 } else { 5803 // 32 bank LDS 5804 SDValue Ops[] = { 5805 Op.getOperand(1), // Src0 5806 Op.getOperand(2), // Attrchan 5807 Op.getOperand(3), // Attr 5808 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers 5809 Op.getOperand(4), // high 5810 DAG.getConstant(0, DL, MVT::i1), // $clamp 5811 DAG.getConstant(0, DL, MVT::i32), // $omod 5812 Glue 5813 }; 5814 return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops); 5815 } 5816 } 5817 case Intrinsic::amdgcn_interp_p2_f16: { 5818 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6)); 5819 SDValue Glue = SDValue(M0.getNode(), 1); 5820 SDValue Ops[] = { 5821 Op.getOperand(2), // Src0 5822 Op.getOperand(3), // Attrchan 5823 Op.getOperand(4), // Attr 5824 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers 5825 Op.getOperand(1), // Src2 5826 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers 5827 Op.getOperand(5), // high 5828 DAG.getConstant(0, DL, MVT::i1), // $clamp 5829 Glue 5830 }; 5831 return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops); 5832 } 5833 case Intrinsic::amdgcn_sin: 5834 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 5835 5836 case 
Intrinsic::amdgcn_cos: 5837 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 5838 5839 case Intrinsic::amdgcn_mul_u24: 5840 return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2)); 5841 case Intrinsic::amdgcn_mul_i24: 5842 return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2)); 5843 5844 case Intrinsic::amdgcn_log_clamp: { 5845 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 5846 return SDValue(); 5847 5848 DiagnosticInfoUnsupported BadIntrin( 5849 MF.getFunction(), "intrinsic not supported on subtarget", 5850 DL.getDebugLoc()); 5851 DAG.getContext()->diagnose(BadIntrin); 5852 return DAG.getUNDEF(VT); 5853 } 5854 case Intrinsic::amdgcn_ldexp: 5855 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 5856 Op.getOperand(1), Op.getOperand(2)); 5857 5858 case Intrinsic::amdgcn_fract: 5859 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 5860 5861 case Intrinsic::amdgcn_class: 5862 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 5863 Op.getOperand(1), Op.getOperand(2)); 5864 case Intrinsic::amdgcn_div_fmas: 5865 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 5866 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 5867 Op.getOperand(4)); 5868 5869 case Intrinsic::amdgcn_div_fixup: 5870 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 5871 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5872 5873 case Intrinsic::amdgcn_trig_preop: 5874 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 5875 Op.getOperand(1), Op.getOperand(2)); 5876 case Intrinsic::amdgcn_div_scale: { 5877 const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); 5878 5879 // Translate to the operands expected by the machine instruction. The 5880 // first parameter must be the same as the first instruction. 5881 SDValue Numerator = Op.getOperand(1); 5882 SDValue Denominator = Op.getOperand(2); 5883 5884 // Note this order is opposite of the machine instruction's operations, 5885 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 5886 // intrinsic has the numerator as the first operand to match a normal 5887 // division operation. 5888 5889 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; 5890 5891 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 5892 Denominator, Numerator); 5893 } 5894 case Intrinsic::amdgcn_icmp: { 5895 // There is a Pat that handles this variant, so return it as-is. 
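    // That variant is the one matched here: an i1 source compared against a
    // constant zero with an ICMP_NE predicate, i.e. effectively a ballot of
    // the i1 value.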
5896 if (Op.getOperand(1).getValueType() == MVT::i1 && 5897 Op.getConstantOperandVal(2) == 0 && 5898 Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) 5899 return Op; 5900 return lowerICMPIntrinsic(*this, Op.getNode(), DAG); 5901 } 5902 case Intrinsic::amdgcn_fcmp: { 5903 return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); 5904 } 5905 case Intrinsic::amdgcn_fmed3: 5906 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 5907 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5908 case Intrinsic::amdgcn_fdot2: 5909 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, 5910 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 5911 Op.getOperand(4)); 5912 case Intrinsic::amdgcn_fmul_legacy: 5913 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 5914 Op.getOperand(1), Op.getOperand(2)); 5915 case Intrinsic::amdgcn_sffbh: 5916 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 5917 case Intrinsic::amdgcn_sbfe: 5918 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 5919 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5920 case Intrinsic::amdgcn_ubfe: 5921 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 5922 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5923 case Intrinsic::amdgcn_cvt_pkrtz: 5924 case Intrinsic::amdgcn_cvt_pknorm_i16: 5925 case Intrinsic::amdgcn_cvt_pknorm_u16: 5926 case Intrinsic::amdgcn_cvt_pk_i16: 5927 case Intrinsic::amdgcn_cvt_pk_u16: { 5928 // FIXME: Stop adding cast if v2f16/v2i16 are legal. 5929 EVT VT = Op.getValueType(); 5930 unsigned Opcode; 5931 5932 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) 5933 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; 5934 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) 5935 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 5936 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) 5937 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 5938 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) 5939 Opcode = AMDGPUISD::CVT_PK_I16_I32; 5940 else 5941 Opcode = AMDGPUISD::CVT_PK_U16_U32; 5942 5943 if (isTypeLegal(VT)) 5944 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); 5945 5946 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, 5947 Op.getOperand(1), Op.getOperand(2)); 5948 return DAG.getNode(ISD::BITCAST, DL, VT, Node); 5949 } 5950 case Intrinsic::amdgcn_wqm: { 5951 SDValue Src = Op.getOperand(1); 5952 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src), 5953 0); 5954 } 5955 case Intrinsic::amdgcn_wwm: { 5956 SDValue Src = Op.getOperand(1); 5957 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src), 5958 0); 5959 } 5960 case Intrinsic::amdgcn_fmad_ftz: 5961 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), 5962 Op.getOperand(2), Op.getOperand(3)); 5963 5964 case Intrinsic::amdgcn_if_break: 5965 return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, 5966 Op->getOperand(1), Op->getOperand(2)), 0); 5967 5968 case Intrinsic::amdgcn_groupstaticsize: { 5969 Triple::OSType OS = getTargetMachine().getTargetTriple().getOS(); 5970 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) 5971 return Op; 5972 5973 const Module *M = MF.getFunction().getParent(); 5974 const GlobalValue *GV = 5975 M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize)); 5976 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0, 5977 SIInstrInfo::MO_ABS32_LO); 5978 return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; 5979 } 5980 default: 5981 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 5982 
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 5983 return lowerImage(Op, ImageDimIntr, DAG); 5984 5985 return Op; 5986 } 5987 } 5988 5989 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 5990 SelectionDAG &DAG) const { 5991 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 5992 SDLoc DL(Op); 5993 5994 switch (IntrID) { 5995 case Intrinsic::amdgcn_ds_ordered_add: 5996 case Intrinsic::amdgcn_ds_ordered_swap: { 5997 MemSDNode *M = cast<MemSDNode>(Op); 5998 SDValue Chain = M->getOperand(0); 5999 SDValue M0 = M->getOperand(2); 6000 SDValue Value = M->getOperand(3); 6001 unsigned IndexOperand = M->getConstantOperandVal(7); 6002 unsigned WaveRelease = M->getConstantOperandVal(8); 6003 unsigned WaveDone = M->getConstantOperandVal(9); 6004 unsigned ShaderType; 6005 unsigned Instruction; 6006 6007 unsigned OrderedCountIndex = IndexOperand & 0x3f; 6008 IndexOperand &= ~0x3f; 6009 unsigned CountDw = 0; 6010 6011 if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) { 6012 CountDw = (IndexOperand >> 24) & 0xf; 6013 IndexOperand &= ~(0xf << 24); 6014 6015 if (CountDw < 1 || CountDw > 4) { 6016 report_fatal_error( 6017 "ds_ordered_count: dword count must be between 1 and 4"); 6018 } 6019 } 6020 6021 if (IndexOperand) 6022 report_fatal_error("ds_ordered_count: bad index operand"); 6023 6024 switch (IntrID) { 6025 case Intrinsic::amdgcn_ds_ordered_add: 6026 Instruction = 0; 6027 break; 6028 case Intrinsic::amdgcn_ds_ordered_swap: 6029 Instruction = 1; 6030 break; 6031 } 6032 6033 if (WaveDone && !WaveRelease) 6034 report_fatal_error("ds_ordered_count: wave_done requires wave_release"); 6035 6036 switch (DAG.getMachineFunction().getFunction().getCallingConv()) { 6037 case CallingConv::AMDGPU_CS: 6038 case CallingConv::AMDGPU_KERNEL: 6039 ShaderType = 0; 6040 break; 6041 case CallingConv::AMDGPU_PS: 6042 ShaderType = 1; 6043 break; 6044 case CallingConv::AMDGPU_VS: 6045 ShaderType = 2; 6046 break; 6047 case CallingConv::AMDGPU_GS: 6048 ShaderType = 3; 6049 break; 6050 default: 6051 report_fatal_error("ds_ordered_count unsupported for this calling conv"); 6052 } 6053 6054 unsigned Offset0 = OrderedCountIndex << 2; 6055 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | 6056 (Instruction << 4); 6057 6058 if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) 6059 Offset1 |= (CountDw - 1) << 6; 6060 6061 unsigned Offset = Offset0 | (Offset1 << 8); 6062 6063 SDValue Ops[] = { 6064 Chain, 6065 Value, 6066 DAG.getTargetConstant(Offset, DL, MVT::i16), 6067 copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue 6068 }; 6069 return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, 6070 M->getVTList(), Ops, M->getMemoryVT(), 6071 M->getMemOperand()); 6072 } 6073 case Intrinsic::amdgcn_ds_fadd: { 6074 MemSDNode *M = cast<MemSDNode>(Op); 6075 unsigned Opc; 6076 switch (IntrID) { 6077 case Intrinsic::amdgcn_ds_fadd: 6078 Opc = ISD::ATOMIC_LOAD_FADD; 6079 break; 6080 } 6081 6082 return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(), 6083 M->getOperand(0), M->getOperand(2), M->getOperand(3), 6084 M->getMemOperand()); 6085 } 6086 case Intrinsic::amdgcn_atomic_inc: 6087 case Intrinsic::amdgcn_atomic_dec: 6088 case Intrinsic::amdgcn_ds_fmin: 6089 case Intrinsic::amdgcn_ds_fmax: { 6090 MemSDNode *M = cast<MemSDNode>(Op); 6091 unsigned Opc; 6092 switch (IntrID) { 6093 case Intrinsic::amdgcn_atomic_inc: 6094 Opc = AMDGPUISD::ATOMIC_INC; 6095 break; 6096 case Intrinsic::amdgcn_atomic_dec: 6097 Opc = AMDGPUISD::ATOMIC_DEC; 6098 break; 6099 case 
Intrinsic::amdgcn_ds_fmin: 6100 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; 6101 break; 6102 case Intrinsic::amdgcn_ds_fmax: 6103 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; 6104 break; 6105 default: 6106 llvm_unreachable("Unknown intrinsic!"); 6107 } 6108 SDValue Ops[] = { 6109 M->getOperand(0), // Chain 6110 M->getOperand(2), // Ptr 6111 M->getOperand(3) // Value 6112 }; 6113 6114 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 6115 M->getMemoryVT(), M->getMemOperand()); 6116 } 6117 case Intrinsic::amdgcn_buffer_load: 6118 case Intrinsic::amdgcn_buffer_load_format: { 6119 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 6120 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 6121 unsigned IdxEn = 1; 6122 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) 6123 IdxEn = Idx->getZExtValue() != 0; 6124 SDValue Ops[] = { 6125 Op.getOperand(0), // Chain 6126 Op.getOperand(2), // rsrc 6127 Op.getOperand(3), // vindex 6128 SDValue(), // voffset -- will be set by setBufferOffsets 6129 SDValue(), // soffset -- will be set by setBufferOffsets 6130 SDValue(), // offset -- will be set by setBufferOffsets 6131 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 6132 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6133 }; 6134 6135 setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); 6136 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 6137 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 6138 6139 EVT VT = Op.getValueType(); 6140 EVT IntVT = VT.changeTypeToInteger(); 6141 auto *M = cast<MemSDNode>(Op); 6142 EVT LoadVT = Op.getValueType(); 6143 6144 if (LoadVT.getScalarType() == MVT::f16) 6145 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 6146 M, DAG, Ops); 6147 6148 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 6149 if (LoadVT.getScalarType() == MVT::i8 || 6150 LoadVT.getScalarType() == MVT::i16) 6151 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 6152 6153 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 6154 M->getMemOperand(), DAG); 6155 } 6156 case Intrinsic::amdgcn_raw_buffer_load: 6157 case Intrinsic::amdgcn_raw_buffer_load_format: { 6158 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); 6159 SDValue Ops[] = { 6160 Op.getOperand(0), // Chain 6161 Op.getOperand(2), // rsrc 6162 DAG.getConstant(0, DL, MVT::i32), // vindex 6163 Offsets.first, // voffset 6164 Op.getOperand(4), // soffset 6165 Offsets.second, // offset 6166 Op.getOperand(5), // cachepolicy 6167 DAG.getConstant(0, DL, MVT::i1), // idxen 6168 }; 6169 6170 unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ? 
6171 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 6172 6173 EVT VT = Op.getValueType(); 6174 EVT IntVT = VT.changeTypeToInteger(); 6175 auto *M = cast<MemSDNode>(Op); 6176 EVT LoadVT = Op.getValueType(); 6177 6178 if (LoadVT.getScalarType() == MVT::f16) 6179 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 6180 M, DAG, Ops); 6181 6182 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 6183 if (LoadVT.getScalarType() == MVT::i8 || 6184 LoadVT.getScalarType() == MVT::i16) 6185 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 6186 6187 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 6188 M->getMemOperand(), DAG); 6189 } 6190 case Intrinsic::amdgcn_struct_buffer_load: 6191 case Intrinsic::amdgcn_struct_buffer_load_format: { 6192 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6193 SDValue Ops[] = { 6194 Op.getOperand(0), // Chain 6195 Op.getOperand(2), // rsrc 6196 Op.getOperand(3), // vindex 6197 Offsets.first, // voffset 6198 Op.getOperand(5), // soffset 6199 Offsets.second, // offset 6200 Op.getOperand(6), // cachepolicy 6201 DAG.getConstant(1, DL, MVT::i1), // idxen 6202 }; 6203 6204 unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ? 6205 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 6206 6207 EVT VT = Op.getValueType(); 6208 EVT IntVT = VT.changeTypeToInteger(); 6209 auto *M = cast<MemSDNode>(Op); 6210 EVT LoadVT = Op.getValueType(); 6211 6212 if (LoadVT.getScalarType() == MVT::f16) 6213 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 6214 M, DAG, Ops); 6215 6216 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 6217 if (LoadVT.getScalarType() == MVT::i8 || 6218 LoadVT.getScalarType() == MVT::i16) 6219 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 6220 6221 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 6222 M->getMemOperand(), DAG); 6223 } 6224 case Intrinsic::amdgcn_tbuffer_load: { 6225 MemSDNode *M = cast<MemSDNode>(Op); 6226 EVT LoadVT = Op.getValueType(); 6227 6228 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 6229 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); 6230 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); 6231 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); 6232 unsigned IdxEn = 1; 6233 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) 6234 IdxEn = Idx->getZExtValue() != 0; 6235 SDValue Ops[] = { 6236 Op.getOperand(0), // Chain 6237 Op.getOperand(2), // rsrc 6238 Op.getOperand(3), // vindex 6239 Op.getOperand(4), // voffset 6240 Op.getOperand(5), // soffset 6241 Op.getOperand(6), // offset 6242 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format 6243 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 6244 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6245 }; 6246 6247 if (LoadVT.getScalarType() == MVT::f16) 6248 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 6249 M, DAG, Ops); 6250 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 6251 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 6252 DAG); 6253 } 6254 case Intrinsic::amdgcn_raw_tbuffer_load: { 6255 MemSDNode *M = cast<MemSDNode>(Op); 6256 EVT LoadVT = Op.getValueType(); 6257 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); 6258 6259 SDValue Ops[] = { 6260 Op.getOperand(0), // Chain 6261 Op.getOperand(2), // rsrc 6262 DAG.getConstant(0, DL, MVT::i32), // vindex 6263 Offsets.first, // voffset 
6264 Op.getOperand(4), // soffset 6265 Offsets.second, // offset 6266 Op.getOperand(5), // format 6267 Op.getOperand(6), // cachepolicy 6268 DAG.getConstant(0, DL, MVT::i1), // idxen 6269 }; 6270 6271 if (LoadVT.getScalarType() == MVT::f16) 6272 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 6273 M, DAG, Ops); 6274 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 6275 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 6276 DAG); 6277 } 6278 case Intrinsic::amdgcn_struct_tbuffer_load: { 6279 MemSDNode *M = cast<MemSDNode>(Op); 6280 EVT LoadVT = Op.getValueType(); 6281 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6282 6283 SDValue Ops[] = { 6284 Op.getOperand(0), // Chain 6285 Op.getOperand(2), // rsrc 6286 Op.getOperand(3), // vindex 6287 Offsets.first, // voffset 6288 Op.getOperand(5), // soffset 6289 Offsets.second, // offset 6290 Op.getOperand(6), // format 6291 Op.getOperand(7), // cachepolicy 6292 DAG.getConstant(1, DL, MVT::i1), // idxen 6293 }; 6294 6295 if (LoadVT.getScalarType() == MVT::f16) 6296 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 6297 M, DAG, Ops); 6298 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 6299 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 6300 DAG); 6301 } 6302 case Intrinsic::amdgcn_buffer_atomic_swap: 6303 case Intrinsic::amdgcn_buffer_atomic_add: 6304 case Intrinsic::amdgcn_buffer_atomic_sub: 6305 case Intrinsic::amdgcn_buffer_atomic_smin: 6306 case Intrinsic::amdgcn_buffer_atomic_umin: 6307 case Intrinsic::amdgcn_buffer_atomic_smax: 6308 case Intrinsic::amdgcn_buffer_atomic_umax: 6309 case Intrinsic::amdgcn_buffer_atomic_and: 6310 case Intrinsic::amdgcn_buffer_atomic_or: 6311 case Intrinsic::amdgcn_buffer_atomic_xor: { 6312 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 6313 unsigned IdxEn = 1; 6314 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 6315 IdxEn = Idx->getZExtValue() != 0; 6316 SDValue Ops[] = { 6317 Op.getOperand(0), // Chain 6318 Op.getOperand(2), // vdata 6319 Op.getOperand(3), // rsrc 6320 Op.getOperand(4), // vindex 6321 SDValue(), // voffset -- will be set by setBufferOffsets 6322 SDValue(), // soffset -- will be set by setBufferOffsets 6323 SDValue(), // offset -- will be set by setBufferOffsets 6324 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy 6325 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6326 }; 6327 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 6328 EVT VT = Op.getValueType(); 6329 6330 auto *M = cast<MemSDNode>(Op); 6331 unsigned Opcode = 0; 6332 6333 switch (IntrID) { 6334 case Intrinsic::amdgcn_buffer_atomic_swap: 6335 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 6336 break; 6337 case Intrinsic::amdgcn_buffer_atomic_add: 6338 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 6339 break; 6340 case Intrinsic::amdgcn_buffer_atomic_sub: 6341 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 6342 break; 6343 case Intrinsic::amdgcn_buffer_atomic_smin: 6344 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 6345 break; 6346 case Intrinsic::amdgcn_buffer_atomic_umin: 6347 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 6348 break; 6349 case Intrinsic::amdgcn_buffer_atomic_smax: 6350 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 6351 break; 6352 case Intrinsic::amdgcn_buffer_atomic_umax: 6353 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 6354 break; 6355 case Intrinsic::amdgcn_buffer_atomic_and: 6356 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 6357 break; 6358 case Intrinsic::amdgcn_buffer_atomic_or: 6359 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 6360 break; 6361 case 
Intrinsic::amdgcn_buffer_atomic_xor: 6362 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 6363 break; 6364 default: 6365 llvm_unreachable("unhandled atomic opcode"); 6366 } 6367 6368 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6369 M->getMemOperand()); 6370 } 6371 case Intrinsic::amdgcn_raw_buffer_atomic_swap: 6372 case Intrinsic::amdgcn_raw_buffer_atomic_add: 6373 case Intrinsic::amdgcn_raw_buffer_atomic_sub: 6374 case Intrinsic::amdgcn_raw_buffer_atomic_smin: 6375 case Intrinsic::amdgcn_raw_buffer_atomic_umin: 6376 case Intrinsic::amdgcn_raw_buffer_atomic_smax: 6377 case Intrinsic::amdgcn_raw_buffer_atomic_umax: 6378 case Intrinsic::amdgcn_raw_buffer_atomic_and: 6379 case Intrinsic::amdgcn_raw_buffer_atomic_or: 6380 case Intrinsic::amdgcn_raw_buffer_atomic_xor: { 6381 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6382 SDValue Ops[] = { 6383 Op.getOperand(0), // Chain 6384 Op.getOperand(2), // vdata 6385 Op.getOperand(3), // rsrc 6386 DAG.getConstant(0, DL, MVT::i32), // vindex 6387 Offsets.first, // voffset 6388 Op.getOperand(5), // soffset 6389 Offsets.second, // offset 6390 Op.getOperand(6), // cachepolicy 6391 DAG.getConstant(0, DL, MVT::i1), // idxen 6392 }; 6393 EVT VT = Op.getValueType(); 6394 6395 auto *M = cast<MemSDNode>(Op); 6396 unsigned Opcode = 0; 6397 6398 switch (IntrID) { 6399 case Intrinsic::amdgcn_raw_buffer_atomic_swap: 6400 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 6401 break; 6402 case Intrinsic::amdgcn_raw_buffer_atomic_add: 6403 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 6404 break; 6405 case Intrinsic::amdgcn_raw_buffer_atomic_sub: 6406 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 6407 break; 6408 case Intrinsic::amdgcn_raw_buffer_atomic_smin: 6409 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 6410 break; 6411 case Intrinsic::amdgcn_raw_buffer_atomic_umin: 6412 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 6413 break; 6414 case Intrinsic::amdgcn_raw_buffer_atomic_smax: 6415 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 6416 break; 6417 case Intrinsic::amdgcn_raw_buffer_atomic_umax: 6418 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 6419 break; 6420 case Intrinsic::amdgcn_raw_buffer_atomic_and: 6421 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 6422 break; 6423 case Intrinsic::amdgcn_raw_buffer_atomic_or: 6424 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 6425 break; 6426 case Intrinsic::amdgcn_raw_buffer_atomic_xor: 6427 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 6428 break; 6429 default: 6430 llvm_unreachable("unhandled atomic opcode"); 6431 } 6432 6433 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6434 M->getMemOperand()); 6435 } 6436 case Intrinsic::amdgcn_struct_buffer_atomic_swap: 6437 case Intrinsic::amdgcn_struct_buffer_atomic_add: 6438 case Intrinsic::amdgcn_struct_buffer_atomic_sub: 6439 case Intrinsic::amdgcn_struct_buffer_atomic_smin: 6440 case Intrinsic::amdgcn_struct_buffer_atomic_umin: 6441 case Intrinsic::amdgcn_struct_buffer_atomic_smax: 6442 case Intrinsic::amdgcn_struct_buffer_atomic_umax: 6443 case Intrinsic::amdgcn_struct_buffer_atomic_and: 6444 case Intrinsic::amdgcn_struct_buffer_atomic_or: 6445 case Intrinsic::amdgcn_struct_buffer_atomic_xor: { 6446 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6447 SDValue Ops[] = { 6448 Op.getOperand(0), // Chain 6449 Op.getOperand(2), // vdata 6450 Op.getOperand(3), // rsrc 6451 Op.getOperand(4), // vindex 6452 Offsets.first, // voffset 6453 Op.getOperand(6), // soffset 6454 Offsets.second, // offset 6455 Op.getOperand(7), // cachepolicy 6456 DAG.getConstant(1, DL, MVT::i1), // idxen 6457 }; 6458 
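// Illustrative operand mapping (a sketch; exact intrinsic names/suffixes are
// as declared in the intrinsic definitions, not restated here): for something
// like
//   %old = call i32 @llvm.amdgcn.struct.buffer.atomic.add(
//            i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset,
//            i32 %soffset, i32 %cachepolicy)
// the voffset argument (operand 5 of the node) is what was split above into a
// VGPR voffset plus an immediate offset, and idxen is hard-wired to 1 because
// the struct forms always carry a vindex.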
EVT VT = Op.getValueType(); 6459 6460 auto *M = cast<MemSDNode>(Op); 6461 unsigned Opcode = 0; 6462 6463 switch (IntrID) { 6464 case Intrinsic::amdgcn_struct_buffer_atomic_swap: 6465 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 6466 break; 6467 case Intrinsic::amdgcn_struct_buffer_atomic_add: 6468 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 6469 break; 6470 case Intrinsic::amdgcn_struct_buffer_atomic_sub: 6471 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 6472 break; 6473 case Intrinsic::amdgcn_struct_buffer_atomic_smin: 6474 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 6475 break; 6476 case Intrinsic::amdgcn_struct_buffer_atomic_umin: 6477 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 6478 break; 6479 case Intrinsic::amdgcn_struct_buffer_atomic_smax: 6480 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 6481 break; 6482 case Intrinsic::amdgcn_struct_buffer_atomic_umax: 6483 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 6484 break; 6485 case Intrinsic::amdgcn_struct_buffer_atomic_and: 6486 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 6487 break; 6488 case Intrinsic::amdgcn_struct_buffer_atomic_or: 6489 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 6490 break; 6491 case Intrinsic::amdgcn_struct_buffer_atomic_xor: 6492 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 6493 break; 6494 default: 6495 llvm_unreachable("unhandled atomic opcode"); 6496 } 6497 6498 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6499 M->getMemOperand()); 6500 } 6501 case Intrinsic::amdgcn_buffer_atomic_cmpswap: { 6502 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 6503 unsigned IdxEn = 1; 6504 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5))) 6505 IdxEn = Idx->getZExtValue() != 0; 6506 SDValue Ops[] = { 6507 Op.getOperand(0), // Chain 6508 Op.getOperand(2), // src 6509 Op.getOperand(3), // cmp 6510 Op.getOperand(4), // rsrc 6511 Op.getOperand(5), // vindex 6512 SDValue(), // voffset -- will be set by setBufferOffsets 6513 SDValue(), // soffset -- will be set by setBufferOffsets 6514 SDValue(), // offset -- will be set by setBufferOffsets 6515 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy 6516 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6517 }; 6518 setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); 6519 EVT VT = Op.getValueType(); 6520 auto *M = cast<MemSDNode>(Op); 6521 6522 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 6523 Op->getVTList(), Ops, VT, M->getMemOperand()); 6524 } 6525 case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { 6526 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6527 SDValue Ops[] = { 6528 Op.getOperand(0), // Chain 6529 Op.getOperand(2), // src 6530 Op.getOperand(3), // cmp 6531 Op.getOperand(4), // rsrc 6532 DAG.getConstant(0, DL, MVT::i32), // vindex 6533 Offsets.first, // voffset 6534 Op.getOperand(6), // soffset 6535 Offsets.second, // offset 6536 Op.getOperand(7), // cachepolicy 6537 DAG.getConstant(0, DL, MVT::i1), // idxen 6538 }; 6539 EVT VT = Op.getValueType(); 6540 auto *M = cast<MemSDNode>(Op); 6541 6542 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 6543 Op->getVTList(), Ops, VT, M->getMemOperand()); 6544 } 6545 case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { 6546 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); 6547 SDValue Ops[] = { 6548 Op.getOperand(0), // Chain 6549 Op.getOperand(2), // src 6550 Op.getOperand(3), // cmp 6551 Op.getOperand(4), // rsrc 6552 Op.getOperand(5), // vindex 6553 Offsets.first, // voffset 6554 Op.getOperand(7), // soffset 6555 Offsets.second, // offset 6556 Op.getOperand(8), // 
cachepolicy 6557 DAG.getConstant(1, DL, MVT::i1), // idxen 6558 }; 6559 EVT VT = Op.getValueType(); 6560 auto *M = cast<MemSDNode>(Op); 6561 6562 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 6563 Op->getVTList(), Ops, VT, M->getMemOperand()); 6564 } 6565 6566 default: 6567 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 6568 AMDGPU::getImageDimIntrinsicInfo(IntrID)) 6569 return lowerImage(Op, ImageDimIntr, DAG); 6570 6571 return SDValue(); 6572 } 6573 } 6574 6575 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to 6576 // dwordx4 if on SI. 6577 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, 6578 SDVTList VTList, 6579 ArrayRef<SDValue> Ops, EVT MemVT, 6580 MachineMemOperand *MMO, 6581 SelectionDAG &DAG) const { 6582 EVT VT = VTList.VTs[0]; 6583 EVT WidenedVT = VT; 6584 EVT WidenedMemVT = MemVT; 6585 if (!Subtarget->hasDwordx3LoadStores() && 6586 (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { 6587 WidenedVT = EVT::getVectorVT(*DAG.getContext(), 6588 WidenedVT.getVectorElementType(), 4); 6589 WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), 6590 WidenedMemVT.getVectorElementType(), 4); 6591 MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); 6592 } 6593 6594 assert(VTList.NumVTs == 2); 6595 SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); 6596 6597 auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, 6598 WidenedMemVT, MMO); 6599 if (WidenedVT != VT) { 6600 auto Extract = DAG.getNode( 6601 ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, 6602 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout()))); 6603 NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); 6604 } 6605 return NewOp; 6606 } 6607 6608 SDValue SITargetLowering::handleD16VData(SDValue VData, 6609 SelectionDAG &DAG) const { 6610 EVT StoreVT = VData.getValueType(); 6611 6612 // No change for f16 and legal vector D16 types. 6613 if (!StoreVT.isVector()) 6614 return VData; 6615 6616 SDLoc DL(VData); 6617 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16"); 6618 6619 if (Subtarget->hasUnpackedD16VMem()) { 6620 // We need to unpack the packed data to store. 
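// For example, on these subtargets a v2f16 value is rewritten into a v2i32 in
// which each lane holds one zero-extended 16-bit element (a restatement of
// the code below, not extra behavior).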
6621 EVT IntStoreVT = StoreVT.changeTypeToInteger(); 6622 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 6623 6624 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, 6625 StoreVT.getVectorNumElements()); 6626 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); 6627 return DAG.UnrollVectorOp(ZExt.getNode()); 6628 } 6629 6630 assert(isTypeLegal(StoreVT)); 6631 return VData; 6632 } 6633 6634 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 6635 SelectionDAG &DAG) const { 6636 SDLoc DL(Op); 6637 SDValue Chain = Op.getOperand(0); 6638 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6639 MachineFunction &MF = DAG.getMachineFunction(); 6640 6641 switch (IntrinsicID) { 6642 case Intrinsic::amdgcn_exp: { 6643 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 6644 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 6645 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 6646 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 6647 6648 const SDValue Ops[] = { 6649 Chain, 6650 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 6651 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 6652 Op.getOperand(4), // src0 6653 Op.getOperand(5), // src1 6654 Op.getOperand(6), // src2 6655 Op.getOperand(7), // src3 6656 DAG.getTargetConstant(0, DL, MVT::i1), // compr 6657 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 6658 }; 6659 6660 unsigned Opc = Done->isNullValue() ? 6661 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 6662 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 6663 } 6664 case Intrinsic::amdgcn_exp_compr: { 6665 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 6666 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 6667 SDValue Src0 = Op.getOperand(4); 6668 SDValue Src1 = Op.getOperand(5); 6669 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 6670 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 6671 6672 SDValue Undef = DAG.getUNDEF(MVT::f32); 6673 const SDValue Ops[] = { 6674 Chain, 6675 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 6676 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 6677 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 6678 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 6679 Undef, // src2 6680 Undef, // src3 6681 DAG.getTargetConstant(1, DL, MVT::i1), // compr 6682 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 6683 }; 6684 6685 unsigned Opc = Done->isNullValue() ? 6686 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 6687 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 6688 } 6689 case Intrinsic::amdgcn_s_sendmsg: 6690 case Intrinsic::amdgcn_s_sendmsghalt: { 6691 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ? 
6692 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT; 6693 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 6694 SDValue Glue = Chain.getValue(1); 6695 return DAG.getNode(NodeOp, DL, MVT::Other, Chain, 6696 Op.getOperand(2), Glue); 6697 } 6698 case Intrinsic::amdgcn_init_exec: { 6699 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain, 6700 Op.getOperand(2)); 6701 } 6702 case Intrinsic::amdgcn_init_exec_from_input: { 6703 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain, 6704 Op.getOperand(2), Op.getOperand(3)); 6705 } 6706 case Intrinsic::amdgcn_s_barrier: { 6707 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { 6708 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 6709 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; 6710 if (WGSize <= ST.getWavefrontSize()) 6711 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, 6712 Op.getOperand(0)), 0); 6713 } 6714 return SDValue(); 6715 }; 6716 case Intrinsic::amdgcn_tbuffer_store: { 6717 SDValue VData = Op.getOperand(2); 6718 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6719 if (IsD16) 6720 VData = handleD16VData(VData, DAG); 6721 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); 6722 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); 6723 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); 6724 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); 6725 unsigned IdxEn = 1; 6726 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 6727 IdxEn = Idx->getZExtValue() != 0; 6728 SDValue Ops[] = { 6729 Chain, 6730 VData, // vdata 6731 Op.getOperand(3), // rsrc 6732 Op.getOperand(4), // vindex 6733 Op.getOperand(5), // voffset 6734 Op.getOperand(6), // soffset 6735 Op.getOperand(7), // offset 6736 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format 6737 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 6738 DAG.getConstant(IdxEn, DL, MVT::i1), // idexen 6739 }; 6740 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 6741 AMDGPUISD::TBUFFER_STORE_FORMAT; 6742 MemSDNode *M = cast<MemSDNode>(Op); 6743 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6744 M->getMemoryVT(), M->getMemOperand()); 6745 } 6746 6747 case Intrinsic::amdgcn_struct_tbuffer_store: { 6748 SDValue VData = Op.getOperand(2); 6749 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6750 if (IsD16) 6751 VData = handleD16VData(VData, DAG); 6752 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6753 SDValue Ops[] = { 6754 Chain, 6755 VData, // vdata 6756 Op.getOperand(3), // rsrc 6757 Op.getOperand(4), // vindex 6758 Offsets.first, // voffset 6759 Op.getOperand(6), // soffset 6760 Offsets.second, // offset 6761 Op.getOperand(7), // format 6762 Op.getOperand(8), // cachepolicy 6763 DAG.getConstant(1, DL, MVT::i1), // idexen 6764 }; 6765 unsigned Opc = IsD16 ? 
AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 6766 AMDGPUISD::TBUFFER_STORE_FORMAT; 6767 MemSDNode *M = cast<MemSDNode>(Op); 6768 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6769 M->getMemoryVT(), M->getMemOperand()); 6770 } 6771 6772 case Intrinsic::amdgcn_raw_tbuffer_store: { 6773 SDValue VData = Op.getOperand(2); 6774 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6775 if (IsD16) 6776 VData = handleD16VData(VData, DAG); 6777 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6778 SDValue Ops[] = { 6779 Chain, 6780 VData, // vdata 6781 Op.getOperand(3), // rsrc 6782 DAG.getConstant(0, DL, MVT::i32), // vindex 6783 Offsets.first, // voffset 6784 Op.getOperand(5), // soffset 6785 Offsets.second, // offset 6786 Op.getOperand(6), // format 6787 Op.getOperand(7), // cachepolicy 6788 DAG.getConstant(0, DL, MVT::i1), // idexen 6789 }; 6790 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 6791 AMDGPUISD::TBUFFER_STORE_FORMAT; 6792 MemSDNode *M = cast<MemSDNode>(Op); 6793 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6794 M->getMemoryVT(), M->getMemOperand()); 6795 } 6796 6797 case Intrinsic::amdgcn_buffer_store: 6798 case Intrinsic::amdgcn_buffer_store_format: { 6799 SDValue VData = Op.getOperand(2); 6800 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6801 if (IsD16) 6802 VData = handleD16VData(VData, DAG); 6803 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 6804 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 6805 unsigned IdxEn = 1; 6806 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 6807 IdxEn = Idx->getZExtValue() != 0; 6808 SDValue Ops[] = { 6809 Chain, 6810 VData, 6811 Op.getOperand(3), // rsrc 6812 Op.getOperand(4), // vindex 6813 SDValue(), // voffset -- will be set by setBufferOffsets 6814 SDValue(), // soffset -- will be set by setBufferOffsets 6815 SDValue(), // offset -- will be set by setBufferOffsets 6816 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 6817 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6818 }; 6819 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 6820 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? 6821 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 6822 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 6823 MemSDNode *M = cast<MemSDNode>(Op); 6824 6825 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 6826 EVT VDataType = VData.getValueType().getScalarType(); 6827 if (VDataType == MVT::i8 || VDataType == MVT::i16) 6828 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 6829 6830 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6831 M->getMemoryVT(), M->getMemOperand()); 6832 } 6833 6834 case Intrinsic::amdgcn_raw_buffer_store: 6835 case Intrinsic::amdgcn_raw_buffer_store_format: { 6836 SDValue VData = Op.getOperand(2); 6837 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6838 if (IsD16) 6839 VData = handleD16VData(VData, DAG); 6840 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6841 SDValue Ops[] = { 6842 Chain, 6843 VData, 6844 Op.getOperand(3), // rsrc 6845 DAG.getConstant(0, DL, MVT::i32), // vindex 6846 Offsets.first, // voffset 6847 Op.getOperand(5), // soffset 6848 Offsets.second, // offset 6849 Op.getOperand(6), // cachepolicy 6850 DAG.getConstant(0, DL, MVT::i1), // idxen 6851 }; 6852 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ? 
6853 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 6854 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 6855 MemSDNode *M = cast<MemSDNode>(Op); 6856 6857 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 6858 EVT VDataType = VData.getValueType().getScalarType(); 6859 if (VDataType == MVT::i8 || VDataType == MVT::i16) 6860 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 6861 6862 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6863 M->getMemoryVT(), M->getMemOperand()); 6864 } 6865 6866 case Intrinsic::amdgcn_struct_buffer_store: 6867 case Intrinsic::amdgcn_struct_buffer_store_format: { 6868 SDValue VData = Op.getOperand(2); 6869 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6870 if (IsD16) 6871 VData = handleD16VData(VData, DAG); 6872 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6873 SDValue Ops[] = { 6874 Chain, 6875 VData, 6876 Op.getOperand(3), // rsrc 6877 Op.getOperand(4), // vindex 6878 Offsets.first, // voffset 6879 Op.getOperand(6), // soffset 6880 Offsets.second, // offset 6881 Op.getOperand(7), // cachepolicy 6882 DAG.getConstant(1, DL, MVT::i1), // idxen 6883 }; 6884 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? 6885 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 6886 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 6887 MemSDNode *M = cast<MemSDNode>(Op); 6888 6889 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 6890 EVT VDataType = VData.getValueType().getScalarType(); 6891 if (VDataType == MVT::i8 || VDataType == MVT::i16) 6892 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 6893 6894 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6895 M->getMemoryVT(), M->getMemOperand()); 6896 } 6897 6898 case Intrinsic::amdgcn_buffer_atomic_fadd: { 6899 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 6900 unsigned IdxEn = 1; 6901 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 6902 IdxEn = Idx->getZExtValue() != 0; 6903 SDValue Ops[] = { 6904 Chain, 6905 Op.getOperand(2), // vdata 6906 Op.getOperand(3), // rsrc 6907 Op.getOperand(4), // vindex 6908 SDValue(), // voffset -- will be set by setBufferOffsets 6909 SDValue(), // soffset -- will be set by setBufferOffsets 6910 SDValue(), // offset -- will be set by setBufferOffsets 6911 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy 6912 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6913 }; 6914 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 6915 EVT VT = Op.getOperand(2).getValueType(); 6916 6917 auto *M = cast<MemSDNode>(Op); 6918 unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD 6919 : AMDGPUISD::BUFFER_ATOMIC_FADD; 6920 6921 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6922 M->getMemOperand()); 6923 } 6924 6925 case Intrinsic::amdgcn_global_atomic_fadd: { 6926 SDValue Ops[] = { 6927 Chain, 6928 Op.getOperand(2), // ptr 6929 Op.getOperand(3) // vdata 6930 }; 6931 EVT VT = Op.getOperand(3).getValueType(); 6932 6933 auto *M = cast<MemSDNode>(Op); 6934 unsigned Opcode = VT.isVector() ? 
AMDGPUISD::ATOMIC_PK_FADD 6935 : AMDGPUISD::ATOMIC_FADD; 6936 6937 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6938 M->getMemOperand()); 6939 } 6940 6941 case Intrinsic::amdgcn_end_cf: 6942 return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, 6943 Op->getOperand(2), Chain), 0); 6944 6945 default: { 6946 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 6947 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 6948 return lowerImage(Op, ImageDimIntr, DAG); 6949 6950 return Op; 6951 } 6952 } 6953 } 6954 6955 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: 6956 // offset (the offset that is included in bounds checking and swizzling, to be 6957 // split between the instruction's voffset and immoffset fields) and soffset 6958 // (the offset that is excluded from bounds checking and swizzling, to go in 6959 // the instruction's soffset field). This function takes the first kind of 6960 // offset and figures out how to split it between voffset and immoffset. 6961 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( 6962 SDValue Offset, SelectionDAG &DAG) const { 6963 SDLoc DL(Offset); 6964 const unsigned MaxImm = 4095; 6965 SDValue N0 = Offset; 6966 ConstantSDNode *C1 = nullptr; 6967 6968 if ((C1 = dyn_cast<ConstantSDNode>(N0))) 6969 N0 = SDValue(); 6970 else if (DAG.isBaseWithConstantOffset(N0)) { 6971 C1 = cast<ConstantSDNode>(N0.getOperand(1)); 6972 N0 = N0.getOperand(0); 6973 } 6974 6975 if (C1) { 6976 unsigned ImmOffset = C1->getZExtValue(); 6977 // If the immediate value is too big for the immoffset field, put the value 6978 // and -4096 into the immoffset field so that the value that is copied/added 6979 // for the voffset field is a multiple of 4096, and it stands more chance 6980 // of being CSEd with the copy/add for another similar load/store. 6981 // However, do not do that rounding down to a multiple of 4096 if that is a 6982 // negative number, as it appears to be illegal to have a negative offset 6983 // in the vgpr, even if adding the immediate offset makes it positive. 6984 unsigned Overflow = ImmOffset & ~MaxImm; 6985 ImmOffset -= Overflow; 6986 if ((int32_t)Overflow < 0) { 6987 Overflow += ImmOffset; 6988 ImmOffset = 0; 6989 } 6990 C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32)); 6991 if (Overflow) { 6992 auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); 6993 if (!N0) 6994 N0 = OverflowVal; 6995 else { 6996 SDValue Ops[] = { N0, OverflowVal }; 6997 N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); 6998 } 6999 } 7000 } 7001 if (!N0) 7002 N0 = DAG.getConstant(0, DL, MVT::i32); 7003 if (!C1) 7004 C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32)); 7005 return {N0, SDValue(C1, 0)}; 7006 } 7007 7008 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the 7009 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array 7010 // pointed to by Offsets. 
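// A minimal worked example (assuming the usual 12-bit MUBUF immediate field):
// a combined offset of (add %base, 60) typically becomes
//   Offsets[0] = %base (voffset), Offsets[1] = 0 (soffset), Offsets[2] = 60,
// while a fully dynamic offset falls through to
//   Offsets[0] = <offset>, Offsets[1] = 0, Offsets[2] = 0.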
7011 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset, 7012 SelectionDAG &DAG, SDValue *Offsets, 7013 unsigned Align) const { 7014 SDLoc DL(CombinedOffset); 7015 if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { 7016 uint32_t Imm = C->getZExtValue(); 7017 uint32_t SOffset, ImmOffset; 7018 if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) { 7019 Offsets[0] = DAG.getConstant(0, DL, MVT::i32); 7020 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); 7021 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32); 7022 return; 7023 } 7024 } 7025 if (DAG.isBaseWithConstantOffset(CombinedOffset)) { 7026 SDValue N0 = CombinedOffset.getOperand(0); 7027 SDValue N1 = CombinedOffset.getOperand(1); 7028 uint32_t SOffset, ImmOffset; 7029 int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); 7030 if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, 7031 Subtarget, Align)) { 7032 Offsets[0] = N0; 7033 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); 7034 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32); 7035 return; 7036 } 7037 } 7038 Offsets[0] = CombinedOffset; 7039 Offsets[1] = DAG.getConstant(0, DL, MVT::i32); 7040 Offsets[2] = DAG.getConstant(0, DL, MVT::i32); 7041 } 7042 7043 // Handle 8 bit and 16 bit buffer loads 7044 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, 7045 EVT LoadVT, SDLoc DL, 7046 ArrayRef<SDValue> Ops, 7047 MemSDNode *M) const { 7048 EVT IntVT = LoadVT.changeTypeToInteger(); 7049 unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? 7050 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; 7051 7052 SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); 7053 SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, 7054 Ops, IntVT, 7055 M->getMemOperand()); 7056 SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL, 7057 LoadVT.getScalarType(), BufferLoad); 7058 return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL); 7059 } 7060 7061 // Handle 8 bit and 16 bit buffer stores 7062 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, 7063 EVT VDataType, SDLoc DL, 7064 SDValue Ops[], 7065 MemSDNode *M) const { 7066 SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); 7067 Ops[1] = BufferStoreExt; 7068 unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : 7069 AMDGPUISD::BUFFER_STORE_SHORT; 7070 ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); 7071 return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, 7072 M->getMemOperand()); 7073 } 7074 7075 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, 7076 ISD::LoadExtType ExtType, SDValue Op, 7077 const SDLoc &SL, EVT VT) { 7078 if (VT.bitsLT(Op.getValueType())) 7079 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); 7080 7081 switch (ExtType) { 7082 case ISD::SEXTLOAD: 7083 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); 7084 case ISD::ZEXTLOAD: 7085 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); 7086 case ISD::EXTLOAD: 7087 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); 7088 case ISD::NON_EXTLOAD: 7089 return Op; 7090 } 7091 7092 llvm_unreachable("invalid ext type"); 7093 } 7094 7095 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { 7096 SelectionDAG &DAG = DCI.DAG; 7097 if (Ld->getAlignment() < 4 || Ld->isDivergent()) 7098 return SDValue(); 7099 7100 // FIXME: Constant loads should all be marked invariant. 
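// In outline: a uniform, dword-aligned sub-32-bit load from constant (or
// invariant global) memory is re-issued below as a full i32 load, and the
// original extension semantics are rebuilt with extend/truncate nodes,
// presumably so the access can be selected as a scalar load rather than a
// narrow buffer load.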
7101 unsigned AS = Ld->getAddressSpace(); 7102 if (AS != AMDGPUAS::CONSTANT_ADDRESS && 7103 AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && 7104 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) 7105 return SDValue(); 7106 7107 // Don't do this early, since it may interfere with adjacent load merging for 7108 // illegal types. We can avoid losing alignment information for exotic types 7109 // pre-legalize. 7110 EVT MemVT = Ld->getMemoryVT(); 7111 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || 7112 MemVT.getSizeInBits() >= 32) 7113 return SDValue(); 7114 7115 SDLoc SL(Ld); 7116 7117 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && 7118 "unexpected vector extload"); 7119 7120 // TODO: Drop only high part of range. 7121 SDValue Ptr = Ld->getBasePtr(); 7122 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, 7123 MVT::i32, SL, Ld->getChain(), Ptr, 7124 Ld->getOffset(), 7125 Ld->getPointerInfo(), MVT::i32, 7126 Ld->getAlignment(), 7127 Ld->getMemOperand()->getFlags(), 7128 Ld->getAAInfo(), 7129 nullptr); // Drop ranges 7130 7131 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); 7132 if (MemVT.isFloatingPoint()) { 7133 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && 7134 "unexpected fp extload"); 7135 TruncVT = MemVT.changeTypeToInteger(); 7136 } 7137 7138 SDValue Cvt = NewLoad; 7139 if (Ld->getExtensionType() == ISD::SEXTLOAD) { 7140 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, 7141 DAG.getValueType(TruncVT)); 7142 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || 7143 Ld->getExtensionType() == ISD::NON_EXTLOAD) { 7144 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); 7145 } else { 7146 assert(Ld->getExtensionType() == ISD::EXTLOAD); 7147 } 7148 7149 EVT VT = Ld->getValueType(0); 7150 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 7151 7152 DCI.AddToWorklist(Cvt.getNode()); 7153 7154 // We may need to handle exotic cases, such as i16->i64 extloads, so insert 7155 // the appropriate extension from the 32-bit load. 7156 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); 7157 DCI.AddToWorklist(Cvt.getNode()); 7158 7159 // Handle conversion back to floating point if necessary. 7160 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); 7161 7162 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); 7163 } 7164 7165 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 7166 SDLoc DL(Op); 7167 LoadSDNode *Load = cast<LoadSDNode>(Op); 7168 ISD::LoadExtType ExtType = Load->getExtensionType(); 7169 EVT MemVT = Load->getMemoryVT(); 7170 7171 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 7172 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) 7173 return SDValue(); 7174 7175 // FIXME: Copied from PPC 7176 // First, load into 32 bits, then truncate to 1 bit. 7177 7178 SDValue Chain = Load->getChain(); 7179 SDValue BasePtr = Load->getBasePtr(); 7180 MachineMemOperand *MMO = Load->getMemOperand(); 7181 7182 EVT RealMemVT = (MemVT == MVT::i1) ? 
MVT::i8 : MVT::i16; 7183 7184 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 7185 BasePtr, RealMemVT, MMO); 7186 7187 if (!MemVT.isVector()) { 7188 SDValue Ops[] = { 7189 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 7190 NewLD.getValue(1) 7191 }; 7192 7193 return DAG.getMergeValues(Ops, DL); 7194 } 7195 7196 SmallVector<SDValue, 3> Elts; 7197 for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { 7198 SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, 7199 DAG.getConstant(I, DL, MVT::i32)); 7200 7201 Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); 7202 } 7203 7204 SDValue Ops[] = { 7205 DAG.getBuildVector(MemVT, DL, Elts), 7206 NewLD.getValue(1) 7207 }; 7208 7209 return DAG.getMergeValues(Ops, DL); 7210 } 7211 7212 if (!MemVT.isVector()) 7213 return SDValue(); 7214 7215 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 7216 "Custom lowering for non-i32 vectors hasn't been implemented."); 7217 7218 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, 7219 *Load->getMemOperand())) { 7220 SDValue Ops[2]; 7221 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 7222 return DAG.getMergeValues(Ops, DL); 7223 } 7224 7225 unsigned Alignment = Load->getAlignment(); 7226 unsigned AS = Load->getAddressSpace(); 7227 if (Subtarget->hasLDSMisalignedBug() && 7228 AS == AMDGPUAS::FLAT_ADDRESS && 7229 Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { 7230 return SplitVectorLoad(Op, DAG); 7231 } 7232 7233 MachineFunction &MF = DAG.getMachineFunction(); 7234 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 7235 // If there is a possibility that flat instructions access scratch memory 7236 // then we need to use the same legalization rules we use for private. 7237 if (AS == AMDGPUAS::FLAT_ADDRESS) 7238 AS = MFI->hasFlatScratchInit() ? 7239 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 7240 7241 unsigned NumElements = MemVT.getVectorNumElements(); 7242 7243 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 7244 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { 7245 if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) { 7246 if (MemVT.isPow2VectorType()) 7247 return SDValue(); 7248 if (NumElements == 3) 7249 return WidenVectorLoad(Op, DAG); 7250 return SplitVectorLoad(Op, DAG); 7251 } 7252 // Non-uniform loads will be selected to MUBUF instructions, so they 7253 // have the same legalization requirements as global and private 7254 // loads. 7255 // 7256 } 7257 7258 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 7259 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 7260 AS == AMDGPUAS::GLOBAL_ADDRESS) { 7261 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && 7262 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) && 7263 Alignment >= 4 && NumElements < 32) { 7264 if (MemVT.isPow2VectorType()) 7265 return SDValue(); 7266 if (NumElements == 3) 7267 return WidenVectorLoad(Op, DAG); 7268 return SplitVectorLoad(Op, DAG); 7269 } 7270 // Non-uniform loads will be selected to MUBUF instructions, so they 7271 // have the same legalization requirements as global and private 7272 // loads. 7273 // 7274 } 7275 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 7276 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 7277 AS == AMDGPUAS::GLOBAL_ADDRESS || 7278 AS == AMDGPUAS::FLAT_ADDRESS) { 7279 if (NumElements > 4) 7280 return SplitVectorLoad(Op, DAG); 7281 // v3 loads not supported on SI.
7282 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 7283 return WidenVectorLoad(Op, DAG); 7284 // v3 and v4 loads are supported for private and global memory. 7285 return SDValue(); 7286 } 7287 if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 7288 // Depending on the setting of the private_element_size field in the 7289 // resource descriptor, we can only make private accesses up to a certain 7290 // size. 7291 switch (Subtarget->getMaxPrivateElementSize()) { 7292 case 4: 7293 return scalarizeVectorLoad(Load, DAG); 7294 case 8: 7295 if (NumElements > 2) 7296 return SplitVectorLoad(Op, DAG); 7297 return SDValue(); 7298 case 16: 7299 // Same as global/flat 7300 if (NumElements > 4) 7301 return SplitVectorLoad(Op, DAG); 7302 // v3 loads not supported on SI. 7303 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 7304 return WidenVectorLoad(Op, DAG); 7305 return SDValue(); 7306 default: 7307 llvm_unreachable("unsupported private_element_size"); 7308 } 7309 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { 7310 // Use ds_read_b128 if possible. 7311 if (Subtarget->useDS128() && Load->getAlignment() >= 16 && 7312 MemVT.getStoreSize() == 16) 7313 return SDValue(); 7314 7315 if (NumElements > 2) 7316 return SplitVectorLoad(Op, DAG); 7317 7318 // SI has a hardware bug in the LDS / GDS bounds checking: if the base 7319 // address is negative, then the instruction is incorrectly treated as 7320 // out-of-bounds even if base + offsets is in bounds. Split vectorized 7321 // loads here to avoid emitting ds_read2_b32. We may re-combine the 7322 // load later in the SILoadStoreOptimizer. 7323 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 7324 NumElements == 2 && MemVT.getStoreSize() == 8 && 7325 Load->getAlignment() < 8) { 7326 return SplitVectorLoad(Op, DAG); 7327 } 7328 } 7329 return SDValue(); 7330 } 7331 7332 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 7333 EVT VT = Op.getValueType(); 7334 assert(VT.getSizeInBits() == 64); 7335 7336 SDLoc DL(Op); 7337 SDValue Cond = Op.getOperand(0); 7338 7339 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 7340 SDValue One = DAG.getConstant(1, DL, MVT::i32); 7341 7342 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 7343 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 7344 7345 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 7346 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 7347 7348 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 7349 7350 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 7351 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 7352 7353 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 7354 7355 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 7356 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 7357 } 7358 7359 // Catch division cases where we can use shortcuts with rcp and rsq 7360 // instructions.
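// For instance, under unsafe-fp-math / arcp:
//   1.0 / x       -> RCP(x)
//   1.0 / sqrt(x) -> RSQ(x)
//   x / y         -> x * RCP(y)
// (a summary of the cases handled below, not additional functionality).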
7361 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 7362 SelectionDAG &DAG) const { 7363 SDLoc SL(Op); 7364 SDValue LHS = Op.getOperand(0); 7365 SDValue RHS = Op.getOperand(1); 7366 EVT VT = Op.getValueType(); 7367 const SDNodeFlags Flags = Op->getFlags(); 7368 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal(); 7369 7370 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals()) 7371 return SDValue(); 7372 7373 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 7374 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) { 7375 if (CLHS->isExactlyValue(1.0)) { 7376 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 7377 // the CI documentation has a worst case error of 1 ulp. 7378 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 7379 // use it as long as we aren't trying to use denormals. 7380 // 7381 // v_rcp_f16 and v_rsq_f16 DO support denormals. 7382 7383 // 1.0 / sqrt(x) -> rsq(x) 7384 7385 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 7386 // error seems really high at 2^29 ULP. 7387 if (RHS.getOpcode() == ISD::FSQRT) 7388 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 7389 7390 // 1.0 / x -> rcp(x) 7391 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 7392 } 7393 7394 // Same as for 1.0, but expand the sign out of the constant. 7395 if (CLHS->isExactlyValue(-1.0)) { 7396 // -1.0 / x -> rcp (fneg x) 7397 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 7398 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 7399 } 7400 } 7401 } 7402 7403 if (Unsafe) { 7404 // Turn into multiply by the reciprocal. 7405 // x / y -> x * (1.0 / y) 7406 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 7407 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); 7408 } 7409 7410 return SDValue(); 7411 } 7412 7413 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 7414 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 7415 if (GlueChain->getNumValues() <= 1) { 7416 return DAG.getNode(Opcode, SL, VT, A, B); 7417 } 7418 7419 assert(GlueChain->getNumValues() == 3); 7420 7421 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 7422 switch (Opcode) { 7423 default: llvm_unreachable("no chain equivalent for opcode"); 7424 case ISD::FMUL: 7425 Opcode = AMDGPUISD::FMUL_W_CHAIN; 7426 break; 7427 } 7428 7429 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 7430 GlueChain.getValue(2)); 7431 } 7432 7433 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 7434 EVT VT, SDValue A, SDValue B, SDValue C, 7435 SDValue GlueChain) { 7436 if (GlueChain->getNumValues() <= 1) { 7437 return DAG.getNode(Opcode, SL, VT, A, B, C); 7438 } 7439 7440 assert(GlueChain->getNumValues() == 3); 7441 7442 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 7443 switch (Opcode) { 7444 default: llvm_unreachable("no chain equivalent for opcode"); 7445 case ISD::FMA: 7446 Opcode = AMDGPUISD::FMA_W_CHAIN; 7447 break; 7448 } 7449 7450 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, 7451 GlueChain.getValue(2)); 7452 } 7453 7454 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 7455 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 7456 return FastLowered; 7457 7458 SDLoc SL(Op); 7459 SDValue Src0 = Op.getOperand(0); 7460 SDValue Src1 = Op.getOperand(1); 7461 7462 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 7463 SDValue 
CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 7464 7465 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 7466 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 7467 7468 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 7469 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 7470 7471 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 7472 } 7473 7474 // Faster 2.5 ULP division that does not support denormals. 7475 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 7476 SDLoc SL(Op); 7477 SDValue LHS = Op.getOperand(1); 7478 SDValue RHS = Op.getOperand(2); 7479 7480 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 7481 7482 const APFloat K0Val(BitsToFloat(0x6f800000)); 7483 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 7484 7485 const APFloat K1Val(BitsToFloat(0x2f800000)); 7486 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 7487 7488 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 7489 7490 EVT SetCCVT = 7491 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 7492 7493 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 7494 7495 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 7496 7497 // TODO: Should this propagate fast-math-flags? 7498 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 7499 7500 // rcp does not support denormals. 7501 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 7502 7503 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 7504 7505 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 7506 } 7507 7508 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 7509 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 7510 return FastLowered; 7511 7512 SDLoc SL(Op); 7513 SDValue LHS = Op.getOperand(0); 7514 SDValue RHS = Op.getOperand(1); 7515 7516 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 7517 7518 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 7519 7520 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 7521 RHS, RHS, LHS); 7522 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 7523 LHS, RHS, LHS); 7524 7525 // Denominator is scaled to not be denormal, so using rcp is ok. 
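// The FMA chain that follows refines this approximate reciprocal and then the
// scaled quotient, Newton-Raphson style, before DIV_FMAS and DIV_FIXUP produce
// the final result.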
7526 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 7527 DenominatorScaled); 7528 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 7529 DenominatorScaled); 7530 7531 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 7532 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 7533 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 7534 7535 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 7536 7537 if (!Subtarget->hasFP32Denormals()) { 7538 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 7539 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 7540 SL, MVT::i32); 7541 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 7542 DAG.getEntryNode(), 7543 EnableDenormValue, BitField); 7544 SDValue Ops[3] = { 7545 NegDivScale0, 7546 EnableDenorm.getValue(0), 7547 EnableDenorm.getValue(1) 7548 }; 7549 7550 NegDivScale0 = DAG.getMergeValues(Ops, SL); 7551 } 7552 7553 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 7554 ApproxRcp, One, NegDivScale0); 7555 7556 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 7557 ApproxRcp, Fma0); 7558 7559 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 7560 Fma1, Fma1); 7561 7562 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 7563 NumeratorScaled, Mul); 7564 7565 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 7566 7567 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 7568 NumeratorScaled, Fma3); 7569 7570 if (!Subtarget->hasFP32Denormals()) { 7571 const SDValue DisableDenormValue = 7572 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 7573 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 7574 Fma4.getValue(1), 7575 DisableDenormValue, 7576 BitField, 7577 Fma4.getValue(2)); 7578 7579 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 7580 DisableDenorm, DAG.getRoot()); 7581 DAG.setRoot(OutputChain); 7582 } 7583 7584 SDValue Scale = NumeratorScaled.getValue(1); 7585 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 7586 Fma4, Fma1, Fma3, Scale); 7587 7588 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 7589 } 7590 7591 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 7592 if (DAG.getTarget().Options.UnsafeFPMath) 7593 return lowerFastUnsafeFDIV(Op, DAG); 7594 7595 SDLoc SL(Op); 7596 SDValue X = Op.getOperand(0); 7597 SDValue Y = Op.getOperand(1); 7598 7599 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 7600 7601 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 7602 7603 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 7604 7605 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 7606 7607 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 7608 7609 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 7610 7611 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 7612 7613 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 7614 7615 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 7616 7617 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 7618 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 7619 7620 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 7621 NegDivScale0, Mul, DivScale1); 7622 7623 SDValue Scale; 
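// Scale is the condition flag consumed by DIV_FMAS; when div_scale's condition
// output is unusable it is recomputed below from the high dwords of the inputs
// and the div_scale results.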
7624 7625 if (!Subtarget->hasUsableDivScaleConditionOutput()) { 7626 // Work around a hardware bug on SI where the condition output from div_scale 7627 // is not usable. 7628 7629 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 7630 7631 // Figure out which scale to use for div_fmas. 7632 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 7633 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 7634 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 7635 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 7636 7637 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 7638 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 7639 7640 SDValue Scale0Hi 7641 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 7642 SDValue Scale1Hi 7643 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 7644 7645 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 7646 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 7647 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 7648 } else { 7649 Scale = DivScale1.getValue(1); 7650 } 7651 7652 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 7653 Fma4, Fma3, Mul, Scale); 7654 7655 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 7656 } 7657 7658 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 7659 EVT VT = Op.getValueType(); 7660 7661 if (VT == MVT::f32) 7662 return LowerFDIV32(Op, DAG); 7663 7664 if (VT == MVT::f64) 7665 return LowerFDIV64(Op, DAG); 7666 7667 if (VT == MVT::f16) 7668 return LowerFDIV16(Op, DAG); 7669 7670 llvm_unreachable("Unexpected type for fdiv"); 7671 } 7672 7673 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7674 SDLoc DL(Op); 7675 StoreSDNode *Store = cast<StoreSDNode>(Op); 7676 EVT VT = Store->getMemoryVT(); 7677 7678 if (VT == MVT::i1) { 7679 return DAG.getTruncStore(Store->getChain(), DL, 7680 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 7681 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 7682 } 7683 7684 assert(VT.isVector() && 7685 Store->getValue().getValueType().getScalarType() == MVT::i32); 7686 7687 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 7688 *Store->getMemOperand())) { 7689 return expandUnalignedStore(Store, DAG); 7690 } 7691 7692 unsigned AS = Store->getAddressSpace(); 7693 if (Subtarget->hasLDSMisalignedBug() && 7694 AS == AMDGPUAS::FLAT_ADDRESS && 7695 Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) { 7696 return SplitVectorStore(Op, DAG); 7697 } 7698 7699 MachineFunction &MF = DAG.getMachineFunction(); 7700 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 7701 // If there is a possibility that flat instructions access scratch memory 7702 // then we need to use the same legalization rules we use for private. 7703 if (AS == AMDGPUAS::FLAT_ADDRESS) 7704 AS = MFI->hasFlatScratchInit() ? 7705 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 7706 7707 unsigned NumElements = VT.getVectorNumElements(); 7708 if (AS == AMDGPUAS::GLOBAL_ADDRESS || 7709 AS == AMDGPUAS::FLAT_ADDRESS) { 7710 if (NumElements > 4) 7711 return SplitVectorStore(Op, DAG); 7712 // v3 stores not supported on SI.
7713 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 7714 return SplitVectorStore(Op, DAG); 7715 return SDValue(); 7716 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 7717 switch (Subtarget->getMaxPrivateElementSize()) { 7718 case 4: 7719 return scalarizeVectorStore(Store, DAG); 7720 case 8: 7721 if (NumElements > 2) 7722 return SplitVectorStore(Op, DAG); 7723 return SDValue(); 7724 case 16: 7725 if (NumElements > 4 || NumElements == 3) 7726 return SplitVectorStore(Op, DAG); 7727 return SDValue(); 7728 default: 7729 llvm_unreachable("unsupported private_element_size"); 7730 } 7731 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { 7732 // Use ds_write_b128 if possible. 7733 if (Subtarget->useDS128() && Store->getAlignment() >= 16 && 7734 VT.getStoreSize() == 16 && NumElements != 3) 7735 return SDValue(); 7736 7737 if (NumElements > 2) 7738 return SplitVectorStore(Op, DAG); 7739 7740 // SI has a hardware bug in the LDS / GDS bounds checking: if the base 7741 // address is negative, then the instruction is incorrectly treated as 7742 // out-of-bounds even if base + offsets is in bounds. Split vectorized 7743 // stores here to avoid emitting ds_write2_b32. We may re-combine the 7744 // store later in the SILoadStoreOptimizer. 7745 if (!Subtarget->hasUsableDSOffset() && 7746 NumElements == 2 && VT.getStoreSize() == 8 && 7747 Store->getAlignment() < 8) { 7748 return SplitVectorStore(Op, DAG); 7749 } 7750 7751 return SDValue(); 7752 } else { 7753 llvm_unreachable("unhandled address space"); 7754 } 7755 } 7756 7757 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 7758 SDLoc DL(Op); 7759 EVT VT = Op.getValueType(); 7760 SDValue Arg = Op.getOperand(0); 7761 SDValue TrigVal; 7762 7763 // TODO: Should this propagate fast-math-flags?
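// The hardware sin/cos take their input in units of full revolutions, so the
// radian argument is pre-scaled by 1/(2*pi); subtargets with a reduced valid
// input range additionally wrap the scaled value into [0, 1) with FRACT.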
7764 7765 SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT); 7766 7767 if (Subtarget->hasTrigReducedRange()) { 7768 SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi); 7769 TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal); 7770 } else { 7771 TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi); 7772 } 7773 7774 switch (Op.getOpcode()) { 7775 case ISD::FCOS: 7776 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal); 7777 case ISD::FSIN: 7778 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal); 7779 default: 7780 llvm_unreachable("Wrong trig opcode"); 7781 } 7782 } 7783 7784 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 7785 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 7786 assert(AtomicNode->isCompareAndSwap()); 7787 unsigned AS = AtomicNode->getAddressSpace(); 7788 7789 // No custom lowering required for local address space 7790 if (!isFlatGlobalAddrSpace(AS)) 7791 return Op; 7792 7793 // Non-local address space requires custom lowering for atomic compare 7794 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 7795 SDLoc DL(Op); 7796 SDValue ChainIn = Op.getOperand(0); 7797 SDValue Addr = Op.getOperand(1); 7798 SDValue Old = Op.getOperand(2); 7799 SDValue New = Op.getOperand(3); 7800 EVT VT = Op.getValueType(); 7801 MVT SimpleVT = VT.getSimpleVT(); 7802 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 7803 7804 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 7805 SDValue Ops[] = { ChainIn, Addr, NewOld }; 7806 7807 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 7808 Ops, VT, AtomicNode->getMemOperand()); 7809 } 7810 7811 //===----------------------------------------------------------------------===// 7812 // Custom DAG optimizations 7813 //===----------------------------------------------------------------------===// 7814 7815 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 7816 DAGCombinerInfo &DCI) const { 7817 EVT VT = N->getValueType(0); 7818 EVT ScalarVT = VT.getScalarType(); 7819 if (ScalarVT != MVT::f32) 7820 return SDValue(); 7821 7822 SelectionDAG &DAG = DCI.DAG; 7823 SDLoc DL(N); 7824 7825 SDValue Src = N->getOperand(0); 7826 EVT SrcVT = Src.getValueType(); 7827 7828 // TODO: We could try to match extracting the higher bytes, which would be 7829 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 7830 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 7831 // about in practice. 7832 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { 7833 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 7834 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 7835 DCI.AddToWorklist(Cvt.getNode()); 7836 return Cvt; 7837 } 7838 } 7839 7840 return SDValue(); 7841 } 7842 7843 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 7844 7845 // This is a variant of 7846 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 7847 // 7848 // The normal DAG combiner will do this, but only if the add has one use since 7849 // that would increase the number of instructions. 7850 // 7851 // This prevents us from seeing a constant offset that can be folded into a 7852 // memory instruction's addressing mode. If we know the resulting add offset of 7853 // a pointer can be folded into an addressing offset, we can replace the pointer 7854 // operand with the add of new constant offset. 
This eliminates one of the uses, 7855 // and may allow the remaining use to also be simplified. 7856 // 7857 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 7858 unsigned AddrSpace, 7859 EVT MemVT, 7860 DAGCombinerInfo &DCI) const { 7861 SDValue N0 = N->getOperand(0); 7862 SDValue N1 = N->getOperand(1); 7863 7864 // We only do this to handle cases where it's profitable when there are 7865 // multiple uses of the add, so defer to the standard combine. 7866 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || 7867 N0->hasOneUse()) 7868 return SDValue(); 7869 7870 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 7871 if (!CN1) 7872 return SDValue(); 7873 7874 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 7875 if (!CAdd) 7876 return SDValue(); 7877 7878 // If the resulting offset is too large, we can't fold it into the addressing 7879 // mode offset. 7880 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 7881 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); 7882 7883 AddrMode AM; 7884 AM.HasBaseReg = true; 7885 AM.BaseOffs = Offset.getSExtValue(); 7886 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) 7887 return SDValue(); 7888 7889 SelectionDAG &DAG = DCI.DAG; 7890 SDLoc SL(N); 7891 EVT VT = N->getValueType(0); 7892 7893 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 7894 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 7895 7896 SDNodeFlags Flags; 7897 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && 7898 (N0.getOpcode() == ISD::OR || 7899 N0->getFlags().hasNoUnsignedWrap())); 7900 7901 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); 7902 } 7903 7904 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 7905 DAGCombinerInfo &DCI) const { 7906 SDValue Ptr = N->getBasePtr(); 7907 SelectionDAG &DAG = DCI.DAG; 7908 SDLoc SL(N); 7909 7910 // TODO: We could also do this for multiplies. 7911 if (Ptr.getOpcode() == ISD::SHL) { 7912 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), 7913 N->getMemoryVT(), DCI); 7914 if (NewPtr) { 7915 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); 7916 7917 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; 7918 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); 7919 } 7920 } 7921 7922 return SDValue(); 7923 } 7924 7925 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 7926 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 7927 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 7928 (Opc == ISD::XOR && Val == 0); 7929 } 7930 7931 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This 7932 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 7933 // integer combine opportunities since most 64-bit operations are decomposed 7934 // this way. TODO: We won't want this for SALU especially if it is an inline 7935 // immediate. 
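// A minimal sketch of the effect: (and i64 %x, 0x0000ffff) is rewritten as
//   lo = and(lo(%x), 0xffff), hi = 0
// since the high-half AND with 0 folds away entirely.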
7936 SDValue SITargetLowering::splitBinaryBitConstantOp(
7937   DAGCombinerInfo &DCI,
7938   const SDLoc &SL,
7939   unsigned Opc, SDValue LHS,
7940   const ConstantSDNode *CRHS) const {
7941   uint64_t Val = CRHS->getZExtValue();
7942   uint32_t ValLo = Lo_32(Val);
7943   uint32_t ValHi = Hi_32(Val);
7944   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7945
7946   if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
7947        bitOpWithConstantIsReducible(Opc, ValHi)) ||
7948       (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7949     // If we need to materialize a 64-bit immediate, it will be split up later
7950     // anyway. Avoid creating the harder to understand 64-bit immediate
7951     // materialization.
7952     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
7953   }
7954
7955   return SDValue();
7956 }
7957
7958 // Returns true if the argument is a boolean value that is not serialized into
7959 // memory or an argument and does not require v_cndmask_b32 to be deserialized.
7960 static bool isBoolSGPR(SDValue V) {
7961   if (V.getValueType() != MVT::i1)
7962     return false;
7963   switch (V.getOpcode()) {
7964   default: break;
7965   case ISD::SETCC:
7966   case ISD::AND:
7967   case ISD::OR:
7968   case ISD::XOR:
7969   case AMDGPUISD::FP_CLASS:
7970     return true;
7971   }
7972   return false;
7973 }
7974
7975 // If a constant has all zeroes or all ones within each byte return it.
7976 // Otherwise return 0.
7977 static uint32_t getConstantPermuteMask(uint32_t C) {
7978   // 0xff for any zero byte in the mask
7979   uint32_t ZeroByteMask = 0;
7980   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
7981   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
7982   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
7983   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
7984   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
7985   if ((NonZeroByteMask & C) != NonZeroByteMask)
7986     return 0; // Partial bytes selected.
7987   return C;
7988 }
7989
7990 // Check if a node selects whole bytes from its operand 0 starting at a byte
7991 // boundary while masking the rest. Returns the select mask as used in
7992 // v_perm_b32, or -1 if it does not.
7993 // Note byte select encoding:
7994 // value 0-3 selects corresponding source byte;
7995 // value 0xc selects zero;
7996 // value 0xff selects 0xff.
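// For example (values chosen for illustration only):
//   (and x, 0x0000ffff) -> 0x0c0c0100 (bytes 0-1 taken from x, bytes 2-3 zero)
//   (shl x, 16)         -> 0x01000c0c (bytes 0-1 of x moved to bytes 2-3)
//   (srl x, 16)         -> 0x0c0c0302 (bytes 2-3 of x moved to bytes 0-1)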
7997 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { 7998 assert(V.getValueSizeInBits() == 32); 7999 8000 if (V.getNumOperands() != 2) 8001 return ~0; 8002 8003 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); 8004 if (!N1) 8005 return ~0; 8006 8007 uint32_t C = N1->getZExtValue(); 8008 8009 switch (V.getOpcode()) { 8010 default: 8011 break; 8012 case ISD::AND: 8013 if (uint32_t ConstMask = getConstantPermuteMask(C)) { 8014 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); 8015 } 8016 break; 8017 8018 case ISD::OR: 8019 if (uint32_t ConstMask = getConstantPermuteMask(C)) { 8020 return (0x03020100 & ~ConstMask) | ConstMask; 8021 } 8022 break; 8023 8024 case ISD::SHL: 8025 if (C % 8) 8026 return ~0; 8027 8028 return uint32_t((0x030201000c0c0c0cull << C) >> 32); 8029 8030 case ISD::SRL: 8031 if (C % 8) 8032 return ~0; 8033 8034 return uint32_t(0x0c0c0c0c03020100ull >> C); 8035 } 8036 8037 return ~0; 8038 } 8039 8040 SDValue SITargetLowering::performAndCombine(SDNode *N, 8041 DAGCombinerInfo &DCI) const { 8042 if (DCI.isBeforeLegalize()) 8043 return SDValue(); 8044 8045 SelectionDAG &DAG = DCI.DAG; 8046 EVT VT = N->getValueType(0); 8047 SDValue LHS = N->getOperand(0); 8048 SDValue RHS = N->getOperand(1); 8049 8050 8051 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 8052 if (VT == MVT::i64 && CRHS) { 8053 if (SDValue Split 8054 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 8055 return Split; 8056 } 8057 8058 if (CRHS && VT == MVT::i32) { 8059 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb 8060 // nb = number of trailing zeroes in mask 8061 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, 8062 // given that we are selecting 8 or 16 bit fields starting at byte boundary. 8063 uint64_t Mask = CRHS->getZExtValue(); 8064 unsigned Bits = countPopulation(Mask); 8065 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && 8066 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { 8067 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { 8068 unsigned Shift = CShift->getZExtValue(); 8069 unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); 8070 unsigned Offset = NB + Shift; 8071 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. 
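        // A concrete instance of the pattern above (illustrative values only):
        // and (srl x, 8), 0xff00 -> shl (bfe x, 16, 8), 8, i.e. bits 16..23 of
        // x are extracted and repositioned at bits 8..15 (Shift = 8, NB = 8,
        // Offset = 16, Bits = 8).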
8072 SDLoc SL(N); 8073 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 8074 LHS->getOperand(0), 8075 DAG.getConstant(Offset, SL, MVT::i32), 8076 DAG.getConstant(Bits, SL, MVT::i32)); 8077 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 8078 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, 8079 DAG.getValueType(NarrowVT)); 8080 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, 8081 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); 8082 return Shl; 8083 } 8084 } 8085 } 8086 8087 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) 8088 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && 8089 isa<ConstantSDNode>(LHS.getOperand(2))) { 8090 uint32_t Sel = getConstantPermuteMask(Mask); 8091 if (!Sel) 8092 return SDValue(); 8093 8094 // Select 0xc for all zero bytes 8095 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); 8096 SDLoc DL(N); 8097 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), 8098 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); 8099 } 8100 } 8101 8102 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 8103 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 8104 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 8105 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 8106 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 8107 8108 SDValue X = LHS.getOperand(0); 8109 SDValue Y = RHS.getOperand(0); 8110 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 8111 return SDValue(); 8112 8113 if (LCC == ISD::SETO) { 8114 if (X != LHS.getOperand(1)) 8115 return SDValue(); 8116 8117 if (RCC == ISD::SETUNE) { 8118 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 8119 if (!C1 || !C1->isInfinity() || C1->isNegative()) 8120 return SDValue(); 8121 8122 const uint32_t Mask = SIInstrFlags::N_NORMAL | 8123 SIInstrFlags::N_SUBNORMAL | 8124 SIInstrFlags::N_ZERO | 8125 SIInstrFlags::P_ZERO | 8126 SIInstrFlags::P_SUBNORMAL | 8127 SIInstrFlags::P_NORMAL; 8128 8129 static_assert(((~(SIInstrFlags::S_NAN | 8130 SIInstrFlags::Q_NAN | 8131 SIInstrFlags::N_INFINITY | 8132 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 8133 "mask not equal"); 8134 8135 SDLoc DL(N); 8136 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 8137 X, DAG.getConstant(Mask, DL, MVT::i32)); 8138 } 8139 } 8140 } 8141 8142 if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) 8143 std::swap(LHS, RHS); 8144 8145 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && 8146 RHS.hasOneUse()) { 8147 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 8148 // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) 8149 // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) 8150 const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 8151 if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && 8152 (RHS.getOperand(0) == LHS.getOperand(0) && 8153 LHS.getOperand(0) == LHS.getOperand(1))) { 8154 const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; 8155 unsigned NewMask = LCC == ISD::SETO ? 
8156         Mask->getZExtValue() & ~OrdMask :
8157         Mask->getZExtValue() & OrdMask;
8158
8159       SDLoc DL(N);
8160       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
8161                          DAG.getConstant(NewMask, DL, MVT::i32));
8162     }
8163   }
8164
8165   if (VT == MVT::i32 &&
8166       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
8167     // and x, (sext cc from i1) => select cc, x, 0
8168     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
8169       std::swap(LHS, RHS);
8170     if (isBoolSGPR(RHS.getOperand(0)))
8171       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
8172                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
8173   }
8174
8175   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8176   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8177   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8178       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8179     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8180     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8181     if (LHSMask != ~0u && RHSMask != ~0u) {
8182       // Canonicalize the expression in an attempt to have fewer unique masks
8183       // and therefore fewer registers used to hold the masks.
8184       if (LHSMask > RHSMask) {
8185         std::swap(LHSMask, RHSMask);
8186         std::swap(LHS, RHS);
8187       }
8188
8189       // Select 0xc for each lane used from a source operand: zero lanes get
8190       // 0xc, 0xff lanes get 0xff, and actual data lanes are in the 0-3 range.
8191       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8192       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8193
8194       // Check if we need to combine values from two sources within a byte.
8195       if (!(LHSUsedLanes & RHSUsedLanes) &&
8196           // If we select the high and low words, keep it for SDWA.
8197           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8198           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
8199         // Each byte in each mask is either a selector in the 0-3 range, or
8200         // has higher bits set: 0xff selects 0xff and 0x0c selects zero. If
8201         // 0x0c is in either mask it must stay 0x0c; otherwise the mask that
8202         // is not 0xff wins. ANDing both masks gives the correct result,
8203         // except that 0x0c must be corrected to yield exactly 0x0c.
8204         uint32_t Mask = LHSMask & RHSMask;
8205         for (unsigned I = 0; I < 32; I += 8) {
8206           uint32_t ByteSel = 0xff << I;
8207           if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
8208             Mask &= (0x0c << I) & 0xffffffff;
8209         }
8210
8211         // Add 4 to each active LHS lane. It will not affect any existing 0xff
8212         // or 0x0c.
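        // Worked example (values chosen for illustration only):
        // and (or x, 0x000000ff), (or y, 0xffffff00) gives LHSMask = 0x030201ff
        // and RHSMask = 0xffffff00, so Mask = 0x03020100, LHSUsedLanes =
        // 0x0c0c0c00, and the Sel computed below is 0x07060500: bytes 1-3 come
        // from x and byte 0 comes from y.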
8213 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); 8214 SDLoc DL(N); 8215 8216 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, 8217 LHS.getOperand(0), RHS.getOperand(0), 8218 DAG.getConstant(Sel, DL, MVT::i32)); 8219 } 8220 } 8221 } 8222 8223 return SDValue(); 8224 } 8225 8226 SDValue SITargetLowering::performOrCombine(SDNode *N, 8227 DAGCombinerInfo &DCI) const { 8228 SelectionDAG &DAG = DCI.DAG; 8229 SDValue LHS = N->getOperand(0); 8230 SDValue RHS = N->getOperand(1); 8231 8232 EVT VT = N->getValueType(0); 8233 if (VT == MVT::i1) { 8234 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 8235 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 8236 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 8237 SDValue Src = LHS.getOperand(0); 8238 if (Src != RHS.getOperand(0)) 8239 return SDValue(); 8240 8241 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 8242 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 8243 if (!CLHS || !CRHS) 8244 return SDValue(); 8245 8246 // Only 10 bits are used. 8247 static const uint32_t MaxMask = 0x3ff; 8248 8249 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 8250 SDLoc DL(N); 8251 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 8252 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 8253 } 8254 8255 return SDValue(); 8256 } 8257 8258 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) 8259 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && 8260 LHS.getOpcode() == AMDGPUISD::PERM && 8261 isa<ConstantSDNode>(LHS.getOperand(2))) { 8262 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); 8263 if (!Sel) 8264 return SDValue(); 8265 8266 Sel |= LHS.getConstantOperandVal(2); 8267 SDLoc DL(N); 8268 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), 8269 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); 8270 } 8271 8272 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) 8273 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 8274 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && 8275 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) { 8276 uint32_t LHSMask = getPermuteMask(DAG, LHS); 8277 uint32_t RHSMask = getPermuteMask(DAG, RHS); 8278 if (LHSMask != ~0u && RHSMask != ~0u) { 8279 // Canonicalize the expression in an attempt to have fewer unique masks 8280 // and therefore fewer registers used to hold the masks. 8281 if (LHSMask > RHSMask) { 8282 std::swap(LHSMask, RHSMask); 8283 std::swap(LHS, RHS); 8284 } 8285 8286 // Select 0xc for each lane used from source operand. Zero has 0xc mask 8287 // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range. 8288 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 8289 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 8290 8291 // Check of we need to combine values from two sources within a byte. 8292 if (!(LHSUsedLanes & RHSUsedLanes) && 8293 // If we select high and lower word keep it for SDWA. 8294 // TODO: teach SDWA to work with v_perm_b32 and remove the check. 8295 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { 8296 // Kill zero bytes selected by other mask. Zero value is 0xc. 
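        // Worked example (values chosen for illustration only):
        // or (and x, 0x000000ff), (and y, 0xffffff00): after the canonical swap
        // above, LHSMask = 0x0302010c (from y) and RHSMask = 0x0c0c0c00 (from
        // x). The steps below clear the zero lanes and give Sel = 0x07060500,
        // i.e. perm y, x taking bytes 1-3 from y and byte 0 from x.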
8297 LHSMask &= ~RHSUsedLanes; 8298 RHSMask &= ~LHSUsedLanes; 8299 // Add 4 to each active LHS lane 8300 LHSMask |= LHSUsedLanes & 0x04040404; 8301 // Combine masks 8302 uint32_t Sel = LHSMask | RHSMask; 8303 SDLoc DL(N); 8304 8305 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, 8306 LHS.getOperand(0), RHS.getOperand(0), 8307 DAG.getConstant(Sel, DL, MVT::i32)); 8308 } 8309 } 8310 } 8311 8312 if (VT != MVT::i64) 8313 return SDValue(); 8314 8315 // TODO: This could be a generic combine with a predicate for extracting the 8316 // high half of an integer being free. 8317 8318 // (or i64:x, (zero_extend i32:y)) -> 8319 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 8320 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 8321 RHS.getOpcode() != ISD::ZERO_EXTEND) 8322 std::swap(LHS, RHS); 8323 8324 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 8325 SDValue ExtSrc = RHS.getOperand(0); 8326 EVT SrcVT = ExtSrc.getValueType(); 8327 if (SrcVT == MVT::i32) { 8328 SDLoc SL(N); 8329 SDValue LowLHS, HiBits; 8330 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 8331 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 8332 8333 DCI.AddToWorklist(LowOr.getNode()); 8334 DCI.AddToWorklist(HiBits.getNode()); 8335 8336 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 8337 LowOr, HiBits); 8338 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 8339 } 8340 } 8341 8342 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 8343 if (CRHS) { 8344 if (SDValue Split 8345 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 8346 return Split; 8347 } 8348 8349 return SDValue(); 8350 } 8351 8352 SDValue SITargetLowering::performXorCombine(SDNode *N, 8353 DAGCombinerInfo &DCI) const { 8354 EVT VT = N->getValueType(0); 8355 if (VT != MVT::i64) 8356 return SDValue(); 8357 8358 SDValue LHS = N->getOperand(0); 8359 SDValue RHS = N->getOperand(1); 8360 8361 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 8362 if (CRHS) { 8363 if (SDValue Split 8364 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 8365 return Split; 8366 } 8367 8368 return SDValue(); 8369 } 8370 8371 // Instructions that will be lowered with a final instruction that zeros the 8372 // high result bits. 8373 // XXX - probably only need to list legal operations. 8374 static bool fp16SrcZerosHighBits(unsigned Opc) { 8375 switch (Opc) { 8376 case ISD::FADD: 8377 case ISD::FSUB: 8378 case ISD::FMUL: 8379 case ISD::FDIV: 8380 case ISD::FREM: 8381 case ISD::FMA: 8382 case ISD::FMAD: 8383 case ISD::FCANONICALIZE: 8384 case ISD::FP_ROUND: 8385 case ISD::UINT_TO_FP: 8386 case ISD::SINT_TO_FP: 8387 case ISD::FABS: 8388 // Fabs is lowered to a bit operation, but it's an and which will clear the 8389 // high bits anyway. 
8390 case ISD::FSQRT: 8391 case ISD::FSIN: 8392 case ISD::FCOS: 8393 case ISD::FPOWI: 8394 case ISD::FPOW: 8395 case ISD::FLOG: 8396 case ISD::FLOG2: 8397 case ISD::FLOG10: 8398 case ISD::FEXP: 8399 case ISD::FEXP2: 8400 case ISD::FCEIL: 8401 case ISD::FTRUNC: 8402 case ISD::FRINT: 8403 case ISD::FNEARBYINT: 8404 case ISD::FROUND: 8405 case ISD::FFLOOR: 8406 case ISD::FMINNUM: 8407 case ISD::FMAXNUM: 8408 case AMDGPUISD::FRACT: 8409 case AMDGPUISD::CLAMP: 8410 case AMDGPUISD::COS_HW: 8411 case AMDGPUISD::SIN_HW: 8412 case AMDGPUISD::FMIN3: 8413 case AMDGPUISD::FMAX3: 8414 case AMDGPUISD::FMED3: 8415 case AMDGPUISD::FMAD_FTZ: 8416 case AMDGPUISD::RCP: 8417 case AMDGPUISD::RSQ: 8418 case AMDGPUISD::RCP_IFLAG: 8419 case AMDGPUISD::LDEXP: 8420 return true; 8421 default: 8422 // fcopysign, select and others may be lowered to 32-bit bit operations 8423 // which don't zero the high bits. 8424 return false; 8425 } 8426 } 8427 8428 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, 8429 DAGCombinerInfo &DCI) const { 8430 if (!Subtarget->has16BitInsts() || 8431 DCI.getDAGCombineLevel() < AfterLegalizeDAG) 8432 return SDValue(); 8433 8434 EVT VT = N->getValueType(0); 8435 if (VT != MVT::i32) 8436 return SDValue(); 8437 8438 SDValue Src = N->getOperand(0); 8439 if (Src.getValueType() != MVT::i16) 8440 return SDValue(); 8441 8442 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src 8443 // FIXME: It is not universally true that the high bits are zeroed on gfx9. 8444 if (Src.getOpcode() == ISD::BITCAST) { 8445 SDValue BCSrc = Src.getOperand(0); 8446 if (BCSrc.getValueType() == MVT::f16 && 8447 fp16SrcZerosHighBits(BCSrc.getOpcode())) 8448 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); 8449 } 8450 8451 return SDValue(); 8452 } 8453 8454 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, 8455 DAGCombinerInfo &DCI) 8456 const { 8457 SDValue Src = N->getOperand(0); 8458 auto *VTSign = cast<VTSDNode>(N->getOperand(1)); 8459 8460 if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && 8461 VTSign->getVT() == MVT::i8) || 8462 (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && 8463 VTSign->getVT() == MVT::i16)) && 8464 Src.hasOneUse()) { 8465 auto *M = cast<MemSDNode>(Src); 8466 SDValue Ops[] = { 8467 Src.getOperand(0), // Chain 8468 Src.getOperand(1), // rsrc 8469 Src.getOperand(2), // vindex 8470 Src.getOperand(3), // voffset 8471 Src.getOperand(4), // soffset 8472 Src.getOperand(5), // offset 8473 Src.getOperand(6), 8474 Src.getOperand(7) 8475 }; 8476 // replace with BUFFER_LOAD_BYTE/SHORT 8477 SDVTList ResList = DCI.DAG.getVTList(MVT::i32, 8478 Src.getOperand(0).getValueType()); 8479 unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? 
8480 AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; 8481 SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), 8482 ResList, 8483 Ops, M->getMemoryVT(), 8484 M->getMemOperand()); 8485 return DCI.DAG.getMergeValues({BufferLoadSignExt, 8486 BufferLoadSignExt.getValue(1)}, SDLoc(N)); 8487 } 8488 return SDValue(); 8489 } 8490 8491 SDValue SITargetLowering::performClassCombine(SDNode *N, 8492 DAGCombinerInfo &DCI) const { 8493 SelectionDAG &DAG = DCI.DAG; 8494 SDValue Mask = N->getOperand(1); 8495 8496 // fp_class x, 0 -> false 8497 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 8498 if (CMask->isNullValue()) 8499 return DAG.getConstant(0, SDLoc(N), MVT::i1); 8500 } 8501 8502 if (N->getOperand(0).isUndef()) 8503 return DAG.getUNDEF(MVT::i1); 8504 8505 return SDValue(); 8506 } 8507 8508 SDValue SITargetLowering::performRcpCombine(SDNode *N, 8509 DAGCombinerInfo &DCI) const { 8510 EVT VT = N->getValueType(0); 8511 SDValue N0 = N->getOperand(0); 8512 8513 if (N0.isUndef()) 8514 return N0; 8515 8516 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || 8517 N0.getOpcode() == ISD::SINT_TO_FP)) { 8518 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, 8519 N->getFlags()); 8520 } 8521 8522 return AMDGPUTargetLowering::performRcpCombine(N, DCI); 8523 } 8524 8525 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, 8526 unsigned MaxDepth) const { 8527 unsigned Opcode = Op.getOpcode(); 8528 if (Opcode == ISD::FCANONICALIZE) 8529 return true; 8530 8531 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 8532 auto F = CFP->getValueAPF(); 8533 if (F.isNaN() && F.isSignaling()) 8534 return false; 8535 return !F.isDenormal() || denormalsEnabledForType(Op.getValueType()); 8536 } 8537 8538 // If source is a result of another standard FP operation it is already in 8539 // canonical form. 8540 if (MaxDepth == 0) 8541 return false; 8542 8543 switch (Opcode) { 8544 // These will flush denorms if required. 8545 case ISD::FADD: 8546 case ISD::FSUB: 8547 case ISD::FMUL: 8548 case ISD::FCEIL: 8549 case ISD::FFLOOR: 8550 case ISD::FMA: 8551 case ISD::FMAD: 8552 case ISD::FSQRT: 8553 case ISD::FDIV: 8554 case ISD::FREM: 8555 case ISD::FP_ROUND: 8556 case ISD::FP_EXTEND: 8557 case AMDGPUISD::FMUL_LEGACY: 8558 case AMDGPUISD::FMAD_FTZ: 8559 case AMDGPUISD::RCP: 8560 case AMDGPUISD::RSQ: 8561 case AMDGPUISD::RSQ_CLAMP: 8562 case AMDGPUISD::RCP_LEGACY: 8563 case AMDGPUISD::RSQ_LEGACY: 8564 case AMDGPUISD::RCP_IFLAG: 8565 case AMDGPUISD::TRIG_PREOP: 8566 case AMDGPUISD::DIV_SCALE: 8567 case AMDGPUISD::DIV_FMAS: 8568 case AMDGPUISD::DIV_FIXUP: 8569 case AMDGPUISD::FRACT: 8570 case AMDGPUISD::LDEXP: 8571 case AMDGPUISD::CVT_PKRTZ_F16_F32: 8572 case AMDGPUISD::CVT_F32_UBYTE0: 8573 case AMDGPUISD::CVT_F32_UBYTE1: 8574 case AMDGPUISD::CVT_F32_UBYTE2: 8575 case AMDGPUISD::CVT_F32_UBYTE3: 8576 return true; 8577 8578 // It can/will be lowered or combined as a bit operation. 8579 // Need to check their input recursively to handle. 
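  // For example, fcanonicalize (fneg v) is treated as canonical exactly when v
  // is, since flipping the sign bit cannot introduce a signaling NaN or an
  // unflushed denormal that was not already present (an informal restatement
  // of the recursion below).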
8580 case ISD::FNEG: 8581 case ISD::FABS: 8582 case ISD::FCOPYSIGN: 8583 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); 8584 8585 case ISD::FSIN: 8586 case ISD::FCOS: 8587 case ISD::FSINCOS: 8588 return Op.getValueType().getScalarType() != MVT::f16; 8589 8590 case ISD::FMINNUM: 8591 case ISD::FMAXNUM: 8592 case ISD::FMINNUM_IEEE: 8593 case ISD::FMAXNUM_IEEE: 8594 case AMDGPUISD::CLAMP: 8595 case AMDGPUISD::FMED3: 8596 case AMDGPUISD::FMAX3: 8597 case AMDGPUISD::FMIN3: { 8598 // FIXME: Shouldn't treat the generic operations different based these. 8599 // However, we aren't really required to flush the result from 8600 // minnum/maxnum.. 8601 8602 // snans will be quieted, so we only need to worry about denormals. 8603 if (Subtarget->supportsMinMaxDenormModes() || 8604 denormalsEnabledForType(Op.getValueType())) 8605 return true; 8606 8607 // Flushing may be required. 8608 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such 8609 // targets need to check their input recursively. 8610 8611 // FIXME: Does this apply with clamp? It's implemented with max. 8612 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { 8613 if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) 8614 return false; 8615 } 8616 8617 return true; 8618 } 8619 case ISD::SELECT: { 8620 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && 8621 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); 8622 } 8623 case ISD::BUILD_VECTOR: { 8624 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 8625 SDValue SrcOp = Op.getOperand(i); 8626 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) 8627 return false; 8628 } 8629 8630 return true; 8631 } 8632 case ISD::EXTRACT_VECTOR_ELT: 8633 case ISD::EXTRACT_SUBVECTOR: { 8634 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); 8635 } 8636 case ISD::INSERT_VECTOR_ELT: { 8637 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && 8638 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); 8639 } 8640 case ISD::UNDEF: 8641 // Could be anything. 8642 return false; 8643 8644 case ISD::BITCAST: { 8645 // Hack round the mess we make when legalizing extract_vector_elt 8646 SDValue Src = Op.getOperand(0); 8647 if (Src.getValueType() == MVT::i16 && 8648 Src.getOpcode() == ISD::TRUNCATE) { 8649 SDValue TruncSrc = Src.getOperand(0); 8650 if (TruncSrc.getValueType() == MVT::i32 && 8651 TruncSrc.getOpcode() == ISD::BITCAST && 8652 TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { 8653 return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); 8654 } 8655 } 8656 8657 return false; 8658 } 8659 case ISD::INTRINSIC_WO_CHAIN: { 8660 unsigned IntrinsicID 8661 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8662 // TODO: Handle more intrinsics 8663 switch (IntrinsicID) { 8664 case Intrinsic::amdgcn_cvt_pkrtz: 8665 case Intrinsic::amdgcn_cubeid: 8666 case Intrinsic::amdgcn_frexp_mant: 8667 case Intrinsic::amdgcn_fdot2: 8668 return true; 8669 default: 8670 break; 8671 } 8672 8673 LLVM_FALLTHROUGH; 8674 } 8675 default: 8676 return denormalsEnabledForType(Op.getValueType()) && 8677 DAG.isKnownNeverSNaN(Op); 8678 } 8679 8680 llvm_unreachable("invalid operation"); 8681 } 8682 8683 // Constant fold canonicalize. 8684 SDValue SITargetLowering::getCanonicalConstantFP( 8685 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { 8686 // Flush denormals to 0 if not enabled. 
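  // For example (illustration only): with f32 denormals disabled, the smallest
  // positive f32 denormal constant folds to +0.0 below, and a signaling NaN
  // constant is replaced with the default quiet NaN bit pattern.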
8687 if (C.isDenormal() && !denormalsEnabledForType(VT)) 8688 return DAG.getConstantFP(0.0, SL, VT); 8689 8690 if (C.isNaN()) { 8691 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 8692 if (C.isSignaling()) { 8693 // Quiet a signaling NaN. 8694 // FIXME: Is this supposed to preserve payload bits? 8695 return DAG.getConstantFP(CanonicalQNaN, SL, VT); 8696 } 8697 8698 // Make sure it is the canonical NaN bitpattern. 8699 // 8700 // TODO: Can we use -1 as the canonical NaN value since it's an inline 8701 // immediate? 8702 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 8703 return DAG.getConstantFP(CanonicalQNaN, SL, VT); 8704 } 8705 8706 // Already canonical. 8707 return DAG.getConstantFP(C, SL, VT); 8708 } 8709 8710 static bool vectorEltWillFoldAway(SDValue Op) { 8711 return Op.isUndef() || isa<ConstantFPSDNode>(Op); 8712 } 8713 8714 SDValue SITargetLowering::performFCanonicalizeCombine( 8715 SDNode *N, 8716 DAGCombinerInfo &DCI) const { 8717 SelectionDAG &DAG = DCI.DAG; 8718 SDValue N0 = N->getOperand(0); 8719 EVT VT = N->getValueType(0); 8720 8721 // fcanonicalize undef -> qnan 8722 if (N0.isUndef()) { 8723 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); 8724 return DAG.getConstantFP(QNaN, SDLoc(N), VT); 8725 } 8726 8727 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { 8728 EVT VT = N->getValueType(0); 8729 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); 8730 } 8731 8732 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), 8733 // (fcanonicalize k) 8734 // 8735 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 8736 8737 // TODO: This could be better with wider vectors that will be split to v2f16, 8738 // and to consider uses since there aren't that many packed operations. 8739 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && 8740 isTypeLegal(MVT::v2f16)) { 8741 SDLoc SL(N); 8742 SDValue NewElts[2]; 8743 SDValue Lo = N0.getOperand(0); 8744 SDValue Hi = N0.getOperand(1); 8745 EVT EltVT = Lo.getValueType(); 8746 8747 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { 8748 for (unsigned I = 0; I != 2; ++I) { 8749 SDValue Op = N0.getOperand(I); 8750 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 8751 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, 8752 CFP->getValueAPF()); 8753 } else if (Op.isUndef()) { 8754 // Handled below based on what the other operand is. 8755 NewElts[I] = Op; 8756 } else { 8757 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); 8758 } 8759 } 8760 8761 // If one half is undef, and one is constant, perfer a splat vector rather 8762 // than the normal qNaN. If it's a register, prefer 0.0 since that's 8763 // cheaper to use and may be free with a packed operation. 8764 if (NewElts[0].isUndef()) { 8765 if (isa<ConstantFPSDNode>(NewElts[1])) 8766 NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ? 8767 NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT); 8768 } 8769 8770 if (NewElts[1].isUndef()) { 8771 NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? 8772 NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); 8773 } 8774 8775 return DAG.getBuildVector(VT, SL, NewElts); 8776 } 8777 } 8778 8779 unsigned SrcOpc = N0.getOpcode(); 8780 8781 // If it's free to do so, push canonicalizes further up the source, which may 8782 // find a canonical source. 8783 // 8784 // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for 8785 // sNaNs. 
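  // For example (illustration only): fcanonicalize (fminnum x, 2.0) is
  // rewritten below as fminnum (fcanonicalize x), 2.0 when the inner node has
  // a single use, with the constant operand canonicalized by constant folding.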
8786 if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { 8787 auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 8788 if (CRHS && N0.hasOneUse()) { 8789 SDLoc SL(N); 8790 SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, 8791 N0.getOperand(0)); 8792 SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); 8793 DCI.AddToWorklist(Canon0.getNode()); 8794 8795 return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); 8796 } 8797 } 8798 8799 return isCanonicalized(DAG, N0) ? N0 : SDValue(); 8800 } 8801 8802 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 8803 switch (Opc) { 8804 case ISD::FMAXNUM: 8805 case ISD::FMAXNUM_IEEE: 8806 return AMDGPUISD::FMAX3; 8807 case ISD::SMAX: 8808 return AMDGPUISD::SMAX3; 8809 case ISD::UMAX: 8810 return AMDGPUISD::UMAX3; 8811 case ISD::FMINNUM: 8812 case ISD::FMINNUM_IEEE: 8813 return AMDGPUISD::FMIN3; 8814 case ISD::SMIN: 8815 return AMDGPUISD::SMIN3; 8816 case ISD::UMIN: 8817 return AMDGPUISD::UMIN3; 8818 default: 8819 llvm_unreachable("Not a min/max opcode"); 8820 } 8821 } 8822 8823 SDValue SITargetLowering::performIntMed3ImmCombine( 8824 SelectionDAG &DAG, const SDLoc &SL, 8825 SDValue Op0, SDValue Op1, bool Signed) const { 8826 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 8827 if (!K1) 8828 return SDValue(); 8829 8830 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 8831 if (!K0) 8832 return SDValue(); 8833 8834 if (Signed) { 8835 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 8836 return SDValue(); 8837 } else { 8838 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 8839 return SDValue(); 8840 } 8841 8842 EVT VT = K0->getValueType(0); 8843 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; 8844 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { 8845 return DAG.getNode(Med3Opc, SL, VT, 8846 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 8847 } 8848 8849 // If there isn't a 16-bit med3 operation, convert to 32-bit. 8850 MVT NVT = MVT::i32; 8851 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 8852 8853 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 8854 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 8855 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 8856 8857 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); 8858 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); 8859 } 8860 8861 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { 8862 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 8863 return C; 8864 8865 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { 8866 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) 8867 return C; 8868 } 8869 8870 return nullptr; 8871 } 8872 8873 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, 8874 const SDLoc &SL, 8875 SDValue Op0, 8876 SDValue Op1) const { 8877 ConstantFPSDNode *K1 = getSplatConstantFP(Op1); 8878 if (!K1) 8879 return SDValue(); 8880 8881 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); 8882 if (!K0) 8883 return SDValue(); 8884 8885 // Ordered >= (although NaN inputs should have folded away by now). 8886 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 8887 if (Cmp == APFloat::cmpGreaterThan) 8888 return SDValue(); 8889 8890 const MachineFunction &MF = DAG.getMachineFunction(); 8891 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 8892 8893 // TODO: Check IEEE bit enabled? 
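  // For example (illustrative values only): fminnum (fmaxnum x, 0.0), 1.0
  // becomes clamp x below when dx10_clamp is enabled, and
  // fminnum (fmaxnum x, 2.0), 4.0 becomes fmed3 x, 2.0, 4.0 when x is known
  // not to be a signaling NaN.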
8894 EVT VT = Op0.getValueType(); 8895 if (Info->getMode().DX10Clamp) { 8896 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the 8897 // hardware fmed3 behavior converting to a min. 8898 // FIXME: Should this be allowing -0.0? 8899 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) 8900 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); 8901 } 8902 8903 // med3 for f16 is only available on gfx9+, and not available for v2f16. 8904 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { 8905 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 8906 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would 8907 // then give the other result, which is different from med3 with a NaN 8908 // input. 8909 SDValue Var = Op0.getOperand(0); 8910 if (!DAG.isKnownNeverSNaN(Var)) 8911 return SDValue(); 8912 8913 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 8914 8915 if ((!K0->hasOneUse() || 8916 TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && 8917 (!K1->hasOneUse() || 8918 TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { 8919 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 8920 Var, SDValue(K0, 0), SDValue(K1, 0)); 8921 } 8922 } 8923 8924 return SDValue(); 8925 } 8926 8927 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 8928 DAGCombinerInfo &DCI) const { 8929 SelectionDAG &DAG = DCI.DAG; 8930 8931 EVT VT = N->getValueType(0); 8932 unsigned Opc = N->getOpcode(); 8933 SDValue Op0 = N->getOperand(0); 8934 SDValue Op1 = N->getOperand(1); 8935 8936 // Only do this if the inner op has one use since this will just increases 8937 // register pressure for no benefit. 8938 8939 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && 8940 !VT.isVector() && 8941 (VT == MVT::i32 || VT == MVT::f32 || 8942 ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { 8943 // max(max(a, b), c) -> max3(a, b, c) 8944 // min(min(a, b), c) -> min3(a, b, c) 8945 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 8946 SDLoc DL(N); 8947 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 8948 DL, 8949 N->getValueType(0), 8950 Op0.getOperand(0), 8951 Op0.getOperand(1), 8952 Op1); 8953 } 8954 8955 // Try commuted. 
8956 // max(a, max(b, c)) -> max3(a, b, c) 8957 // min(a, min(b, c)) -> min3(a, b, c) 8958 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 8959 SDLoc DL(N); 8960 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 8961 DL, 8962 N->getValueType(0), 8963 Op0, 8964 Op1.getOperand(0), 8965 Op1.getOperand(1)); 8966 } 8967 } 8968 8969 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 8970 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 8971 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 8972 return Med3; 8973 } 8974 8975 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 8976 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 8977 return Med3; 8978 } 8979 8980 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 8981 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 8982 (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || 8983 (Opc == AMDGPUISD::FMIN_LEGACY && 8984 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 8985 (VT == MVT::f32 || VT == MVT::f64 || 8986 (VT == MVT::f16 && Subtarget->has16BitInsts()) || 8987 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && 8988 Op0.hasOneUse()) { 8989 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 8990 return Res; 8991 } 8992 8993 return SDValue(); 8994 } 8995 8996 static bool isClampZeroToOne(SDValue A, SDValue B) { 8997 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { 8998 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { 8999 // FIXME: Should this be allowing -0.0? 9000 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || 9001 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); 9002 } 9003 } 9004 9005 return false; 9006 } 9007 9008 // FIXME: Should only worry about snans for version with chain. 9009 SDValue SITargetLowering::performFMed3Combine(SDNode *N, 9010 DAGCombinerInfo &DCI) const { 9011 EVT VT = N->getValueType(0); 9012 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and 9013 // NaNs. With a NaN input, the order of the operands may change the result. 9014 9015 SelectionDAG &DAG = DCI.DAG; 9016 SDLoc SL(N); 9017 9018 SDValue Src0 = N->getOperand(0); 9019 SDValue Src1 = N->getOperand(1); 9020 SDValue Src2 = N->getOperand(2); 9021 9022 if (isClampZeroToOne(Src0, Src1)) { 9023 // const_a, const_b, x -> clamp is safe in all cases including signaling 9024 // nans. 9025 // FIXME: Should this be allowing -0.0? 9026 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); 9027 } 9028 9029 const MachineFunction &MF = DAG.getMachineFunction(); 9030 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 9031 9032 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother 9033 // handling no dx10-clamp? 9034 if (Info->getMode().DX10Clamp) { 9035 // If NaNs is clamped to 0, we are free to reorder the inputs. 
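    // For example (illustration only): fmed3 0.0, x, 1.0 has its constant
    // operands moved into the last two slots by the swaps below, leaving x in
    // Src0, so the isClampZeroToOne check then turns the node into clamp x.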
9036 9037 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 9038 std::swap(Src0, Src1); 9039 9040 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) 9041 std::swap(Src1, Src2); 9042 9043 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 9044 std::swap(Src0, Src1); 9045 9046 if (isClampZeroToOne(Src1, Src2)) 9047 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); 9048 } 9049 9050 return SDValue(); 9051 } 9052 9053 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, 9054 DAGCombinerInfo &DCI) const { 9055 SDValue Src0 = N->getOperand(0); 9056 SDValue Src1 = N->getOperand(1); 9057 if (Src0.isUndef() && Src1.isUndef()) 9058 return DCI.DAG.getUNDEF(N->getValueType(0)); 9059 return SDValue(); 9060 } 9061 9062 SDValue SITargetLowering::performExtractVectorEltCombine( 9063 SDNode *N, DAGCombinerInfo &DCI) const { 9064 SDValue Vec = N->getOperand(0); 9065 SelectionDAG &DAG = DCI.DAG; 9066 9067 EVT VecVT = Vec.getValueType(); 9068 EVT EltVT = VecVT.getVectorElementType(); 9069 9070 if ((Vec.getOpcode() == ISD::FNEG || 9071 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { 9072 SDLoc SL(N); 9073 EVT EltVT = N->getValueType(0); 9074 SDValue Idx = N->getOperand(1); 9075 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 9076 Vec.getOperand(0), Idx); 9077 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); 9078 } 9079 9080 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) 9081 // => 9082 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) 9083 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) 9084 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt 9085 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { 9086 SDLoc SL(N); 9087 EVT EltVT = N->getValueType(0); 9088 SDValue Idx = N->getOperand(1); 9089 unsigned Opc = Vec.getOpcode(); 9090 9091 switch(Opc) { 9092 default: 9093 break; 9094 // TODO: Support other binary operations. 9095 case ISD::FADD: 9096 case ISD::FSUB: 9097 case ISD::FMUL: 9098 case ISD::ADD: 9099 case ISD::UMIN: 9100 case ISD::UMAX: 9101 case ISD::SMIN: 9102 case ISD::SMAX: 9103 case ISD::FMAXNUM: 9104 case ISD::FMINNUM: 9105 case ISD::FMAXNUM_IEEE: 9106 case ISD::FMINNUM_IEEE: { 9107 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 9108 Vec.getOperand(0), Idx); 9109 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 9110 Vec.getOperand(1), Idx); 9111 9112 DCI.AddToWorklist(Elt0.getNode()); 9113 DCI.AddToWorklist(Elt1.getNode()); 9114 return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); 9115 } 9116 } 9117 } 9118 9119 unsigned VecSize = VecVT.getSizeInBits(); 9120 unsigned EltSize = EltVT.getSizeInBits(); 9121 9122 // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) 9123 // This elminates non-constant index and subsequent movrel or scratch access. 9124 // Sub-dword vectors of size 2 dword or less have better implementation. 9125 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32 9126 // instructions. 
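  // For example (illustration only), a variable-index extract from a v4i32
  // vector expands below to
  //   select (idx==3), elt3, (select (idx==2), elt2, (select (idx==1), elt1, elt0))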
9127 if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) && 9128 !isa<ConstantSDNode>(N->getOperand(1))) { 9129 SDLoc SL(N); 9130 SDValue Idx = N->getOperand(1); 9131 EVT IdxVT = Idx.getValueType(); 9132 SDValue V; 9133 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { 9134 SDValue IC = DAG.getConstant(I, SL, IdxVT); 9135 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); 9136 if (I == 0) 9137 V = Elt; 9138 else 9139 V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); 9140 } 9141 return V; 9142 } 9143 9144 if (!DCI.isBeforeLegalize()) 9145 return SDValue(); 9146 9147 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit 9148 // elements. This exposes more load reduction opportunities by replacing 9149 // multiple small extract_vector_elements with a single 32-bit extract. 9150 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); 9151 if (isa<MemSDNode>(Vec) && 9152 EltSize <= 16 && 9153 EltVT.isByteSized() && 9154 VecSize > 32 && 9155 VecSize % 32 == 0 && 9156 Idx) { 9157 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); 9158 9159 unsigned BitIndex = Idx->getZExtValue() * EltSize; 9160 unsigned EltIdx = BitIndex / 32; 9161 unsigned LeftoverBitIdx = BitIndex % 32; 9162 SDLoc SL(N); 9163 9164 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); 9165 DCI.AddToWorklist(Cast.getNode()); 9166 9167 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, 9168 DAG.getConstant(EltIdx, SL, MVT::i32)); 9169 DCI.AddToWorklist(Elt.getNode()); 9170 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, 9171 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); 9172 DCI.AddToWorklist(Srl.getNode()); 9173 9174 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); 9175 DCI.AddToWorklist(Trunc.getNode()); 9176 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); 9177 } 9178 9179 return SDValue(); 9180 } 9181 9182 SDValue 9183 SITargetLowering::performInsertVectorEltCombine(SDNode *N, 9184 DAGCombinerInfo &DCI) const { 9185 SDValue Vec = N->getOperand(0); 9186 SDValue Idx = N->getOperand(2); 9187 EVT VecVT = Vec.getValueType(); 9188 EVT EltVT = VecVT.getVectorElementType(); 9189 unsigned VecSize = VecVT.getSizeInBits(); 9190 unsigned EltSize = EltVT.getSizeInBits(); 9191 9192 // INSERT_VECTOR_ELT (<n x e>, var-idx) 9193 // => BUILD_VECTOR n x select (e, const-idx) 9194 // This elminates non-constant index and subsequent movrel or scratch access. 9195 // Sub-dword vectors of size 2 dword or less have better implementation. 9196 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32 9197 // instructions. 9198 if (isa<ConstantSDNode>(Idx) || 9199 VecSize > 256 || (VecSize <= 64 && EltSize < 32)) 9200 return SDValue(); 9201 9202 SelectionDAG &DAG = DCI.DAG; 9203 SDLoc SL(N); 9204 SDValue Ins = N->getOperand(1); 9205 EVT IdxVT = Idx.getValueType(); 9206 9207 SmallVector<SDValue, 16> Ops; 9208 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { 9209 SDValue IC = DAG.getConstant(I, SL, IdxVT); 9210 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); 9211 SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); 9212 Ops.push_back(V); 9213 } 9214 9215 return DAG.getBuildVector(VecVT, SL, Ops); 9216 } 9217 9218 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, 9219 const SDNode *N0, 9220 const SDNode *N1) const { 9221 EVT VT = N0->getValueType(0); 9222 9223 // Only do this if we are not trying to support denormals. 
v_mad_f32 does not 9224 // support denormals ever. 9225 if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 9226 (VT == MVT::f16 && !Subtarget->hasFP16Denormals() && 9227 getSubtarget()->hasMadF16())) && 9228 isOperationLegal(ISD::FMAD, VT)) 9229 return ISD::FMAD; 9230 9231 const TargetOptions &Options = DAG.getTarget().Options; 9232 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 9233 (N0->getFlags().hasAllowContract() && 9234 N1->getFlags().hasAllowContract())) && 9235 isFMAFasterThanFMulAndFAdd(VT)) { 9236 return ISD::FMA; 9237 } 9238 9239 return 0; 9240 } 9241 9242 // For a reassociatable opcode perform: 9243 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform 9244 SDValue SITargetLowering::reassociateScalarOps(SDNode *N, 9245 SelectionDAG &DAG) const { 9246 EVT VT = N->getValueType(0); 9247 if (VT != MVT::i32 && VT != MVT::i64) 9248 return SDValue(); 9249 9250 unsigned Opc = N->getOpcode(); 9251 SDValue Op0 = N->getOperand(0); 9252 SDValue Op1 = N->getOperand(1); 9253 9254 if (!(Op0->isDivergent() ^ Op1->isDivergent())) 9255 return SDValue(); 9256 9257 if (Op0->isDivergent()) 9258 std::swap(Op0, Op1); 9259 9260 if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) 9261 return SDValue(); 9262 9263 SDValue Op2 = Op1.getOperand(1); 9264 Op1 = Op1.getOperand(0); 9265 if (!(Op1->isDivergent() ^ Op2->isDivergent())) 9266 return SDValue(); 9267 9268 if (Op1->isDivergent()) 9269 std::swap(Op1, Op2); 9270 9271 // If either operand is constant this will conflict with 9272 // DAGCombiner::ReassociateOps(). 9273 if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || 9274 DAG.isConstantIntBuildVectorOrConstantInt(Op1)) 9275 return SDValue(); 9276 9277 SDLoc SL(N); 9278 SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); 9279 return DAG.getNode(Opc, SL, VT, Add1, Op2); 9280 } 9281 9282 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, 9283 EVT VT, 9284 SDValue N0, SDValue N1, SDValue N2, 9285 bool Signed) { 9286 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; 9287 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); 9288 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); 9289 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); 9290 } 9291 9292 SDValue SITargetLowering::performAddCombine(SDNode *N, 9293 DAGCombinerInfo &DCI) const { 9294 SelectionDAG &DAG = DCI.DAG; 9295 EVT VT = N->getValueType(0); 9296 SDLoc SL(N); 9297 SDValue LHS = N->getOperand(0); 9298 SDValue RHS = N->getOperand(1); 9299 9300 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) 9301 && Subtarget->hasMad64_32() && 9302 !VT.isVector() && VT.getScalarSizeInBits() > 32 && 9303 VT.getScalarSizeInBits() <= 64) { 9304 if (LHS.getOpcode() != ISD::MUL) 9305 std::swap(LHS, RHS); 9306 9307 SDValue MulLHS = LHS.getOperand(0); 9308 SDValue MulRHS = LHS.getOperand(1); 9309 SDValue AddRHS = RHS; 9310 9311 // TODO: Maybe restrict if SGPR inputs. 
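    // For example (illustration only):
    //   i64 (add (mul (zext i32:a), (zext i32:b)), c)
    // is turned into a mad_u64_u32 a, b, c node below once both multiplicands
    // are known to fit in 32 bits; the signed case uses mad_i64_i32.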
9312 if (numBitsUnsigned(MulLHS, DAG) <= 32 && 9313 numBitsUnsigned(MulRHS, DAG) <= 32) { 9314 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); 9315 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); 9316 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); 9317 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); 9318 } 9319 9320 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { 9321 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); 9322 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); 9323 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); 9324 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); 9325 } 9326 9327 return SDValue(); 9328 } 9329 9330 if (SDValue V = reassociateScalarOps(N, DAG)) { 9331 return V; 9332 } 9333 9334 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) 9335 return SDValue(); 9336 9337 // add x, zext (setcc) => addcarry x, 0, setcc 9338 // add x, sext (setcc) => subcarry x, 0, setcc 9339 unsigned Opc = LHS.getOpcode(); 9340 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || 9341 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) 9342 std::swap(RHS, LHS); 9343 9344 Opc = RHS.getOpcode(); 9345 switch (Opc) { 9346 default: break; 9347 case ISD::ZERO_EXTEND: 9348 case ISD::SIGN_EXTEND: 9349 case ISD::ANY_EXTEND: { 9350 auto Cond = RHS.getOperand(0); 9351 if (!isBoolSGPR(Cond)) 9352 break; 9353 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); 9354 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; 9355 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; 9356 return DAG.getNode(Opc, SL, VTList, Args); 9357 } 9358 case ISD::ADDCARRY: { 9359 // add x, (addcarry y, 0, cc) => addcarry x, y, cc 9360 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 9361 if (!C || C->getZExtValue() != 0) break; 9362 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; 9363 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); 9364 } 9365 } 9366 return SDValue(); 9367 } 9368 9369 SDValue SITargetLowering::performSubCombine(SDNode *N, 9370 DAGCombinerInfo &DCI) const { 9371 SelectionDAG &DAG = DCI.DAG; 9372 EVT VT = N->getValueType(0); 9373 9374 if (VT != MVT::i32) 9375 return SDValue(); 9376 9377 SDLoc SL(N); 9378 SDValue LHS = N->getOperand(0); 9379 SDValue RHS = N->getOperand(1); 9380 9381 if (LHS.getOpcode() == ISD::SUBCARRY) { 9382 // sub (subcarry x, 0, cc), y => subcarry x, y, cc 9383 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 9384 if (!C || !C->isNullValue()) 9385 return SDValue(); 9386 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; 9387 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); 9388 } 9389 return SDValue(); 9390 } 9391 9392 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, 9393 DAGCombinerInfo &DCI) const { 9394 9395 if (N->getValueType(0) != MVT::i32) 9396 return SDValue(); 9397 9398 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 9399 if (!C || C->getZExtValue() != 0) 9400 return SDValue(); 9401 9402 SelectionDAG &DAG = DCI.DAG; 9403 SDValue LHS = N->getOperand(0); 9404 9405 // addcarry (add x, y), 0, cc => addcarry x, y, cc 9406 // subcarry (sub x, y), 0, cc => subcarry x, y, cc 9407 unsigned LHSOpc = LHS.getOpcode(); 9408 unsigned Opc = N->getOpcode(); 9409 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || 9410 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { 9411 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; 9412 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), 
Args); 9413 } 9414 return SDValue(); 9415 } 9416 9417 SDValue SITargetLowering::performFAddCombine(SDNode *N, 9418 DAGCombinerInfo &DCI) const { 9419 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 9420 return SDValue(); 9421 9422 SelectionDAG &DAG = DCI.DAG; 9423 EVT VT = N->getValueType(0); 9424 9425 SDLoc SL(N); 9426 SDValue LHS = N->getOperand(0); 9427 SDValue RHS = N->getOperand(1); 9428 9429 // These should really be instruction patterns, but writing patterns with 9430 // source modiifiers is a pain. 9431 9432 // fadd (fadd (a, a), b) -> mad 2.0, a, b 9433 if (LHS.getOpcode() == ISD::FADD) { 9434 SDValue A = LHS.getOperand(0); 9435 if (A == LHS.getOperand(1)) { 9436 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 9437 if (FusedOp != 0) { 9438 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 9439 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); 9440 } 9441 } 9442 } 9443 9444 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 9445 if (RHS.getOpcode() == ISD::FADD) { 9446 SDValue A = RHS.getOperand(0); 9447 if (A == RHS.getOperand(1)) { 9448 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 9449 if (FusedOp != 0) { 9450 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 9451 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); 9452 } 9453 } 9454 } 9455 9456 return SDValue(); 9457 } 9458 9459 SDValue SITargetLowering::performFSubCombine(SDNode *N, 9460 DAGCombinerInfo &DCI) const { 9461 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 9462 return SDValue(); 9463 9464 SelectionDAG &DAG = DCI.DAG; 9465 SDLoc SL(N); 9466 EVT VT = N->getValueType(0); 9467 assert(!VT.isVector()); 9468 9469 // Try to get the fneg to fold into the source modifier. This undoes generic 9470 // DAG combines and folds them into the mad. 9471 // 9472 // Only do this if we are not trying to support denormals. v_mad_f32 does 9473 // not support denormals ever. 9474 SDValue LHS = N->getOperand(0); 9475 SDValue RHS = N->getOperand(1); 9476 if (LHS.getOpcode() == ISD::FADD) { 9477 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 9478 SDValue A = LHS.getOperand(0); 9479 if (A == LHS.getOperand(1)) { 9480 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 9481 if (FusedOp != 0){ 9482 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 9483 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 9484 9485 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); 9486 } 9487 } 9488 } 9489 9490 if (RHS.getOpcode() == ISD::FADD) { 9491 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 9492 9493 SDValue A = RHS.getOperand(0); 9494 if (A == RHS.getOperand(1)) { 9495 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 9496 if (FusedOp != 0){ 9497 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); 9498 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); 9499 } 9500 } 9501 } 9502 9503 return SDValue(); 9504 } 9505 9506 SDValue SITargetLowering::performFMACombine(SDNode *N, 9507 DAGCombinerInfo &DCI) const { 9508 SelectionDAG &DAG = DCI.DAG; 9509 EVT VT = N->getValueType(0); 9510 SDLoc SL(N); 9511 9512 if (!Subtarget->hasDot2Insts() || VT != MVT::f32) 9513 return SDValue(); 9514 9515 // FMA((F32)S0.x, (F32)S1. 
x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) -> 9516 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)) 9517 SDValue Op1 = N->getOperand(0); 9518 SDValue Op2 = N->getOperand(1); 9519 SDValue FMA = N->getOperand(2); 9520 9521 if (FMA.getOpcode() != ISD::FMA || 9522 Op1.getOpcode() != ISD::FP_EXTEND || 9523 Op2.getOpcode() != ISD::FP_EXTEND) 9524 return SDValue(); 9525 9526 // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero, 9527 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract 9528 // is sufficient to allow generaing fdot2. 9529 const TargetOptions &Options = DAG.getTarget().Options; 9530 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 9531 (N->getFlags().hasAllowContract() && 9532 FMA->getFlags().hasAllowContract())) { 9533 Op1 = Op1.getOperand(0); 9534 Op2 = Op2.getOperand(0); 9535 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9536 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 9537 return SDValue(); 9538 9539 SDValue Vec1 = Op1.getOperand(0); 9540 SDValue Idx1 = Op1.getOperand(1); 9541 SDValue Vec2 = Op2.getOperand(0); 9542 9543 SDValue FMAOp1 = FMA.getOperand(0); 9544 SDValue FMAOp2 = FMA.getOperand(1); 9545 SDValue FMAAcc = FMA.getOperand(2); 9546 9547 if (FMAOp1.getOpcode() != ISD::FP_EXTEND || 9548 FMAOp2.getOpcode() != ISD::FP_EXTEND) 9549 return SDValue(); 9550 9551 FMAOp1 = FMAOp1.getOperand(0); 9552 FMAOp2 = FMAOp2.getOperand(0); 9553 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9554 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 9555 return SDValue(); 9556 9557 SDValue Vec3 = FMAOp1.getOperand(0); 9558 SDValue Vec4 = FMAOp2.getOperand(0); 9559 SDValue Idx2 = FMAOp1.getOperand(1); 9560 9561 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || 9562 // Idx1 and Idx2 cannot be the same. 
9563 Idx1 == Idx2) 9564 return SDValue(); 9565 9566 if (Vec1 == Vec2 || Vec3 == Vec4) 9567 return SDValue(); 9568 9569 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) 9570 return SDValue(); 9571 9572 if ((Vec1 == Vec3 && Vec2 == Vec4) || 9573 (Vec1 == Vec4 && Vec2 == Vec3)) { 9574 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, 9575 DAG.getTargetConstant(0, SL, MVT::i1)); 9576 } 9577 } 9578 return SDValue(); 9579 } 9580 9581 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 9582 DAGCombinerInfo &DCI) const { 9583 SelectionDAG &DAG = DCI.DAG; 9584 SDLoc SL(N); 9585 9586 SDValue LHS = N->getOperand(0); 9587 SDValue RHS = N->getOperand(1); 9588 EVT VT = LHS.getValueType(); 9589 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 9590 9591 auto CRHS = dyn_cast<ConstantSDNode>(RHS); 9592 if (!CRHS) { 9593 CRHS = dyn_cast<ConstantSDNode>(LHS); 9594 if (CRHS) { 9595 std::swap(LHS, RHS); 9596 CC = getSetCCSwappedOperands(CC); 9597 } 9598 } 9599 9600 if (CRHS) { 9601 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && 9602 isBoolSGPR(LHS.getOperand(0))) { 9603 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 9604 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc 9605 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 9606 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc 9607 if ((CRHS->isAllOnesValue() && 9608 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || 9609 (CRHS->isNullValue() && 9610 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) 9611 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 9612 DAG.getConstant(-1, SL, MVT::i1)); 9613 if ((CRHS->isAllOnesValue() && 9614 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || 9615 (CRHS->isNullValue() && 9616 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) 9617 return LHS.getOperand(0); 9618 } 9619 9620 uint64_t CRHSVal = CRHS->getZExtValue(); 9621 if ((CC == ISD::SETEQ || CC == ISD::SETNE) && 9622 LHS.getOpcode() == ISD::SELECT && 9623 isa<ConstantSDNode>(LHS.getOperand(1)) && 9624 isa<ConstantSDNode>(LHS.getOperand(2)) && 9625 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && 9626 isBoolSGPR(LHS.getOperand(0))) { 9627 // Given CT != FT: 9628 // setcc (select cc, CT, CF), CF, eq => xor cc, -1 9629 // setcc (select cc, CT, CF), CF, ne => cc 9630 // setcc (select cc, CT, CF), CT, ne => xor cc, -1 9631 // setcc (select cc, CT, CF), CT, eq => cc 9632 uint64_t CT = LHS.getConstantOperandVal(1); 9633 uint64_t CF = LHS.getConstantOperandVal(2); 9634 9635 if ((CF == CRHSVal && CC == ISD::SETEQ) || 9636 (CT == CRHSVal && CC == ISD::SETNE)) 9637 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 9638 DAG.getConstant(-1, SL, MVT::i1)); 9639 if ((CF == CRHSVal && CC == ISD::SETNE) || 9640 (CT == CRHSVal && CC == ISD::SETEQ)) 9641 return LHS.getOperand(0); 9642 } 9643 } 9644 9645 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && 9646 VT != MVT::f16)) 9647 return SDValue(); 9648 9649 // Match isinf/isfinite pattern 9650 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 9651 // (fcmp one (fabs x), inf) -> (fp_class x, 9652 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero) 9653 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { 9654 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 9655 if (!CRHS) 9656 return SDValue(); 9657 9658 const APFloat &APF = 
CRHS->getValueAPF(); 9659 if (APF.isInfinity() && !APF.isNegative()) { 9660 const unsigned IsInfMask = SIInstrFlags::P_INFINITY | 9661 SIInstrFlags::N_INFINITY; 9662 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | 9663 SIInstrFlags::P_ZERO | 9664 SIInstrFlags::N_NORMAL | 9665 SIInstrFlags::P_NORMAL | 9666 SIInstrFlags::N_SUBNORMAL | 9667 SIInstrFlags::P_SUBNORMAL; 9668 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; 9669 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 9670 DAG.getConstant(Mask, SL, MVT::i32)); 9671 } 9672 } 9673 9674 return SDValue(); 9675 } 9676 9677 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, 9678 DAGCombinerInfo &DCI) const { 9679 SelectionDAG &DAG = DCI.DAG; 9680 SDLoc SL(N); 9681 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 9682 9683 SDValue Src = N->getOperand(0); 9684 SDValue Srl = N->getOperand(0); 9685 if (Srl.getOpcode() == ISD::ZERO_EXTEND) 9686 Srl = Srl.getOperand(0); 9687 9688 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 9689 if (Srl.getOpcode() == ISD::SRL) { 9690 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 9691 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 9692 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 9693 9694 if (const ConstantSDNode *C = 9695 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) { 9696 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), 9697 EVT(MVT::i32)); 9698 9699 unsigned SrcOffset = C->getZExtValue() + 8 * Offset; 9700 if (SrcOffset < 32 && SrcOffset % 8 == 0) { 9701 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL, 9702 MVT::f32, Srl); 9703 } 9704 } 9705 } 9706 9707 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 9708 9709 KnownBits Known; 9710 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 9711 !DCI.isBeforeLegalizeOps()); 9712 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9713 if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) { 9714 DCI.CommitTargetLoweringOpt(TLO); 9715 } 9716 9717 return SDValue(); 9718 } 9719 9720 SDValue SITargetLowering::performClampCombine(SDNode *N, 9721 DAGCombinerInfo &DCI) const { 9722 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 9723 if (!CSrc) 9724 return SDValue(); 9725 9726 const MachineFunction &MF = DCI.DAG.getMachineFunction(); 9727 const APFloat &F = CSrc->getValueAPF(); 9728 APFloat Zero = APFloat::getZero(F.getSemantics()); 9729 APFloat::cmpResult Cmp0 = F.compare(Zero); 9730 if (Cmp0 == APFloat::cmpLessThan || 9731 (Cmp0 == APFloat::cmpUnordered && 9732 MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { 9733 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); 9734 } 9735 9736 APFloat One(F.getSemantics(), "1.0"); 9737 APFloat::cmpResult Cmp1 = F.compare(One); 9738 if (Cmp1 == APFloat::cmpGreaterThan) 9739 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); 9740 9741 return SDValue(CSrc, 0); 9742 } 9743 9744 9745 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 9746 DAGCombinerInfo &DCI) const { 9747 if (getTargetMachine().getOptLevel() == CodeGenOpt::None) 9748 return SDValue(); 9749 switch (N->getOpcode()) { 9750 default: 9751 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 9752 case ISD::ADD: 9753 return performAddCombine(N, DCI); 9754 case ISD::SUB: 9755 return performSubCombine(N, DCI); 9756 case ISD::ADDCARRY: 9757 case ISD::SUBCARRY: 9758 return performAddCarrySubCarryCombine(N, DCI); 9759 case ISD::FADD: 
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINNUM_IEEE:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
    return performMinMaxCombine(N, DCI);
  case ISD::FMA:
    return performFMACombine(N, DCI);
  case ISD::LOAD: {
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
    LLVM_FALLTHROUGH;
  }
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD_FADD:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC:
  case AMDGPUISD::ATOMIC_LOAD_FMIN:
  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case ISD::ZERO_EXTEND:
    return performZeroExtendCombine(N, DCI);
  case ISD::SIGN_EXTEND_INREG:
    return performSignExtendInRegCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::RCP:
    return performRcpCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  case AMDGPUISD::FMED3:
    return performFMed3Combine(N, DCI);
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
    return performCvtPkRTZCombine(N, DCI);
  case AMDGPUISD::CLAMP:
    return performClampCombine(N, DCI);
  case ISD::SCALAR_TO_VECTOR: {
    SelectionDAG &DAG = DCI.DAG;
    EVT VT = N->getValueType(0);

    // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
    if (VT == MVT::v2i16 || VT == MVT::v2f16) {
      SDLoc SL(N);
      SDValue Src = N->getOperand(0);
      EVT EltVT = Src.getValueType();
      if (EltVT == MVT::f16)
        Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);

      SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
      return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
    }

    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    return performExtractVectorEltCombine(N, DCI);
  case ISD::INSERT_VECTOR_ELT:
9870 return performInsertVectorEltCombine(N, DCI); 9871 } 9872 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 9873 } 9874 9875 /// Helper function for adjustWritemask 9876 static unsigned SubIdx2Lane(unsigned Idx) { 9877 switch (Idx) { 9878 default: return 0; 9879 case AMDGPU::sub0: return 0; 9880 case AMDGPU::sub1: return 1; 9881 case AMDGPU::sub2: return 2; 9882 case AMDGPU::sub3: return 3; 9883 case AMDGPU::sub4: return 4; // Possible with TFE/LWE 9884 } 9885 } 9886 9887 /// Adjust the writemask of MIMG instructions 9888 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, 9889 SelectionDAG &DAG) const { 9890 unsigned Opcode = Node->getMachineOpcode(); 9891 9892 // Subtract 1 because the vdata output is not a MachineSDNode operand. 9893 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; 9894 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) 9895 return Node; // not implemented for D16 9896 9897 SDNode *Users[5] = { nullptr }; 9898 unsigned Lane = 0; 9899 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; 9900 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); 9901 unsigned NewDmask = 0; 9902 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; 9903 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; 9904 bool UsesTFC = (Node->getConstantOperandVal(TFEIdx) || 9905 Node->getConstantOperandVal(LWEIdx)) ? 1 : 0; 9906 unsigned TFCLane = 0; 9907 bool HasChain = Node->getNumValues() > 1; 9908 9909 if (OldDmask == 0) { 9910 // These are folded out, but on the chance it happens don't assert. 9911 return Node; 9912 } 9913 9914 unsigned OldBitsSet = countPopulation(OldDmask); 9915 // Work out which is the TFE/LWE lane if that is enabled. 9916 if (UsesTFC) { 9917 TFCLane = OldBitsSet; 9918 } 9919 9920 // Try to figure out the used register components 9921 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 9922 I != E; ++I) { 9923 9924 // Don't look at users of the chain. 9925 if (I.getUse().getResNo() != 0) 9926 continue; 9927 9928 // Abort if we can't understand the usage 9929 if (!I->isMachineOpcode() || 9930 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 9931 return Node; 9932 9933 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. 9934 // Note that subregs are packed, i.e. Lane==0 is the first bit set 9935 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 9936 // set, etc. 9937 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 9938 9939 // Check if the use is for the TFE/LWE generated result at VGPRn+1. 9940 if (UsesTFC && Lane == TFCLane) { 9941 Users[Lane] = *I; 9942 } else { 9943 // Set which texture component corresponds to the lane. 9944 unsigned Comp; 9945 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { 9946 Comp = countTrailingZeros(Dmask); 9947 Dmask &= ~(1 << Comp); 9948 } 9949 9950 // Abort if we have more than one user per component. 9951 if (Users[Lane]) 9952 return Node; 9953 9954 Users[Lane] = *I; 9955 NewDmask |= 1 << Comp; 9956 } 9957 } 9958 9959 // Don't allow 0 dmask, as hardware assumes one channel enabled. 9960 bool NoChannels = !NewDmask; 9961 if (NoChannels) { 9962 if (!UsesTFC) { 9963 // No uses of the result and not using TFC. Then do nothing. 
      return Node;
    }
    // If the original dmask has one channel - then nothing to do
    if (OldBitsSet == 1)
      return Node;
    // Use an arbitrary dmask - required for the instruction to work
    NewDmask = 1;
  }
  // Abort if there's no change
  if (NewDmask == OldDmask)
    return Node;

  unsigned BitsSet = countPopulation(NewDmask);

  // Check for TFE or LWE - increase the number of channels by one to account
  // for the extra return value.
  // This will need adjustment for D16 if this is also included in
  // adjustWriteMask (this function), but at present D16 is excluded.
  unsigned NewChannels = BitsSet + UsesTFC;

  int NewOpcode =
      AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
  assert(NewOpcode != -1 &&
         NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
         "failed to find equivalent MIMG op");

  // Adjust the writemask in the node
  SmallVector<SDValue, 12> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());

  MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();

  MVT ResultVT = NewChannels == 1 ?
    SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
                           NewChannels == 5 ? 8 : NewChannels);
  SDVTList NewVTList = HasChain ?
    DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);

  MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
                                              NewVTList, Ops);

  if (HasChain) {
    // Update chain.
    DAG.setNodeMemRefs(NewNode, Node->memoperands());
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
  }

  if (NewChannels == 1) {
    assert(Node->hasNUsesOfValue(1, 0));
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
                                      SDLoc(Node), Users[Lane]->getValueType(0),
                                      SDValue(NewNode, 0));
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return nullptr;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
    SDNode *User = Users[i];
    if (!User) {
      // Handle the special case of NoChannels. We set NewDmask to 1 above, but
      // Users[0] is still nullptr because channel 0 doesn't really have a use.
      if (i || !NoChannels)
        continue;
    } else {
      SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
      DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
    }

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
    }
  }

  DAG.RemoveDeadNode(Node);
  return nullptr;
}

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
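///
/// Illustrative sketch (hypothetical operands, not taken from a real test):
/// a frame index operand such as
///   REG_SEQUENCE ..., <frame index>, ...
/// is rewritten below as
///   %tmp = S_MOV_B32 <frame index>
///   REG_SEQUENCE ..., %tmp, ...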
10059 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, 10060 SelectionDAG &DAG) const { 10061 if (Node->getOpcode() == ISD::CopyToReg) { 10062 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); 10063 SDValue SrcVal = Node->getOperand(2); 10064 10065 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have 10066 // to try understanding copies to physical registers. 10067 if (SrcVal.getValueType() == MVT::i1 && 10068 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) { 10069 SDLoc SL(Node); 10070 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 10071 SDValue VReg = DAG.getRegister( 10072 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); 10073 10074 SDNode *Glued = Node->getGluedNode(); 10075 SDValue ToVReg 10076 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, 10077 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); 10078 SDValue ToResultReg 10079 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), 10080 VReg, ToVReg.getValue(1)); 10081 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); 10082 DAG.RemoveDeadNode(Node); 10083 return ToResultReg.getNode(); 10084 } 10085 } 10086 10087 SmallVector<SDValue, 8> Ops; 10088 for (unsigned i = 0; i < Node->getNumOperands(); ++i) { 10089 if (!isFrameIndexOp(Node->getOperand(i))) { 10090 Ops.push_back(Node->getOperand(i)); 10091 continue; 10092 } 10093 10094 SDLoc DL(Node); 10095 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, 10096 Node->getOperand(i).getValueType(), 10097 Node->getOperand(i)), 0)); 10098 } 10099 10100 return DAG.UpdateNodeOperands(Node, Ops); 10101 } 10102 10103 /// Fold the instructions after selecting them. 10104 /// Returns null if users were already updated. 10105 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, 10106 SelectionDAG &DAG) const { 10107 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 10108 unsigned Opcode = Node->getMachineOpcode(); 10109 10110 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && 10111 !TII->isGather4(Opcode)) { 10112 return adjustWritemask(Node, DAG); 10113 } 10114 10115 if (Opcode == AMDGPU::INSERT_SUBREG || 10116 Opcode == AMDGPU::REG_SEQUENCE) { 10117 legalizeTargetIndependentNode(Node, DAG); 10118 return Node; 10119 } 10120 10121 switch (Opcode) { 10122 case AMDGPU::V_DIV_SCALE_F32: 10123 case AMDGPU::V_DIV_SCALE_F64: { 10124 // Satisfy the operand register constraint when one of the inputs is 10125 // undefined. Ordinarily each undef value will have its own implicit_def of 10126 // a vreg, so force these to use a single register. 10127 SDValue Src0 = Node->getOperand(0); 10128 SDValue Src1 = Node->getOperand(1); 10129 SDValue Src2 = Node->getOperand(2); 10130 10131 if ((Src0.isMachineOpcode() && 10132 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && 10133 (Src0 == Src1 || Src0 == Src2)) 10134 break; 10135 10136 MVT VT = Src0.getValueType().getSimpleVT(); 10137 const TargetRegisterClass *RC = 10138 getRegClassFor(VT, Src0.getNode()->isDivergent()); 10139 10140 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 10141 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); 10142 10143 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), 10144 UndefReg, Src0, SDValue()); 10145 10146 // src0 must be the same register as src1 or src2, even if the value is 10147 // undefined, so make sure we don't violate this constraint. 
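    // Note (summary of the fix-up below): when src0 is an IMPLICIT_DEF, reuse
    // src1 or src2 if one of them is defined; if all inputs are undef, point
    // src0 and src1 at the single undef register created above.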
    if (Src0.isMachineOpcode() &&
        Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
      if (Src1.isMachineOpcode() &&
          Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src1;
      else if (Src2.isMachineOpcode() &&
               Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src2;
      else {
        assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
        Src0 = UndefReg;
        Src1 = UndefReg;
      }
    } else
      break;

    SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
    for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
      Ops.push_back(Node->getOperand(I));

    Ops.push_back(ImpDef.getValue(1));
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  case AMDGPU::V_PERMLANE16_B32:
  case AMDGPU::V_PERMLANEX16_B32: {
    ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
    ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
    if (!FI->getZExtValue() && !BC->getZExtValue())
      break;
    SDValue VDstIn = Node->getOperand(6);
    if (VDstIn.isMachineOpcode()
        && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
      break;
    MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                               SDLoc(Node), MVT::i32);
    SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
                                    SDValue(BC, 0), Node->getOperand(3),
                                    Node->getOperand(4), Node->getOperand(5),
                                    SDValue(ImpDef, 0), Node->getOperand(7) };
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  default:
    break;
  }

  return Node;
}

/// Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);

    // Prefer VGPRs over AGPRs in mAI instructions where possible.
    // This saves a chain-copy of registers and better balances register
    // use between vgpr and agpr, as agpr tuples tend to be big.
    if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
      unsigned Opc = MI.getOpcode();
      const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
      for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
                      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
        if (I == -1)
          break;
        MachineOperand &Op = MI.getOperand(I);
        if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
             OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
            !TargetRegisterInfo::isVirtualRegister(Op.getReg()) ||
            !TRI->isAGPR(MRI, Op.getReg()))
          continue;
        auto *Src = MRI.getUniqueVRegDef(Op.getReg());
        if (!Src || !Src->isCopy() ||
            !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
          continue;
        auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
        auto *NewRC = TRI->getEquivalentVGPRClass(RC);
        // All uses of agpr64 and agpr32 can also accept vgpr except for
        // v_accvgpr_read, but we do not produce agpr reads during selection,
        // so no use checks are needed.
10233 MRI.setRegClass(Op.getReg(), NewRC); 10234 } 10235 } 10236 10237 return; 10238 } 10239 10240 // Replace unused atomics with the no return version. 10241 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); 10242 if (NoRetAtomicOp != -1) { 10243 if (!Node->hasAnyUseOfValue(0)) { 10244 MI.setDesc(TII->get(NoRetAtomicOp)); 10245 MI.RemoveOperand(0); 10246 return; 10247 } 10248 10249 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg 10250 // instruction, because the return type of these instructions is a vec2 of 10251 // the memory type, so it can be tied to the input operand. 10252 // This means these instructions always have a use, so we need to add a 10253 // special case to check if the atomic has only one extract_subreg use, 10254 // which itself has no uses. 10255 if ((Node->hasNUsesOfValue(1, 0) && 10256 Node->use_begin()->isMachineOpcode() && 10257 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && 10258 !Node->use_begin()->hasAnyUseOfValue(0))) { 10259 unsigned Def = MI.getOperand(0).getReg(); 10260 10261 // Change this into a noret atomic. 10262 MI.setDesc(TII->get(NoRetAtomicOp)); 10263 MI.RemoveOperand(0); 10264 10265 // If we only remove the def operand from the atomic instruction, the 10266 // extract_subreg will be left with a use of a vreg without a def. 10267 // So we need to insert an implicit_def to avoid machine verifier 10268 // errors. 10269 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), 10270 TII->get(AMDGPU::IMPLICIT_DEF), Def); 10271 } 10272 return; 10273 } 10274 } 10275 10276 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, 10277 uint64_t Val) { 10278 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 10279 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 10280 } 10281 10282 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 10283 const SDLoc &DL, 10284 SDValue Ptr) const { 10285 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 10286 10287 // Build the half of the subregister with the constants before building the 10288 // full 128-bit register. If we are building multiple resource descriptors, 10289 // this will allow CSEing of the 2-component register. 10290 const SDValue Ops0[] = { 10291 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 10292 buildSMovImm32(DAG, DL, 0), 10293 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 10294 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 10295 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 10296 }; 10297 10298 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 10299 MVT::v2i32, Ops0), 0); 10300 10301 // Combine the constants and the pointer. 10302 const SDValue Ops1[] = { 10303 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 10304 Ptr, 10305 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 10306 SubRegHi, 10307 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 10308 }; 10309 10310 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 10311 } 10312 10313 /// Return a resource descriptor with the 'Add TID' bit enabled 10314 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 10315 /// of the resource descriptor) to create an offset, which is added to 10316 /// the resource pointer. 
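///
/// Layout produced below (sketch): the descriptor is assembled as a
/// REG_SEQUENCE with sub0 = pointer lo, sub1 = pointer hi (or'd with
/// RsrcDword1), sub2 = RsrcDword2And3[31:0] and sub3 = RsrcDword2And3[63:32].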
10317 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, 10318 SDValue Ptr, uint32_t RsrcDword1, 10319 uint64_t RsrcDword2And3) const { 10320 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 10321 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 10322 if (RsrcDword1) { 10323 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 10324 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 10325 0); 10326 } 10327 10328 SDValue DataLo = buildSMovImm32(DAG, DL, 10329 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 10330 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 10331 10332 const SDValue Ops[] = { 10333 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 10334 PtrLo, 10335 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 10336 PtrHi, 10337 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 10338 DataLo, 10339 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 10340 DataHi, 10341 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 10342 }; 10343 10344 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 10345 } 10346 10347 //===----------------------------------------------------------------------===// 10348 // SI Inline Assembly Support 10349 //===----------------------------------------------------------------------===// 10350 10351 std::pair<unsigned, const TargetRegisterClass *> 10352 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 10353 StringRef Constraint, 10354 MVT VT) const { 10355 const TargetRegisterClass *RC = nullptr; 10356 if (Constraint.size() == 1) { 10357 switch (Constraint[0]) { 10358 default: 10359 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 10360 case 's': 10361 case 'r': 10362 switch (VT.getSizeInBits()) { 10363 default: 10364 return std::make_pair(0U, nullptr); 10365 case 32: 10366 case 16: 10367 RC = &AMDGPU::SReg_32_XM0RegClass; 10368 break; 10369 case 64: 10370 RC = &AMDGPU::SGPR_64RegClass; 10371 break; 10372 case 96: 10373 RC = &AMDGPU::SReg_96RegClass; 10374 break; 10375 case 128: 10376 RC = &AMDGPU::SReg_128RegClass; 10377 break; 10378 case 160: 10379 RC = &AMDGPU::SReg_160RegClass; 10380 break; 10381 case 256: 10382 RC = &AMDGPU::SReg_256RegClass; 10383 break; 10384 case 512: 10385 RC = &AMDGPU::SReg_512RegClass; 10386 break; 10387 } 10388 break; 10389 case 'v': 10390 switch (VT.getSizeInBits()) { 10391 default: 10392 return std::make_pair(0U, nullptr); 10393 case 32: 10394 case 16: 10395 RC = &AMDGPU::VGPR_32RegClass; 10396 break; 10397 case 64: 10398 RC = &AMDGPU::VReg_64RegClass; 10399 break; 10400 case 96: 10401 RC = &AMDGPU::VReg_96RegClass; 10402 break; 10403 case 128: 10404 RC = &AMDGPU::VReg_128RegClass; 10405 break; 10406 case 160: 10407 RC = &AMDGPU::VReg_160RegClass; 10408 break; 10409 case 256: 10410 RC = &AMDGPU::VReg_256RegClass; 10411 break; 10412 case 512: 10413 RC = &AMDGPU::VReg_512RegClass; 10414 break; 10415 } 10416 break; 10417 case 'a': 10418 switch (VT.getSizeInBits()) { 10419 default: 10420 return std::make_pair(0U, nullptr); 10421 case 32: 10422 case 16: 10423 RC = &AMDGPU::AGPR_32RegClass; 10424 break; 10425 case 64: 10426 RC = &AMDGPU::AReg_64RegClass; 10427 break; 10428 case 128: 10429 RC = &AMDGPU::AReg_128RegClass; 10430 break; 10431 case 512: 10432 RC = &AMDGPU::AReg_512RegClass; 10433 break; 10434 case 1024: 10435 RC = &AMDGPU::AReg_1024RegClass; 10436 // v32 types are not legal but we support them here. 
10437 return std::make_pair(0U, RC); 10438 } 10439 break; 10440 } 10441 // We actually support i128, i16 and f16 as inline parameters 10442 // even if they are not reported as legal 10443 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || 10444 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) 10445 return std::make_pair(0U, RC); 10446 } 10447 10448 if (Constraint.size() > 1) { 10449 if (Constraint[1] == 'v') { 10450 RC = &AMDGPU::VGPR_32RegClass; 10451 } else if (Constraint[1] == 's') { 10452 RC = &AMDGPU::SGPR_32RegClass; 10453 } else if (Constraint[1] == 'a') { 10454 RC = &AMDGPU::AGPR_32RegClass; 10455 } 10456 10457 if (RC) { 10458 uint32_t Idx; 10459 bool Failed = Constraint.substr(2).getAsInteger(10, Idx); 10460 if (!Failed && Idx < RC->getNumRegs()) 10461 return std::make_pair(RC->getRegister(Idx), RC); 10462 } 10463 } 10464 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 10465 } 10466 10467 SITargetLowering::ConstraintType 10468 SITargetLowering::getConstraintType(StringRef Constraint) const { 10469 if (Constraint.size() == 1) { 10470 switch (Constraint[0]) { 10471 default: break; 10472 case 's': 10473 case 'v': 10474 case 'a': 10475 return C_RegisterClass; 10476 } 10477 } 10478 return TargetLowering::getConstraintType(Constraint); 10479 } 10480 10481 // Figure out which registers should be reserved for stack access. Only after 10482 // the function is legalized do we know all of the non-spill stack objects or if 10483 // calls are present. 10484 void SITargetLowering::finalizeLowering(MachineFunction &MF) const { 10485 MachineRegisterInfo &MRI = MF.getRegInfo(); 10486 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 10487 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 10488 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 10489 10490 if (Info->isEntryFunction()) { 10491 // Callable functions have fixed registers used for stack access. 10492 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); 10493 } 10494 10495 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), 10496 Info->getStackPtrOffsetReg())); 10497 if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) 10498 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); 10499 10500 // We need to worry about replacing the default register with itself in case 10501 // of MIR testcases missing the MFI. 10502 if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) 10503 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); 10504 10505 if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) 10506 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); 10507 10508 if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) { 10509 MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG, 10510 Info->getScratchWaveOffsetReg()); 10511 } 10512 10513 Info->limitOccupancy(MF); 10514 10515 if (ST.isWave32() && !MF.empty()) { 10516 // Add VCC_HI def because many instructions marked as imp-use VCC where 10517 // we may only define VCC_LO. If nothing defines VCC_HI we may end up 10518 // having a use of undef. 

    const SIInstrInfo *TII = ST.getInstrInfo();
    DebugLoc DL;

    MachineBasicBlock &MBB = MF.front();
    MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
    BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);

    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        TII->fixImplicitOperands(MI);
      }
    }
  }

  TargetLoweringBase::finalizeLowering(MF);
}

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  // Set the high bits to zero based on the maximum allowed scratch size per
  // wave. We can't use vaddr in MUBUF instructions if we don't know the
  // address calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}

unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
  const unsigned CacheLineAlign = 6; // log2(64)

  // Pre-GFX10 targets did not benefit from loop alignment.
  if (!ML || DisableLoopAlignment ||
      (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
      getSubtarget()->hasInstFwdPrefetchBug())
    return PrefAlign;

  // On GFX10 the I$ consists of 4 x 64 byte cache lines.
  // By default the prefetcher keeps one cache line behind and reads two ahead.
  // We can modify it with S_INST_PREFETCH for larger loops to have two lines
  // behind and one ahead.
  // Therefore we can benefit from aligning loop headers if the loop fits in
  // 192 bytes.
  // If the loop fits in 64 bytes it always spans no more than two cache lines
  // and does not need extra alignment.
  // Otherwise, if the loop is at most 128 bytes we do not need to modify the
  // prefetch; if it is at most 192 bytes we need two lines behind.

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  const MachineBasicBlock *Header = ML->getHeader();
  if (Header->getAlignment() != PrefAlign)
    return Header->getAlignment(); // Already processed.

  unsigned LoopSize = 0;
  for (const MachineBasicBlock *MBB : ML->blocks()) {
    // If an inner loop block is aligned, assume on average half of the
    // alignment size is added as nops.
    if (MBB != Header)
      LoopSize += (1 << MBB->getAlignment()) / 2;

    for (const MachineInstr &MI : *MBB) {
      LoopSize += TII->getInstSizeInBytes(MI);
      if (LoopSize > 192)
        return PrefAlign;
    }
  }

  if (LoopSize <= 64)
    return PrefAlign;

  if (LoopSize <= 128)
    return CacheLineAlign;

  // If any of the parent loops is surrounded by prefetch instructions, do not
  // insert new ones for the inner loop; that would reset the parent's
  // settings.
10598 for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { 10599 if (MachineBasicBlock *Exit = P->getExitBlock()) { 10600 auto I = Exit->getFirstNonDebugInstr(); 10601 if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) 10602 return CacheLineAlign; 10603 } 10604 } 10605 10606 MachineBasicBlock *Pre = ML->getLoopPreheader(); 10607 MachineBasicBlock *Exit = ML->getExitBlock(); 10608 10609 if (Pre && Exit) { 10610 BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(), 10611 TII->get(AMDGPU::S_INST_PREFETCH)) 10612 .addImm(1); // prefetch 2 lines behind PC 10613 10614 BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(), 10615 TII->get(AMDGPU::S_INST_PREFETCH)) 10616 .addImm(2); // prefetch 1 line behind PC 10617 } 10618 10619 return CacheLineAlign; 10620 } 10621 10622 LLVM_ATTRIBUTE_UNUSED 10623 static bool isCopyFromRegOfInlineAsm(const SDNode *N) { 10624 assert(N->getOpcode() == ISD::CopyFromReg); 10625 do { 10626 // Follow the chain until we find an INLINEASM node. 10627 N = N->getOperand(0).getNode(); 10628 if (N->getOpcode() == ISD::INLINEASM || 10629 N->getOpcode() == ISD::INLINEASM_BR) 10630 return true; 10631 } while (N->getOpcode() == ISD::CopyFromReg); 10632 return false; 10633 } 10634 10635 bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N, 10636 FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const 10637 { 10638 switch (N->getOpcode()) { 10639 case ISD::CopyFromReg: 10640 { 10641 const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); 10642 const MachineFunction * MF = FLI->MF; 10643 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 10644 const MachineRegisterInfo &MRI = MF->getRegInfo(); 10645 const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo(); 10646 unsigned Reg = R->getReg(); 10647 if (TRI.isPhysicalRegister(Reg)) 10648 return !TRI.isSGPRReg(MRI, Reg); 10649 10650 if (MRI.isLiveIn(Reg)) { 10651 // workitem.id.x workitem.id.y workitem.id.z 10652 // Any VGPR formal argument is also considered divergent 10653 if (!TRI.isSGPRReg(MRI, Reg)) 10654 return true; 10655 // Formal arguments of non-entry functions 10656 // are conservatively considered divergent 10657 else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv())) 10658 return true; 10659 return false; 10660 } 10661 const Value *V = FLI->getValueFromVirtualReg(Reg); 10662 if (V) 10663 return KDA->isDivergent(V); 10664 assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N)); 10665 return !TRI.isSGPRReg(MRI, Reg); 10666 } 10667 break; 10668 case ISD::LOAD: { 10669 const LoadSDNode *L = cast<LoadSDNode>(N); 10670 unsigned AS = L->getAddressSpace(); 10671 // A flat load may access private memory. 10672 return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS; 10673 } break; 10674 case ISD::CALLSEQ_END: 10675 return true; 10676 break; 10677 case ISD::INTRINSIC_WO_CHAIN: 10678 { 10679 10680 } 10681 return AMDGPU::isIntrinsicSourceOfDivergence( 10682 cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()); 10683 case ISD::INTRINSIC_W_CHAIN: 10684 return AMDGPU::isIntrinsicSourceOfDivergence( 10685 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()); 10686 // In some cases intrinsics that are a source of divergence have been 10687 // lowered to AMDGPUISD so we also need to check those too. 
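  // For example, the llvm.amdgcn.interp.* intrinsics are lowered to the
  // INTERP_* nodes handled here.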
10688 case AMDGPUISD::INTERP_MOV: 10689 case AMDGPUISD::INTERP_P1: 10690 case AMDGPUISD::INTERP_P2: 10691 return true; 10692 } 10693 return false; 10694 } 10695 10696 bool SITargetLowering::denormalsEnabledForType(EVT VT) const { 10697 switch (VT.getScalarType().getSimpleVT().SimpleTy) { 10698 case MVT::f32: 10699 return Subtarget->hasFP32Denormals(); 10700 case MVT::f64: 10701 return Subtarget->hasFP64Denormals(); 10702 case MVT::f16: 10703 return Subtarget->hasFP16Denormals(); 10704 default: 10705 return false; 10706 } 10707 } 10708 10709 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 10710 const SelectionDAG &DAG, 10711 bool SNaN, 10712 unsigned Depth) const { 10713 if (Op.getOpcode() == AMDGPUISD::CLAMP) { 10714 const MachineFunction &MF = DAG.getMachineFunction(); 10715 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 10716 10717 if (Info->getMode().DX10Clamp) 10718 return true; // Clamped to 0. 10719 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 10720 } 10721 10722 return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, 10723 SNaN, Depth); 10724 } 10725 10726 TargetLowering::AtomicExpansionKind 10727 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { 10728 switch (RMW->getOperation()) { 10729 case AtomicRMWInst::FAdd: { 10730 Type *Ty = RMW->getType(); 10731 10732 // We don't have a way to support 16-bit atomics now, so just leave them 10733 // as-is. 10734 if (Ty->isHalfTy()) 10735 return AtomicExpansionKind::None; 10736 10737 if (!Ty->isFloatTy()) 10738 return AtomicExpansionKind::CmpXChg; 10739 10740 // TODO: Do have these for flat. Older targets also had them for buffers. 10741 unsigned AS = RMW->getPointerAddressSpace(); 10742 return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ? 10743 AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg; 10744 } 10745 default: 10746 break; 10747 } 10748 10749 return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW); 10750 } 10751