//===- AMDGPUGlobalISelUtils.cpp ---------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUGlobalISelUtils.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/Constants.h"

using namespace llvm;
using namespace MIPatternMatch;

std::pair<Register, unsigned>
AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
                                  GISelKnownBits *KnownBits, bool CheckNUW) {
  MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT) {
    unsigned Offset;
    const MachineOperand &Op = Def->getOperand(1);
    if (Op.isImm())
      Offset = Op.getImm();
    else
      Offset = Op.getCImm()->getZExtValue();

    return std::pair(Register(), Offset);
  }

  int64_t Offset;
  if (Def->getOpcode() == TargetOpcode::G_ADD) {
    // A 32-bit (address + offset) should not cause unsigned 32-bit integer
    // wraparound, because s_load instructions perform the addition in 64 bits.
    if (CheckNUW && !Def->getFlag(MachineInstr::NoUWrap)) {
      assert(MRI.getType(Reg).getScalarSizeInBits() == 32);
      return std::pair(Reg, 0);
    }
    // TODO: Handle G_OR used for add case
    if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))
      return std::pair(Def->getOperand(1).getReg(), Offset);

    // FIXME: matcher should ignore copies
    if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))
      return std::pair(Def->getOperand(1).getReg(), Offset);
  }

  Register Base;
  if (KnownBits && mi_match(Reg, MRI, m_GOr(m_Reg(Base), m_ICst(Offset))) &&
      KnownBits->maskedValueIsZero(Base, APInt(32, Offset)))
    return std::pair(Base, Offset);

  // Handle G_PTRTOINT (G_PTR_ADD base, const) case
  if (Def->getOpcode() == TargetOpcode::G_PTRTOINT) {
    MachineInstr *Base;
    if (mi_match(Def->getOperand(1).getReg(), MRI,
                 m_GPtrAdd(m_MInstr(Base), m_ICst(Offset)))) {
      // If Base was int converted to pointer, simply return int and offset.
      if (Base->getOpcode() == TargetOpcode::G_INTTOPTR)
        return std::pair(Base->getOperand(1).getReg(), Offset);

      // Register returned here will be of pointer type.
      return std::pair(Base->getOperand(0).getReg(), Offset);
    }
  }

  return std::pair(Reg, 0);
}

bool AMDGPU::hasAtomicFaddRtnForTy(const GCNSubtarget &Subtarget,
                                   const LLT &Ty) {
  if (Ty == LLT::scalar(32))
    return Subtarget.hasAtomicFaddRtnInsts();
  if (Ty == LLT::fixed_vector(2, 16) || Ty == LLT::scalar(64))
    return Subtarget.hasGFX90AInsts();
  return false;
}