1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
35 "amdgpu-bypass-slow-div",
36 cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
40 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) { in getEquivalentMemType() argument
41 unsigned StoreSize = VT.getStoreSizeInBits(); in getEquivalentMemType()
48 return VT; in getEquivalentMemType()
56 // In order for this to be a signed 24-bit value, bit 23 must in numBitsSigned()
181 // There are no 64-bit extloads. These should be done as a 32-bit extload and in AMDGPUTargetLowering()
182 // an extension to 64-bit. in AMDGPUTargetLowering()
183 for (MVT VT : MVT::integer_valuetypes()) in AMDGPUTargetLowering() local
184 setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT, in AMDGPUTargetLowering()
187 for (MVT VT : MVT::integer_valuetypes()) { in AMDGPUTargetLowering() local
188 if (VT == MVT::i64) in AMDGPUTargetLowering()
192 setLoadExtAction(Op, VT, MVT::i1, Promote); in AMDGPUTargetLowering()
193 setLoadExtAction(Op, VT, MVT::i8, Legal); in AMDGPUTargetLowering()
194 setLoadExtAction(Op, VT, MVT::i16, Legal); in AMDGPUTargetLowering()
195 setLoadExtAction(Op, VT, MVT::i32, Expand); in AMDGPUTargetLowering()
199 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) in AMDGPUTargetLowering() local
202 setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT, in AMDGPUTargetLowering()
411 if (Subtarget->has16BitInsts()) in AMDGPUTargetLowering()
461 for (MVT VT : ScalarIntVTs) { in AMDGPUTargetLowering() local
463 setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT, in AMDGPUTargetLowering()
467 setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom); in AMDGPUTargetLowering()
470 setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand); in AMDGPUTargetLowering()
472 setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand); in AMDGPUTargetLowering()
475 setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal); in AMDGPUTargetLowering()
478 // The hardware supports 32-bit FSHR, but not FSHL. in AMDGPUTargetLowering()
481 // The hardware supports 32-bit ROTR, but not ROTL. in AMDGPUTargetLowering()
500 for (auto VT : {MVT::i8, MVT::i16}) in AMDGPUTargetLowering()
501 setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, VT, Custom); in AMDGPUTargetLowering()
507 for (MVT VT : VectorIntTypes) { in AMDGPUTargetLowering() local
521 VT, Expand); in AMDGPUTargetLowering()
528 for (MVT VT : FloatVectorTypes) { in AMDGPUTargetLowering() local
541 VT, Expand); in AMDGPUTargetLowering()
609 // The expansion for 64-bit division is enormous. in AMDGPUTargetLowering()
633 const auto Flags = Op.getNode()->getFlags(); in mayIgnoreSignedZero()
640 //===----------------------------------------------------------------------===//
642 //===----------------------------------------------------------------------===//
683 unsigned Opc = N->getOpcode(); in fnegFoldsIntoOp()
687 SDValue BCSrc = N->getOperand(0); in fnegFoldsIntoOp()
699 /// \returns true if the operation will definitely need to use a 64-bit
703 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) { in opMustUseVOP3Encoding() argument
704 return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) || in opMustUseVOP3Encoding()
705 VT == MVT::f64; in opMustUseVOP3Encoding()
713 return N->getValueType(0) == MVT::f32; in selectSupportsSourceMods()
723 switch (N->getOpcode()) { in hasSourceMods()
738 switch (N->getConstantOperandVal(0)) { in hasSourceMods()
758 // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus in allUsesHaveSourceMods()
764 MVT VT = N->getValueType(0).getScalarType().getSimpleVT(); in allUsesHaveSourceMods() local
766 assert(!N->use_empty()); in allUsesHaveSourceMods()
768 // XXX - Should this limit number of uses to check? in allUsesHaveSourceMods()
769 for (const SDNode *U : N->uses()) { in allUsesHaveSourceMods()
773 if (!opMustUseVOP3Encoding(U, VT)) { in allUsesHaveSourceMods()
782 EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT, in getTypeForExtReturn() argument
784 assert(!VT.isVector() && "only scalar expected"); in getTypeForExtReturn()
786 // Round to the next multiple of 32-bits. in getTypeForExtReturn()
787 unsigned Size = VT.getSizeInBits(); in getTypeForExtReturn()
803 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, in isFPImmLegal() argument
805 EVT ScalarVT = VT.getScalarType(); in isFPImmLegal()
807 (ScalarVT == MVT::f16 && Subtarget->has16BitInsts())); in isFPImmLegal()
811 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const { in ShouldShrinkFPConstant()
812 EVT ScalarVT = VT.getScalarType(); in ShouldShrinkFPConstant()
825 // If we are reducing to a 32-bit load or a smaller multi-dword load, in shouldReduceLoadWidth()
830 EVT OldVT = N->getValueType(0); in shouldReduceLoadWidth()
834 unsigned AS = MN->getAddressSpace(); in shouldReduceLoadWidth()
835 // Do not shrink an aligned scalar load to sub-dword. in shouldReduceLoadWidth()
836 // Scalar engine cannot do sub-dword loads. in shouldReduceLoadWidth()
837 // TODO: Update this for GFX12 which does have scalar sub-dword loads. in shouldReduceLoadWidth()
838 if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) && in shouldReduceLoadWidth()
842 MN->isInvariant())) && in shouldReduceLoadWidth()
843 AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand())) in shouldReduceLoadWidth()
846 // Don't produce extloads from sub 32-bit types. SI doesn't have scalar in shouldReduceLoadWidth()
877 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
878 // profitable with the expansion for 64-bit since it's generally good to
889 switch (N->getOpcode()) { in isSDNodeAlwaysUniform()
894 unsigned IntrID = N->getConstantOperandVal(0); in isSDNodeAlwaysUniform()
898 if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() == in isSDNodeAlwaysUniform()
902 case AMDGPUISD::SETCC: // ballot-style instruction in isSDNodeAlwaysUniform()
922 EVT VT = Op.getValueType(); in getNegatedExpression() local
928 return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags()); in getNegatedExpression()
939 //===---------------------------------------------------------------------===//
941 //===---------------------------------------------------------------------===//
943 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const { in isFAbsFree()
944 assert(VT.isFloatingPoint()); in isFAbsFree()
947 return VT == MVT::f32 || VT == MVT::f64 || in isFAbsFree()
948 (Subtarget->has16BitInsts() && (VT == MVT::f16 || VT == MVT::bf16)); in isFAbsFree()
951 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const { in isFNegFree()
952 assert(VT.isFloatingPoint()); in isFNegFree()
954 VT = VT.getScalarType(); in isFNegFree()
955 return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16 || VT == MVT::bf16; in isFNegFree()
988 unsigned SrcSize = Source->getScalarSizeInBits(); in isTruncateFree()
989 unsigned DestSize = Dest->getScalarSizeInBits(); in isTruncateFree()
991 if (DestSize == 16 && Subtarget->has16BitInsts()) in isTruncateFree()
998 unsigned SrcSize = Src->getScalarSizeInBits(); in isZExtFree()
999 unsigned DestSize = Dest->getScalarSizeInBits(); in isZExtFree()
1001 if (SrcSize == 16 && Subtarget->has16BitInsts()) in isZExtFree()
1008 // Any register load of a 64-bit value really requires 2 32-bit moves. For all in isZExtFree()
1009 // practical purposes, the extra mov 0 to load a 64-bit is free. As used, in isZExtFree()
1010 // this will enable reducing 64-bit operations to 32-bit, which is always in isZExtFree()
1020 // There aren't really 64-bit registers, but pairs of 32-bit ones and only a in isNarrowingProfitable()
1021 // limited number of native 64-bit operations. Shrinking an operation to fit in isNarrowingProfitable()
1022 // in a single 32-bit register should always be helpful. As currently used, in isNarrowingProfitable()
1024 // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is in isNarrowingProfitable()
1031 assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || in isDesirableToCommuteWithShift()
1032 N->getOpcode() == ISD::SRL) && in isDesirableToCommuteWithShift()
1034 // Always commute pre-type legalization and right shifts. in isDesirableToCommuteWithShift()
1037 N->getOpcode() != ISD::SHL || N->getOperand(0).getOpcode() != ISD::OR) in isDesirableToCommuteWithShift()
1040 // If the only user is an i32 right-shift, then don't destroy a BFE pattern. in isDesirableToCommuteWithShift()
1041 if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 && in isDesirableToCommuteWithShift()
1042 (N->use_begin()->getOpcode() == ISD::SRA || in isDesirableToCommuteWithShift()
1043 N->use_begin()->getOpcode() == ISD::SRL)) in isDesirableToCommuteWithShift()
1053 return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD && in isDesirableToCommuteWithShift()
1054 LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() && in isDesirableToCommuteWithShift()
1055 RHSLd->getExtensionType() == ISD::ZEXTLOAD; in isDesirableToCommuteWithShift()
1057 SDValue LHS = N->getOperand(0).getOperand(0); in isDesirableToCommuteWithShift()
1058 SDValue RHS = N->getOperand(0).getOperand(1); in isDesirableToCommuteWithShift()
1062 //===---------------------------------------------------------------------===//
1064 //===---------------------------------------------------------------------===//
1129 /// represents a single value that will be stored in registers. Ins[x].VT is
1134 /// argument. Since Ins[x].VT gives us the size of the register that will
1144 /// calling convention analysis function and the register type (Ins[x].VT) as
1151 LLVMContext &Ctx = Fn.getParent()->getContext(); in analyzeFormalArgumentsCompute()
1175 // to get accurate in-memory offsets. The "PartOffset" is completely useless in analyzeFormalArgumentsCompute()
1208 // all the floating-point vector types. in analyzeFormalArgumentsCompute()
1276 //===---------------------------------------------------------------------===//
1278 //===---------------------------------------------------------------------===//
1297 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1; in addTokenForArgument()
1305 for (SDNode *U : DAG.getEntryNode().getNode()->uses()) { in addTokenForArgument()
1307 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) { in addTokenForArgument()
1308 if (FI->getIndex() < 0) { in addTokenForArgument()
1309 int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex()); in addTokenForArgument()
1311 InLastByte += MFI.getObjectSize(FI->getIndex()) - 1; in addTokenForArgument()
1336 FuncName = G->getSymbol(); in lowerUnhandledCall()
1338 FuncName = G->getGlobal()->getName(); in lowerUnhandledCall()
1342 DAG.getContext()->diagnose(NoCalls); in lowerUnhandledCall()
1346 InVals.push_back(DAG.getUNDEF(Arg.VT)); in lowerUnhandledCall()
1363 DAG.getContext()->diagnose(NoDynamicAlloca); in LowerDYNAMIC_STACKALLOC()
1372 Op->print(errs(), &DAG); in LowerOperation()
1419 switch (N->getOpcode()) { in ReplaceNodeResults()
1462 const GlobalValue *GV = G->getGlobal(); in LowerGlobalAddress()
1464 if (!MFI->isModuleEntryFunction()) { in LowerGlobalAddress()
1471 if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || in LowerGlobalAddress()
1472 G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) { in LowerGlobalAddress()
1473 if (!MFI->isModuleEntryFunction() && in LowerGlobalAddress()
1474 GV->getName() != "llvm.amdgcn.module.lds") { in LowerGlobalAddress()
1478 Fn, "local memory global used by non-kernel function", in LowerGlobalAddress()
1480 DAG.getContext()->diagnose(BadLDSDecl); in LowerGlobalAddress()
1494 // XXX: What does the value of G->getOffset() mean? in LowerGlobalAddress()
1495 assert(G->getOffset() == 0 && in LowerGlobalAddress()
1496 "Do not know what to do with an non-zero offset"); in LowerGlobalAddress()
1501 unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV)); in LowerGlobalAddress()
1512 EVT VT = Op.getValueType(); in LowerCONCAT_VECTORS() local
1513 if (VT.getVectorElementType().getSizeInBits() < 32) { in LowerCONCAT_VECTORS()
1520 for (const SDUse &U : Op->ops()) { in LowerCONCAT_VECTORS()
1532 return DAG.getNode(ISD::BITCAST, SL, VT, BV); in LowerCONCAT_VECTORS()
1536 for (const SDUse &U : Op->ops()) in LowerCONCAT_VECTORS()
1547 EVT VT = Op.getValueType(); in LowerEXTRACT_SUBVECTOR() local
1550 if (VT.getScalarSizeInBits() == 16 && Start % 2 == 0) { in LowerEXTRACT_SUBVECTOR()
1551 unsigned NumElt = VT.getVectorNumElements(); in LowerEXTRACT_SUBVECTOR()
1555 // Extract 32-bit registers at a time. in LowerEXTRACT_SUBVECTOR()
1568 return DAG.getNode(ISD::BITCAST, SL, VT, Tmp); in LowerEXTRACT_SUBVECTOR()
1572 VT.getVectorNumElements()); in LowerEXTRACT_SUBVECTOR()
1596 const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, in combineFMinMaxLegacyImpl() argument
1599 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get(); in combineFMinMaxLegacyImpl()
1617 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS); in combineFMinMaxLegacyImpl()
1618 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS); in combineFMinMaxLegacyImpl()
1636 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS); in combineFMinMaxLegacyImpl()
1637 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS); in combineFMinMaxLegacyImpl()
1642 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS); in combineFMinMaxLegacyImpl()
1643 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS); in combineFMinMaxLegacyImpl()
1654 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS); in combineFMinMaxLegacyImpl()
1655 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS); in combineFMinMaxLegacyImpl()
1664 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT, in combineFMinMaxLegacy() argument
1670 return combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, True, False, CC, DCI); in combineFMinMaxLegacy()
1684 // select (fcmp olt (lhs, K)), (fneg lhs), -K in combineFMinMaxLegacy()
1685 // -> fneg (fmin_legacy lhs, K) in combineFMinMaxLegacy()
1689 APFloat NegRHS = neg(CRHS->getValueAPF()); in combineFMinMaxLegacy()
1690 if (NegRHS == CFalse->getValueAPF()) { in combineFMinMaxLegacy()
1692 combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, NegTrue, False, CC, DCI); in combineFMinMaxLegacy()
1694 return DAG.getNode(ISD::FNEG, DL, VT, Combined); in combineFMinMaxLegacy()
1735 // otherwise be a 1-vector.
1737 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const { in getSplitDestVTs() argument
1739 EVT EltVT = VT.getVectorElementType(); in getSplitDestVTs()
1740 unsigned NumElts = VT.getVectorNumElements(); in getSplitDestVTs()
1743 HiVT = NumElts - LoNumElts == 1 in getSplitDestVTs()
1745 : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts); in getSplitDestVTs()
1770 EVT VT = Op.getValueType(); in SplitVectorLoad() local
1776 if (VT.getVectorNumElements() == 2) { in SplitVectorLoad()
1782 SDValue BasePtr = Load->getBasePtr(); in SplitVectorLoad()
1783 EVT MemVT = Load->getMemoryVT(); in SplitVectorLoad()
1785 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); in SplitVectorLoad()
1791 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG); in SplitVectorLoad()
1796 Align BaseAlign = Load->getAlign(); in SplitVectorLoad()
1799 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT, in SplitVectorLoad()
1800 Load->getChain(), BasePtr, SrcValue, LoMemVT, in SplitVectorLoad()
1801 BaseAlign, Load->getMemOperand()->getFlags()); in SplitVectorLoad()
1804 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(), in SplitVectorLoad()
1806 HiMemVT, HiAlign, Load->getMemOperand()->getFlags()); in SplitVectorLoad()
1811 Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad); in SplitVectorLoad()
1813 Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad, in SplitVectorLoad()
1817 VT, Join, HiLoad, in SplitVectorLoad()
1830 EVT VT = Op.getValueType(); in WidenOrSplitVectorLoad() local
1831 SDValue BasePtr = Load->getBasePtr(); in WidenOrSplitVectorLoad()
1832 EVT MemVT = Load->getMemoryVT(); in WidenOrSplitVectorLoad()
1834 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); in WidenOrSplitVectorLoad()
1835 Align BaseAlign = Load->getAlign(); in WidenOrSplitVectorLoad()
1838 // Widen from vec3 to vec4 when the load is at least 8-byte aligned in WidenOrSplitVectorLoad()
1839 // or 16-byte fully dereferenceable. Otherwise, split the vector load. in WidenOrSplitVectorLoad()
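// For example (illustrative, not a specific caller): a v3i32 load with
// 8-byte alignment is widened to a v4i32 load here, and the unused fourth
// lane is dropped again by the EXTRACT_SUBVECTOR below.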
1848 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); in WidenOrSplitVectorLoad()
1852 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue, in WidenOrSplitVectorLoad()
1853 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags()); in WidenOrSplitVectorLoad()
1855 {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad, in WidenOrSplitVectorLoad()
1864 SDValue Val = Store->getValue(); in SplitVectorStore()
1865 EVT VT = Val.getValueType(); in SplitVectorStore() local
1869 if (VT.getVectorNumElements() == 2) in SplitVectorStore()
1872 EVT MemVT = Store->getMemoryVT(); in SplitVectorStore()
1873 SDValue Chain = Store->getChain(); in SplitVectorStore()
1874 SDValue BasePtr = Store->getBasePtr(); in SplitVectorStore()
1881 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG); in SplitVectorStore()
1887 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo(); in SplitVectorStore()
1888 Align BaseAlign = Store->getAlign(); in SplitVectorStore()
1894 Store->getMemOperand()->getFlags()); in SplitVectorStore()
1897 HiMemVT, HiAlign, Store->getMemOperand()->getFlags()); in SplitVectorStore()
1902 // This is a shortcut for integer division because we have fast i32<->f32
1904 // float is enough to accurately represent up to a 24-bit signed integer.
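// Rough scalar sketch of the fast path (illustrative only; these names do
// not appear in the code below):
//   float fa = (float)lhs, fb = (float)rhs;
//   float fq = trunc(fa * rcp(fb));              // approximate quotient
//   int   iq = (int)fq;
//   int   jq = ((lhs ^ rhs) >> (bits - 2)) | 1;  // +/-1 sign-aware correction
//   div = iq + (needs correction ? jq : 0);
//   rem = lhs - div * rhs;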
1908 EVT VT = Op.getValueType(); in LowerDIVREM24() local
1922 unsigned BitSize = VT.getSizeInBits(); in LowerDIVREM24()
1924 unsigned DivBits = BitSize - SignBits; in LowerDIVREM24()
1935 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS); in LowerDIVREM24()
1937 // jq = jq >> (bitsize - 2) in LowerDIVREM24()
1938 jq = DAG.getNode(ISD::SRA, DL, VT, jq, in LowerDIVREM24()
1939 DAG.getConstant(BitSize - 2, DL, VT)); in LowerDIVREM24()
1942 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT)); in LowerDIVREM24()
1963 // float fqneg = -fq; in LowerDIVREM24()
1969 if (Subtarget->isGCN()) { in LowerDIVREM24()
1972 MFI->getMode().FP32Denormals != DenormalMode::getPreserveSign(); in LowerDIVREM24()
1976 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA in LowerDIVREM24()
1990 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in LowerDIVREM24()
1996 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT)); in LowerDIVREM24()
1999 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq); in LowerDIVREM24()
2002 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS); in LowerDIVREM24()
2003 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem); in LowerDIVREM24()
2009 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize); in LowerDIVREM24()
2010 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize); in LowerDIVREM24()
2012 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT); in LowerDIVREM24()
2013 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask); in LowerDIVREM24()
2014 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask); in LowerDIVREM24()
2024 EVT VT = Op.getValueType(); in LowerUDIVREM64() local
2026 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64"); in LowerUDIVREM64()
2028 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); in LowerUDIVREM64()
2065 !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA in LowerUDIVREM64()
2066 : MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign() in LowerUDIVREM64()
2086 SDValue Rcp64 = DAG.getBitcast(VT, in LowerUDIVREM64()
2089 SDValue Zero64 = DAG.getConstant(0, DL, VT); in LowerUDIVREM64()
2090 SDValue One64 = DAG.getConstant(1, DL, VT); in LowerUDIVREM64()
2094 // First round of UNR (Unsigned integer Newton-Raphson). in LowerUDIVREM64()
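// In 64-bit fixed point, each step below computes
//   rcp' = rcp + mulhu(rcp, -RHS * rcp)
// i.e. the Newton-Raphson refinement rcp * (2 - RHS * rcp) of the reciprocal
// estimate.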
2095 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS); in LowerUDIVREM64()
2096 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64); in LowerUDIVREM64()
2097 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1); in LowerUDIVREM64()
2105 SDValue Add1 = DAG.getBitcast(VT, in LowerUDIVREM64()
2109 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1); in LowerUDIVREM64()
2110 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2); in LowerUDIVREM64()
2118 SDValue Add2 = DAG.getBitcast(VT, in LowerUDIVREM64()
2121 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2); in LowerUDIVREM64()
2123 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3); in LowerUDIVREM64()
2132 SDValue Sub1 = DAG.getBitcast(VT, in LowerUDIVREM64()
2153 SDValue Sub2 = DAG.getBitcast(VT, in LowerUDIVREM64()
2156 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64); in LowerUDIVREM64()
2165 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64); in LowerUDIVREM64()
2173 SDValue Sub3 = DAG.getBitcast(VT, in LowerUDIVREM64()
2206 const unsigned bitPos = halfBitWidth - i - 1; in LowerUDIVREM64()
2208 // Get value of high bit in LowerUDIVREM64()
2211 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit); in LowerUDIVREM64()
2214 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT)); in LowerUDIVREM64()
2215 // Add LHS high bit in LowerUDIVREM64()
2216 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit); in LowerUDIVREM64()
2224 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS); in LowerUDIVREM64()
2237 EVT VT = Op.getValueType(); in LowerUDIVREM() local
2239 if (VT == MVT::i64) { in LowerUDIVREM()
2245 if (VT == MVT::i32) { in LowerUDIVREM()
2257 SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y); in LowerUDIVREM()
2260 SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y); in LowerUDIVREM()
2261 SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z); in LowerUDIVREM()
2262 Z = DAG.getNode(ISD::ADD, DL, VT, Z, in LowerUDIVREM()
2263 DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ)); in LowerUDIVREM()
2266 SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z); in LowerUDIVREM()
2268 DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y)); in LowerUDIVREM()
2271 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in LowerUDIVREM()
2272 SDValue One = DAG.getConstant(1, DL, VT); in LowerUDIVREM()
2274 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond, in LowerUDIVREM()
2275 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q); in LowerUDIVREM()
2276 R = DAG.getNode(ISD::SELECT, DL, VT, Cond, in LowerUDIVREM()
2277 DAG.getNode(ISD::SUB, DL, VT, R, Y), R); in LowerUDIVREM()
2281 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond, in LowerUDIVREM()
2282 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q); in LowerUDIVREM()
2283 R = DAG.getNode(ISD::SELECT, DL, VT, Cond, in LowerUDIVREM()
2284 DAG.getNode(ISD::SUB, DL, VT, R, Y), R); in LowerUDIVREM()
2292 EVT VT = Op.getValueType(); in LowerSDIVREM() local
2297 SDValue Zero = DAG.getConstant(0, DL, VT); in LowerSDIVREM()
2298 SDValue NegOne = DAG.getConstant(-1, DL, VT); in LowerSDIVREM()
2300 if (VT == MVT::i32) { in LowerSDIVREM()
2305 if (VT == MVT::i64 && in LowerSDIVREM()
2308 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); in LowerSDIVREM()
2316 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), in LowerSDIVREM()
2317 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) in LowerSDIVREM()
2324 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); in LowerSDIVREM()
2327 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); in LowerSDIVREM()
2328 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); in LowerSDIVREM()
2330 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); in LowerSDIVREM()
2331 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); in LowerSDIVREM()
2333 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS); in LowerSDIVREM()
2336 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); in LowerSDIVREM()
2337 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); in LowerSDIVREM()
2339 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); in LowerSDIVREM()
2340 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); in LowerSDIVREM()
2349 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
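// Worked example: frem(5.5, 2.0): fdiv = 2.75, ftrunc = 2.0, and
// fma(-2.0, 2.0, 5.5) = 1.5.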
2352 EVT VT = Op.getValueType(); in LowerFREM() local
2353 auto Flags = Op->getFlags(); in LowerFREM()
2357 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags); in LowerFREM()
2358 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags); in LowerFREM()
2359 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags); in LowerFREM()
2361 return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags); in LowerFREM()
2385 // TODO: Should this propagate fast-math-flags? in LowerFCEIL()
2396 DAG.getConstant(FractBits - 32, SL, MVT::i32), in extractF64Exponent()
2424 // Extend back to 64-bits. in LowerFTRUNC()
2430 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); in LowerFTRUNC()
2439 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); in LowerFTRUNC()
2461 // TODO: Should this propagate fast-math-flags? in LowerFROUNDEVEN()
2488 auto VT = Op.getValueType(); in LowerFRINT() local
2490 return DAG.getNode(ISD::FROUNDEVEN, SDLoc(Op), VT, Arg); in LowerFRINT()
2493 // XXX - May require not supporting f32 denormals?
2501 EVT VT = Op.getValueType(); in LowerFROUND() local
2503 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X); in LowerFROUND()
2505 // TODO: Should this propagate fast-math-flags? in LowerFROUND()
2507 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T); in LowerFROUND()
2509 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff); in LowerFROUND()
2511 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT); in LowerFROUND()
2512 const SDValue One = DAG.getConstantFP(1.0, SL, VT); in LowerFROUND()
2515 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in LowerFROUND()
2517 const SDValue Half = DAG.getConstantFP(0.5, SL, VT); in LowerFROUND()
2519 SDValue OneOrZeroFP = DAG.getNode(ISD::SELECT, SL, VT, Cmp, One, Zero); in LowerFROUND()
2521 SDValue SignedOffset = DAG.getNode(ISD::FCOPYSIGN, SL, VT, OneOrZeroFP, X); in LowerFROUND()
2522 return DAG.getNode(ISD::FADD, SL, VT, T, SignedOffset); in LowerFROUND()
2531 // result += -1.0. in LowerFFLOOR()
2536 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64); in LowerFFLOOR()
2546 // TODO: Should this propagate fast-math-flags? in LowerFFLOOR()
2595 EVT VT = Src.getValueType(); in getIsLtSmallestNormal() local
2596 const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT); in getIsLtSmallestNormal()
2598 DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT); in getIsLtSmallestNormal()
2603 SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src, in getIsLtSmallestNormal()
2612 EVT VT = Src.getValueType(); in getIsFinite() local
2613 const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT); in getIsFinite()
2614 SDValue Inf = DAG.getConstantFP(APFloat::getInf(Semantics), SL, VT); in getIsFinite()
2616 SDValue Fabs = DAG.getNode(ISD::FABS, SL, VT, Src, Flags); in getIsFinite()
2618 SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Fabs, in getIsFinite()
2631 MVT VT = MVT::f32; in getScaledLogInput() local
2634 DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT); in getScaledLogInput()
2637 SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src, in getScaledLogInput()
2640 SDValue Scale32 = DAG.getConstantFP(0x1.0p+32, SL, VT); in getScaledLogInput()
2641 SDValue One = DAG.getConstantFP(1.0, SL, VT); in getScaledLogInput()
2643 DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, Scale32, One, Flags); in getScaledLogInput()
2645 SDValue ScaledInput = DAG.getNode(ISD::FMUL, SL, VT, Src, ScaleFactor, Flags); in getScaledLogInput()
2654 // log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0) in LowerFLOG2()
2657 EVT VT = Op.getValueType(); in LowerFLOG2() local
2659 SDNodeFlags Flags = Op->getFlags(); in LowerFLOG2()
2661 if (VT == MVT::f16) { in LowerFLOG2()
2663 assert(!Subtarget->has16BitInsts()); in LowerFLOG2()
2666 return DAG.getNode(ISD::FP_ROUND, SL, VT, Log, in LowerFLOG2()
2673 return DAG.getNode(AMDGPUISD::LOG, SL, VT, Src, Flags); in LowerFLOG2()
2675 SDValue Log2 = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags); in LowerFLOG2()
2677 SDValue ThirtyTwo = DAG.getConstantFP(32.0, SL, VT); in LowerFLOG2()
2678 SDValue Zero = DAG.getConstantFP(0.0, SL, VT); in LowerFLOG2()
2680 DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, ThirtyTwo, Zero); in LowerFLOG2()
2681 return DAG.getNode(ISD::FSUB, SL, VT, Log2, ResultOffset, Flags); in LowerFLOG2()
2684 static SDValue getMad(SelectionDAG &DAG, const SDLoc &SL, EVT VT, SDValue X, in getMad() argument
2686 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, X, Y, Flags); in getMad()
2687 return DAG.getNode(ISD::FADD, SL, VT, Mul, C, Flags); in getMad()
2693 EVT VT = Op.getValueType(); in LowerFLOGCommon() local
2694 SDNodeFlags Flags = Op->getFlags(); in LowerFLOGCommon()
2701 if (VT == MVT::f16 || Flags.hasApproximateFuncs() || in LowerFLOGCommon()
2704 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) { in LowerFLOGCommon()
2710 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) { in LowerFLOGCommon()
2711 return DAG.getNode(ISD::FP_ROUND, DL, VT, Lowered, in LowerFLOGCommon()
2722 SDValue Y = DAG.getNode(AMDGPUISD::LOG, DL, VT, X, Flags); in LowerFLOGCommon()
2725 if (Subtarget->hasFastFMAF32()) { in LowerFLOGCommon()
2727 const float c_log10 = 0x1.344134p-2f; in LowerFLOGCommon()
2728 const float cc_log10 = 0x1.09f79ep-26f; in LowerFLOGCommon()
2731 const float c_log = 0x1.62e42ep-1f; in LowerFLOGCommon()
2732 const float cc_log = 0x1.efa39ep-25f; in LowerFLOGCommon()
2734 SDValue C = DAG.getConstantFP(IsLog10 ? c_log10 : c_log, DL, VT); in LowerFLOGCommon()
2735 SDValue CC = DAG.getConstantFP(IsLog10 ? cc_log10 : cc_log, DL, VT); in LowerFLOGCommon()
2737 R = DAG.getNode(ISD::FMUL, DL, VT, Y, C, Flags); in LowerFLOGCommon()
2738 SDValue NegR = DAG.getNode(ISD::FNEG, DL, VT, R, Flags); in LowerFLOGCommon()
2739 SDValue FMA0 = DAG.getNode(ISD::FMA, DL, VT, Y, C, NegR, Flags); in LowerFLOGCommon()
2740 SDValue FMA1 = DAG.getNode(ISD::FMA, DL, VT, Y, CC, FMA0, Flags); in LowerFLOGCommon()
2741 R = DAG.getNode(ISD::FADD, DL, VT, R, FMA1, Flags); in LowerFLOGCommon()
2744 const float ch_log10 = 0x1.344000p-2f; in LowerFLOGCommon()
2745 const float ct_log10 = 0x1.3509f6p-18f; in LowerFLOGCommon()
2748 const float ch_log = 0x1.62e000p-1f; in LowerFLOGCommon()
2749 const float ct_log = 0x1.0bfbe8p-15f; in LowerFLOGCommon()
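// Interpretation (not from the original comments): the constant is split
// into a high part ch with a short mantissa plus a small tail ct, Y is
// likewise split into YH + YT below, and the partial products are summed
// from smallest to largest to recover extra f32 precision without FMA.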
2751 SDValue CH = DAG.getConstantFP(IsLog10 ? ch_log10 : ch_log, DL, VT); in LowerFLOGCommon()
2752 SDValue CT = DAG.getConstantFP(IsLog10 ? ct_log10 : ct_log, DL, VT); in LowerFLOGCommon()
2758 SDValue YT = DAG.getNode(ISD::FSUB, DL, VT, Y, YH, Flags); in LowerFLOGCommon()
2760 SDValue YTCT = DAG.getNode(ISD::FMUL, DL, VT, YT, CT, Flags); in LowerFLOGCommon()
2761 SDValue Mad0 = getMad(DAG, DL, VT, YH, CT, YTCT, Flags); in LowerFLOGCommon()
2762 SDValue Mad1 = getMad(DAG, DL, VT, YT, CH, Mad0, Flags); in LowerFLOGCommon()
2763 R = getMad(DAG, DL, VT, YH, CH, Mad1); in LowerFLOGCommon()
2772 R = DAG.getNode(ISD::SELECT, DL, VT, IsFinite, R, Y, Flags); in LowerFLOGCommon()
2776 SDValue Zero = DAG.getConstantFP(0.0f, DL, VT); in LowerFLOGCommon()
2778 DAG.getConstantFP(IsLog10 ? 0x1.344136p+3f : 0x1.62e430p+4f, DL, VT); in LowerFLOGCommon()
2780 DAG.getNode(ISD::SELECT, DL, VT, IsScaled, ShiftK, Zero, Flags); in LowerFLOGCommon()
2781 R = DAG.getNode(ISD::FSUB, DL, VT, R, Shift, Flags); in LowerFLOGCommon()
2796 EVT VT = Src.getValueType(); in LowerFLOGUnsafe() local
2798 VT == MVT::f32 ? (unsigned)AMDGPUISD::LOG : (unsigned)ISD::FLOG2; in LowerFLOGUnsafe()
2803 if (VT == MVT::f32) { in LowerFLOGUnsafe()
2806 SDValue LogSrc = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags); in LowerFLOGUnsafe()
2808 DAG.getConstantFP(-32.0 * Log2BaseInverted, SL, VT); in LowerFLOGUnsafe()
2810 SDValue Zero = DAG.getConstantFP(0.0f, SL, VT); in LowerFLOGUnsafe()
2812 SDValue ResultOffset = DAG.getNode(ISD::SELECT, SL, VT, IsScaled, in LowerFLOGUnsafe()
2815 SDValue Log2Inv = DAG.getConstantFP(Log2BaseInverted, SL, VT); in LowerFLOGUnsafe()
2817 if (Subtarget->hasFastFMAF32()) in LowerFLOGUnsafe()
2818 return DAG.getNode(ISD::FMA, SL, VT, LogSrc, Log2Inv, ResultOffset, in LowerFLOGUnsafe()
2820 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, LogSrc, Log2Inv, Flags); in LowerFLOGUnsafe()
2821 return DAG.getNode(ISD::FADD, SL, VT, Mul, ResultOffset); in LowerFLOGUnsafe()
2825 SDValue Log2Operand = DAG.getNode(LogOp, SL, VT, Src, Flags); in LowerFLOGUnsafe()
2826 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT); in LowerFLOGUnsafe()
2828 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand, in LowerFLOGUnsafe()
2837 EVT VT = Op.getValueType(); in lowerFEXP2() local
2839 SDNodeFlags Flags = Op->getFlags(); in lowerFEXP2()
2841 if (VT == MVT::f16) { in lowerFEXP2()
2843 assert(!Subtarget->has16BitInsts()); in lowerFEXP2()
2846 return DAG.getNode(ISD::FP_ROUND, SL, VT, Log, in lowerFEXP2()
2850 assert(VT == MVT::f32); in lowerFEXP2()
2855 // bool needs_scaling = x < -0x1.f80000p+6f; in lowerFEXP2()
2856 // v_exp_f32(x + (s ? 0x1.0p+6f : 0.0f)) * (s ? 0x1.0p-64f : 1.0f); in lowerFEXP2()
2858 // -nextafter(128.0, -1) in lowerFEXP2()
2859 SDValue RangeCheckConst = DAG.getConstantFP(-0x1.f80000p+6f, SL, VT); in lowerFEXP2()
2861 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in lowerFEXP2()
2866 SDValue SixtyFour = DAG.getConstantFP(0x1.0p+6f, SL, VT); in lowerFEXP2()
2867 SDValue Zero = DAG.getConstantFP(0.0, SL, VT); in lowerFEXP2()
2870 DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, SixtyFour, Zero); in lowerFEXP2()
2872 SDValue AddInput = DAG.getNode(ISD::FADD, SL, VT, Src, AddOffset, Flags); in lowerFEXP2()
2873 SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, AddInput, Flags); in lowerFEXP2()
2875 SDValue TwoExpNeg64 = DAG.getConstantFP(0x1.0p-64f, SL, VT); in lowerFEXP2()
2876 SDValue One = DAG.getConstantFP(1.0, SL, VT); in lowerFEXP2()
2878 DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, TwoExpNeg64, One); in lowerFEXP2()
2880 return DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScale, Flags); in lowerFEXP2()
2886 EVT VT = X.getValueType(); in lowerFEXPUnsafe() local
2887 const SDValue Log2E = DAG.getConstantFP(numbers::log2e, SL, VT); in lowerFEXPUnsafe()
2889 if (VT != MVT::f32 || !needsDenormHandlingF32(DAG, X, Flags)) { in lowerFEXPUnsafe()
2891 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, X, Log2E, Flags); in lowerFEXPUnsafe()
2892 return DAG.getNode(VT == MVT::f32 ? (unsigned)AMDGPUISD::EXP in lowerFEXPUnsafe()
2894 SL, VT, Mul, Flags); in lowerFEXPUnsafe()
2897 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in lowerFEXPUnsafe()
2899 SDValue Threshold = DAG.getConstantFP(-0x1.5d58a0p+6f, SL, VT); in lowerFEXPUnsafe()
2902 SDValue ScaleOffset = DAG.getConstantFP(0x1.0p+6f, SL, VT); in lowerFEXPUnsafe()
2904 SDValue ScaledX = DAG.getNode(ISD::FADD, SL, VT, X, ScaleOffset, Flags); in lowerFEXPUnsafe()
2907 DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, ScaledX, X); in lowerFEXPUnsafe()
2909 SDValue ExpInput = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, Log2E, Flags); in lowerFEXPUnsafe()
2911 SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, ExpInput, Flags); in lowerFEXPUnsafe()
2913 SDValue ResultScaleFactor = DAG.getConstantFP(0x1.969d48p-93f, SL, VT); in lowerFEXPUnsafe()
2915 DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScaleFactor, Flags); in lowerFEXPUnsafe()
2917 return DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, AdjustedResult, Exp2, in lowerFEXPUnsafe()
2921 /// Emit approx-funcs appropriate lowering for exp10. inf/nan should still be
2926 const EVT VT = X.getValueType(); in lowerFEXP10Unsafe() local
2927 const unsigned Exp2Op = VT == MVT::f32 ? AMDGPUISD::EXP : ISD::FEXP2; in lowerFEXP10Unsafe()
2929 if (VT != MVT::f32 || !needsDenormHandlingF32(DAG, X, Flags)) { in lowerFEXP10Unsafe()
2930 // exp2(x * 0x1.a92000p+1f) * exp2(x * 0x1.4f0978p-11f); in lowerFEXP10Unsafe()
2931 SDValue K0 = DAG.getConstantFP(0x1.a92000p+1f, SL, VT); in lowerFEXP10Unsafe()
2932 SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT); in lowerFEXP10Unsafe()
2934 SDValue Mul0 = DAG.getNode(ISD::FMUL, SL, VT, X, K0, Flags); in lowerFEXP10Unsafe()
2935 SDValue Exp2_0 = DAG.getNode(Exp2Op, SL, VT, Mul0, Flags); in lowerFEXP10Unsafe()
2936 SDValue Mul1 = DAG.getNode(ISD::FMUL, SL, VT, X, K1, Flags); in lowerFEXP10Unsafe()
2937 SDValue Exp2_1 = DAG.getNode(Exp2Op, SL, VT, Mul1, Flags); in lowerFEXP10Unsafe()
2938 return DAG.getNode(ISD::FMUL, SL, VT, Exp2_0, Exp2_1); in lowerFEXP10Unsafe()
2941 // bool s = x < -0x1.2f7030p+5f; in lowerFEXP10Unsafe()
2944 // exp2(x * 0x1.4f0978p-11f) * in lowerFEXP10Unsafe()
2945 // (s ? 0x1.9f623ep-107f : 1.0f); in lowerFEXP10Unsafe()
2947 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in lowerFEXP10Unsafe()
2949 SDValue Threshold = DAG.getConstantFP(-0x1.2f7030p+5f, SL, VT); in lowerFEXP10Unsafe()
2952 SDValue ScaleOffset = DAG.getConstantFP(0x1.0p+5f, SL, VT); in lowerFEXP10Unsafe()
2953 SDValue ScaledX = DAG.getNode(ISD::FADD, SL, VT, X, ScaleOffset, Flags); in lowerFEXP10Unsafe()
2955 DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, ScaledX, X); in lowerFEXP10Unsafe()
2957 SDValue K0 = DAG.getConstantFP(0x1.a92000p+1f, SL, VT); in lowerFEXP10Unsafe()
2958 SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT); in lowerFEXP10Unsafe()
2960 SDValue Mul0 = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, K0, Flags); in lowerFEXP10Unsafe()
2961 SDValue Exp2_0 = DAG.getNode(Exp2Op, SL, VT, Mul0, Flags); in lowerFEXP10Unsafe()
2962 SDValue Mul1 = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, K1, Flags); in lowerFEXP10Unsafe()
2963 SDValue Exp2_1 = DAG.getNode(Exp2Op, SL, VT, Mul1, Flags); in lowerFEXP10Unsafe()
2965 SDValue MulExps = DAG.getNode(ISD::FMUL, SL, VT, Exp2_0, Exp2_1, Flags); in lowerFEXP10Unsafe()
2967 SDValue ResultScaleFactor = DAG.getConstantFP(0x1.9f623ep-107f, SL, VT); in lowerFEXP10Unsafe()
2969 DAG.getNode(ISD::FMUL, SL, VT, MulExps, ResultScaleFactor, Flags); in lowerFEXP10Unsafe()
2971 return DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, AdjustedResult, MulExps, in lowerFEXP10Unsafe()
2976 EVT VT = Op.getValueType(); in lowerFEXP() local
2979 SDNodeFlags Flags = Op->getFlags(); in lowerFEXP()
2982 if (VT.getScalarType() == MVT::f16) { in lowerFEXP()
2987 if (VT.isVector()) in lowerFEXP()
2990 // exp(f16 x) -> in lowerFEXP()
2996 return DAG.getNode(ISD::FP_ROUND, SL, VT, Lowered, in lowerFEXP()
3000 assert(VT == MVT::f32); in lowerFEXP()
3003 // library behavior. Also, is known-not-daz source sufficient? in lowerFEXP()
3020 // f = x*(64/ln(2)) - n in lowerFEXP()
3021 // r = f*(ln(2)/64) = x - n*(ln(2)/64) in lowerFEXP()
3037 if (Subtarget->hasFastFMAF32()) { in lowerFEXP()
3039 const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits in lowerFEXP()
3041 const float cc_exp10 = 0x1.2f346ep-24f; in lowerFEXP()
3043 SDValue C = DAG.getConstantFP(IsExp10 ? c_exp10 : c_exp, SL, VT); in lowerFEXP()
3044 SDValue CC = DAG.getConstantFP(IsExp10 ? cc_exp10 : cc_exp, SL, VT); in lowerFEXP()
3046 PH = DAG.getNode(ISD::FMUL, SL, VT, X, C, Flags); in lowerFEXP()
3047 SDValue NegPH = DAG.getNode(ISD::FNEG, SL, VT, PH, Flags); in lowerFEXP()
3048 SDValue FMA0 = DAG.getNode(ISD::FMA, SL, VT, X, C, NegPH, Flags); in lowerFEXP()
3049 PL = DAG.getNode(ISD::FMA, SL, VT, X, CC, FMA0, Flags); in lowerFEXP()
3052 const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits in lowerFEXP()
3055 const float cl_exp10 = 0x1.4f0978p-11f; in lowerFEXP()
3057 SDValue CH = DAG.getConstantFP(IsExp10 ? ch_exp10 : ch_exp, SL, VT); in lowerFEXP()
3058 SDValue CL = DAG.getConstantFP(IsExp10 ? cl_exp10 : cl_exp, SL, VT); in lowerFEXP()
3063 SDValue XH = DAG.getNode(ISD::BITCAST, SL, VT, XHAsInt); in lowerFEXP()
3064 SDValue XL = DAG.getNode(ISD::FSUB, SL, VT, X, XH, Flags); in lowerFEXP()
3066 PH = DAG.getNode(ISD::FMUL, SL, VT, XH, CH, Flags); in lowerFEXP()
3068 SDValue XLCL = DAG.getNode(ISD::FMUL, SL, VT, XL, CL, Flags); in lowerFEXP()
3069 SDValue Mad0 = getMad(DAG, SL, VT, XL, CH, XLCL, Flags); in lowerFEXP()
3070 PL = getMad(DAG, SL, VT, XH, CL, Mad0, Flags); in lowerFEXP()
3073 SDValue E = DAG.getNode(ISD::FROUNDEVEN, SL, VT, PH, Flags); in lowerFEXP()
3076 SDValue PHSubE = DAG.getNode(ISD::FSUB, SL, VT, PH, E, FlagsNoContract); in lowerFEXP()
3078 SDValue A = DAG.getNode(ISD::FADD, SL, VT, PHSubE, PL, Flags); in lowerFEXP()
3080 SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, A, Flags); in lowerFEXP()
3082 SDValue R = DAG.getNode(ISD::FLDEXP, SL, VT, Exp2, IntE, Flags); in lowerFEXP()
3085 DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT); in lowerFEXP()
3087 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); in lowerFEXP()
3088 SDValue Zero = DAG.getConstantFP(0.0, SL, VT); in lowerFEXP()
3092 R = DAG.getNode(ISD::SELECT, SL, VT, Underflow, Zero, R); in lowerFEXP()
3097 DAG.getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT); in lowerFEXP()
3101 DAG.getConstantFP(APFloat::getInf(APFloat::IEEEsingle()), SL, VT); in lowerFEXP()
3102 R = DAG.getNode(ISD::SELECT, SL, VT, Overflow, Inf, R); in lowerFEXP()
3130 SDValue NumExtBits = DAG.getConstant(32u - NumBits, SL, MVT::i32); in lowerCTLZResults()
3156 bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64; in LowerCTLZ_CTTZ()
3159 // (ctlz hi:lo) -> (umin (ffbh src), 32) in LowerCTLZ_CTTZ()
3160 // (cttz hi:lo) -> (umin (ffbl src), 32) in LowerCTLZ_CTTZ()
3161 // (ctlz_zero_undef src) -> (ffbh src) in LowerCTLZ_CTTZ()
3162 // (cttz_zero_undef src) -> (ffbl src) in LowerCTLZ_CTTZ()
3164 // 64-bit scalar version produces a 32-bit result in LowerCTLZ_CTTZ()
3165 // (ctlz hi:lo) -> (umin (S_FLBIT_I32_B64 src), 64) in LowerCTLZ_CTTZ()
3166 // (cttz hi:lo) -> (umin (S_FF1_I32_B64 src), 64) in LowerCTLZ_CTTZ()
3167 // (ctlz_zero_undef src) -> (S_FLBIT_I32_B64 src) in LowerCTLZ_CTTZ()
3168 // (cttz_zero_undef src) -> (S_FF1_I32_B64 src) in LowerCTLZ_CTTZ()
3184 // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64) in LowerCTLZ_CTTZ()
3185 // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64) in LowerCTLZ_CTTZ()
3186 // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32)) in LowerCTLZ_CTTZ()
3187 // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo)) in LowerCTLZ_CTTZ()
3208 // The regular method converting a 64-bit integer to float roughly consists of in LowerINT_TO_FP32()
3210 // conversion from a 64-bit integer to a float is essentially the same as the in LowerINT_TO_FP32()
3211 // one from a 32-bit integer. The only difference is that it has more in LowerINT_TO_FP32()
3212 // trailing bits to be rounded. To leverage the native 32-bit conversion, a in LowerINT_TO_FP32()
3213 // 64-bit integer could be preprocessed and fit into a 32-bit integer then in LowerINT_TO_FP32()
3221 // // reduced to a 32-bit one automatically. in LowerINT_TO_FP32()
3226 // // convert it as a 32-bit integer and scale the result back. in LowerINT_TO_FP32()
3227 // return uitofp(hi) * 2^(32 - shamt); in LowerINT_TO_FP32()
3241 if (Signed && Subtarget->isGCN()) { in LowerINT_TO_FP32()
3243 // i.e. Hi is 0 or -1. However, that only needs to take the MSB into in LowerINT_TO_FP32()
3245 // - 32 if Lo and Hi have opposite signs; in LowerINT_TO_FP32()
3246 // - 33 if Lo and Hi have the same sign. in LowerINT_TO_FP32()
3251 // - -1 if Lo and Hi have opposite signs; and in LowerINT_TO_FP32()
3252 // - 0 otherwise. in LowerINT_TO_FP32()
3256 // umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1. in LowerINT_TO_FP32()
3260 // umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31). in LowerINT_TO_FP32()
3291 // Normalize the given 64-bit integer. in LowerINT_TO_FP32()
3299 // Get the 32-bit normalized integer. in LowerINT_TO_FP32()
3301 // Convert the normalized 32-bit integer into f32. in LowerINT_TO_FP32()
3303 (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP; in LowerINT_TO_FP32()
3307 // 64-bit integer is converted as a 32-bit one. in LowerINT_TO_FP32()
3311 if (Subtarget->isGCN()) in LowerINT_TO_FP32()
3315 // part directly to emulate the multiplication of 2^ShAmt. That 8-bit in LowerINT_TO_FP32()
3347 // TODO: Should this propagate fast-math-flags? in LowerINT_TO_FP64()
3378 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { in LowerUINT_TO_FP()
3426 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { in LowerSINT_TO_FP()
3455 // The basic idea of converting a floating point number into a pair of 32-bit in LowerFP_TO_INT64()
3459 // hif := floor(tf * 2^-32); in LowerFP_TO_INT64()
3460 // lof := tf - hif * 2^32; // lof is always positive due to floor. in LowerFP_TO_INT64()
3467 // However, a 32-bit floating point number has only a 23-bit mantissa and in LowerFP_TO_INT64()
3481 llvm::bit_cast<double>(UINT64_C(/*2^-32*/ 0x3df0000000000000)), SL, in LowerFP_TO_INT64()
3484 llvm::bit_cast<double>(UINT64_C(/*-2^32*/ 0xc1f0000000000000)), SL, in LowerFP_TO_INT64()
3488 llvm::bit_cast<float>(UINT32_C(/*2^-32*/ 0x2f800000)), SL, SrcVT); in LowerFP_TO_INT64()
3490 llvm::bit_cast<float>(UINT32_C(/*-2^32*/ 0xcf800000)), SL, SrcVT); in LowerFP_TO_INT64()
3492 // TODO: Should this propagate fast-math-flags? in LowerFP_TO_INT64()
3512 // r := xor(r, sign) - sign; in LowerFP_TO_INT64()
3536 // f64 -> f16 conversion using round-to-nearest-even rounding mode. in LowerFP_TO_FP16()
3554 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); in LowerFP_TO_FP16()
3578 // B = clamp(1-E, 0, 13); in LowerFP_TO_FP16()
3666 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); in LowerSIGN_EXTEND_INREG()
3667 MVT VT = Op.getSimpleValueType(); in LowerSIGN_EXTEND_INREG() local
3668 MVT ScalarVT = VT.getScalarType(); in LowerSIGN_EXTEND_INREG()
3670 assert(VT.isVector()); in LowerSIGN_EXTEND_INREG()
3676 unsigned NElts = VT.getVectorNumElements(); in LowerSIGN_EXTEND_INREG()
3684 return DAG.getBuildVector(VT, DL, Args); in LowerSIGN_EXTEND_INREG()
3687 //===----------------------------------------------------------------------===//
3689 //===----------------------------------------------------------------------===//
3696 EVT VT = Op.getValueType(); in isI24() local
3697 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated in isI24()
3698 // as unsigned 24-bit values. in isI24()
3706 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN; in simplifyMul24()
3708 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0); in simplifyMul24()
3709 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1); in simplifyMul24()
3710 unsigned NewOpcode = Node24->getOpcode(); in simplifyMul24()
3712 unsigned IID = Node24->getConstantOperandVal(0); in simplifyMul24()
3727 llvm_unreachable("Expected 24-bit mul intrinsic"); in simplifyMul24()
3739 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(), in simplifyMul24()
3757 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width); in constantFoldBFE()
3758 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width); in constantFoldBFE()
3766 for (SDNode *U : Val->uses()) { in hasVolatileUser()
3768 if (M->isVolatile()) in hasVolatileUser()
3776 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const { in shouldCombineMemoryType()
3778 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT)) in shouldCombineMemoryType()
3781 if (!VT.isByteSized()) in shouldCombineMemoryType()
3784 unsigned Size = VT.getStoreSize(); in shouldCombineMemoryType()
3786 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector()) in shouldCombineMemoryType()
3803 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN)) in performLoadCombine()
3808 EVT VT = LN->getMemoryVT(); in performLoadCombine() local
3810 unsigned Size = VT.getStoreSize(); in performLoadCombine()
3811 Align Alignment = LN->getAlign(); in performLoadCombine()
3812 if (Alignment < Size && isTypeLegal(VT)) { in performLoadCombine()
3814 unsigned AS = LN->getAddressSpace(); in performLoadCombine()
3820 VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) { in performLoadCombine()
3821 if (VT.isVector()) in performLoadCombine()
3834 if (!shouldCombineMemoryType(VT)) in performLoadCombine()
3837 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); in performLoadCombine()
3840 = DAG.getLoad(NewVT, SL, LN->getChain(), in performLoadCombine()
3841 LN->getBasePtr(), LN->getMemOperand()); in performLoadCombine()
3843 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad); in performLoadCombine()
3856 if (!SN->isSimple() || !ISD::isNormalStore(SN)) in performStoreCombine()
3859 EVT VT = SN->getMemoryVT(); in performStoreCombine() local
3860 unsigned Size = VT.getStoreSize(); in performStoreCombine()
3864 Align Alignment = SN->getAlign(); in performStoreCombine()
3865 if (Alignment < Size && isTypeLegal(VT)) { in performStoreCombine()
3867 unsigned AS = SN->getAddressSpace(); in performStoreCombine()
3874 VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) { in performStoreCombine()
3875 if (VT.isVector()) in performStoreCombine()
3885 if (!shouldCombineMemoryType(VT)) in performStoreCombine()
3888 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); in performStoreCombine()
3889 SDValue Val = SN->getValue(); in performStoreCombine()
3896 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal); in performStoreCombine()
3900 return DAG.getStore(SN->getChain(), SL, CastVal, in performStoreCombine()
3901 SN->getBasePtr(), SN->getMemOperand()); in performStoreCombine()
3910 SDValue N0 = N->getOperand(0); in performAssertSZExtCombine()
3912 // (vt2 (assertzext (truncate vt0:x), vt1)) -> in performAssertSZExtCombine()
3915 SDValue N1 = N->getOperand(1); in performAssertSZExtCombine()
3916 EVT ExtVT = cast<VTSDNode>(N1)->getVT(); in performAssertSZExtCombine()
3922 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1); in performAssertSZExtCombine()
3923 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg); in performAssertSZExtCombine()
3932 unsigned IID = N->getConstantOperandVal(0); in performIntrinsicWOChainCombine()
3945 SDValue Src = N->getOperand(1); in performIntrinsicWOChainCombine()
3949 // frexp_exp (fneg x) -> frexp_exp x in performIntrinsicWOChainCombine()
3950 // frexp_exp (fabs x) -> frexp_exp x in performIntrinsicWOChainCombine()
3951 // frexp_exp (fneg (fabs x)) -> frexp_exp x in performIntrinsicWOChainCombine()
3952 SDValue Src = N->getOperand(1); in performIntrinsicWOChainCombine()
3956 return SDValue(DCI.DAG.UpdateNodeOperands(N, N->getOperand(0), PeekSign), in performIntrinsicWOChainCombine()
3964 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3980 // Re-visit the ands. It's possible we eliminated one of them and it could in splitBinaryBitConstantOpImpl()
3991 EVT VT = N->getValueType(0); in performShlCombine() local
3993 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); in performShlCombine()
3997 SDValue LHS = N->getOperand(0); in performShlCombine()
3998 unsigned RHSVal = RHS->getZExtValue(); in performShlCombine()
4005 switch (LHS->getOpcode()) { in performShlCombine()
4011 SDValue X = LHS->getOperand(0); in performShlCombine()
4013 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 && in performShlCombine()
4016 // (shl ([asz]ext i16:x), 16) -> build_vector 0, x in performShlCombine()
4018 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) }); in performShlCombine()
4023 if (VT != MVT::i64) in performShlCombine()
4031 return DAG.getZExtOrTrunc(Shl, SL, VT); in performShlCombine()
4035 if (VT != MVT::i64) in performShlCombine()
4038 // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32)) in performShlCombine()
4040 // On some subtargets, 64-bit shift is a quarter rate instruction. In the in performShlCombine()
4041 // common case, splitting this into a move and a 32-bit shift is faster and in performShlCombine()
4046 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); in performShlCombine()
4059 if (N->getValueType(0) != MVT::i64) in performSraCombine()
4062 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); in performSraCombine()
4068 unsigned RHSVal = RHS->getZExtValue(); in performSraCombine()
4070 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) in performSraCombine()
4072 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); in performSraCombine()
4080 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) in performSraCombine()
4082 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); in performSraCombine()
4094 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); in performSrlCombine()
4098 EVT VT = N->getValueType(0); in performSrlCombine() local
4099 SDValue LHS = N->getOperand(0); in performSrlCombine()
4100 unsigned ShiftAmt = RHS->getZExtValue(); in performSrlCombine()
4104 // fold (srl (and x, c1 << c2), c2) -> (and (srl x, c2), c1) in performSrlCombine()
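// Concrete instance (illustrative): (srl (and x, 0x00ffff00), 8)
//   -> (and (srl x, 8), 0x0000ffff)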
4109 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) && in performSrlCombine()
4112 ISD::AND, SL, VT, in performSrlCombine()
4113 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)), in performSrlCombine()
4114 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1))); in performSrlCombine()
4119 if (VT != MVT::i64) in performSrlCombine()
4127 // build_pair (srl hi_32(x), C - 32), 0 in performSrlCombine()
4132 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); in performSrlCombine()
4144 EVT VT = N->getValueType(0); in performTruncateCombine() local
4145 SDValue Src = N->getOperand(0); in performTruncateCombine()
4147 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x) in performTruncateCombine()
4148 if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) { in performTruncateCombine()
4153 if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) { in performTruncateCombine()
4159 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0); in performTruncateCombine()
4164 // Equivalent of above for accessing the high element of a vector as an in performTruncateCombine()
4166 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y) in performTruncateCombine()
4167 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) { in performTruncateCombine()
4169 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) { in performTruncateCombine()
4180 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt); in performTruncateCombine()
4186 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit. in performTruncateCombine()
4188 // i16 (trunc (srl i64:x, K)), K <= 16 -> in performTruncateCombine()
4190 if (VT.getScalarSizeInBits() < 32) { in performTruncateCombine()
4199 // - For left shifts, do the transform as long as the shift in performTruncateCombine()
4201 // - For right shift, do it if ShiftAmt <= (32 - Size) to avoid in performTruncateCombine()
4202 // losing information stored in the high bits when truncating. in performTruncateCombine()
4204 (Src.getOpcode() == ISD::SHL) ? 31 : (32 - VT.getScalarSizeInBits()); in performTruncateCombine()
4206 EVT MidVT = VT.isVector() ? in performTruncateCombine()
4208 VT.getVectorNumElements()) : MVT::i32; in performTruncateCombine()
4222 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift); in performTruncateCombine()
4253 if (V->getOpcode() != ISD::ADD) in getAddOneOp()
4256 return isOneConstant(V->getOperand(1)) ? V->getOperand(0) : SDValue(); in getAddOneOp()
4261 assert(N->getOpcode() == ISD::MUL); in performMulCombine()
4262 EVT VT = N->getValueType(0); in performMulCombine() local
4264 // Don't generate 24-bit multiplies on values that are in SGPRs, since in performMulCombine()
4265 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs in performMulCombine()
4268 if (!N->isDivergent()) in performMulCombine()
4271 unsigned Size = VT.getSizeInBits(); in performMulCombine()
4272 if (VT.isVector() || Size > 64) in performMulCombine()
4278 SDValue N0 = N->getOperand(0); in performMulCombine()
4279 SDValue N1 = N->getOperand(1); in performMulCombine()
4281 // Undo InstCombine canonicalize X * (Y + 1) -> X * Y + X to enable mad matching. in performMulCombine()
4284 // mul x, (add y, 1) -> add (mul x, y), x in performMulCombine()
4285 auto IsFoldableAdd = [](SDValue V) -> SDValue { in performMulCombine()
4290 if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool { in performMulCombine()
4291 return U->getOpcode() == ISD::MUL; in performMulCombine()
4301 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N1, MulOper); in performMulCombine()
4302 return DAG.getNode(ISD::ADD, DL, VT, MulVal, N1); in performMulCombine()
4306 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N0, MulOper); in performMulCombine()
4307 return DAG.getNode(ISD::ADD, DL, VT, MulVal, N0); in performMulCombine()
4311 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16)) in performMulCombine()
4316 // we can assume the high bits are whatever we want; use the underlying value in performMulCombine()
4317 // to keep the unknown high bits from interfering. in performMulCombine()
4326 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { in performMulCombine()
4330 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { in performMulCombine()
4339 // for signed multiply of 8 and 16-bit types. in performMulCombine()
4340 return DAG.getSExtOrTrunc(Mul, DL, VT); in performMulCombine()
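  // Illustrative sketch (an assumption about the path above, not from the
  // source): mul i32 (zext i16 a), (zext i16 b) has both operands known to fit
  // in 24 bits, so it can take the 24-bit multiply path; the final
  // sext/zext-or-trunc then brings the 32-bit product back to VT.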
4346 if (N->getValueType(0) != MVT::i32) in performMulLoHiCombine()
4352 bool Signed = N->getOpcode() == ISD::SMUL_LOHI; in performMulLoHiCombine()
4353 SDValue N0 = N->getOperand(0); in performMulLoHiCombine()
4354 SDValue N1 = N->getOperand(1); in performMulLoHiCombine()
4358 // we can assume the high bits are whatever we want; use the underlying value in performMulLoHiCombine()
4359 // to keep the unknown high bits from interfering. in performMulLoHiCombine()
4365 // Try to use two fast 24-bit multiplies (one for each half of the result) instead of one slower full-width multiply. in performMulLoHiCombine()
4370 if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { in performMulLoHiCombine()
4377 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { in performMulLoHiCombine()
4395 EVT VT = N->getValueType(0); in performMulhsCombine() local
4397 if (!Subtarget->hasMulI24() || VT.isVector()) in performMulhsCombine()
4400 // Don't generate 24-bit multiplies on values that are in SGPRs, since in performMulhsCombine()
4401 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs just to do the multiply). in performMulhsCombine()
4406 if (Subtarget->hasSMulHi() && !N->isDivergent()) in performMulhsCombine()
4412 SDValue N0 = N->getOperand(0); in performMulhsCombine()
4413 SDValue N1 = N->getOperand(1); in performMulhsCombine()
4423 return DAG.getSExtOrTrunc(Mulhi, DL, VT); in performMulhsCombine()
4428 EVT VT = N->getValueType(0); in performMulhuCombine() local
4430 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32) in performMulhuCombine()
4433 // Don't generate 24-bit multiplies on values that are in SGPRs, since in performMulhuCombine()
4434 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs just to do the multiply). in performMulhuCombine()
4439 if (Subtarget->hasSMulHi() && !N->isDivergent()) in performMulhuCombine()
4445 SDValue N0 = N->getOperand(0); in performMulhuCombine()
4446 SDValue N1 = N->getOperand(1); in performMulhuCombine()
4456 return DAG.getZExtOrTrunc(Mulhi, DL, VT); in performMulhuCombine()
4463 EVT VT = Op.getValueType(); in getFFBX_U32() local
4464 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT); in getFFBX_U32()
4465 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() && in getFFBX_U32()
4469 if (VT != MVT::i32) in getFFBX_U32()
4473 if (VT != MVT::i32) in getFFBX_U32()
4474 FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX); in getFFBX_U32()
4479 // The native instructions return -1 on 0 input. Optimize out a select that
4480 // produces -1 on 0.
4493 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); in performCtlz_CttzCombine()
4496 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x in performCtlz_CttzCombine()
4497 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x in performCtlz_CttzCombine()
4506 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x in performCtlz_CttzCombine()
4507 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x in performCtlz_CttzCombine()
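  // Worked example (illustrative, relying only on the -1-on-zero behaviour
  // noted above): in
  //   select (setcc x, 0, eq), -1, (ctlz_zero_undef x)
  // the select is redundant, because ffbh_u32 x already produces the
  // ctlz result for x != 0 and -1 for x == 0, so the whole expression folds
  // to a single ffbh_u32 x.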
4527 EVT VT = N1.getValueType(); in distributeOpThroughSelect() local
4529 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond, in distributeOpThroughSelect()
4532 return DAG.getNode(Op, SL, VT, NewSelect); in distributeOpThroughSelect()
4537 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
4538 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
4540 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
4541 // select c, (fabs x), +k -> fabs (select c, x, k)
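// Worked example (illustrative): select c, (fneg x), 2.0 becomes
// fneg (select c, x, -2.0); negating the constant once lets the single fneg
// be hoisted out of the select, matching the "select c, (fneg x), k" case
// above.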
4550 EVT VT = N.getValueType(); in foldFreeOpFromSelect() local
4572 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative. in foldFreeOpFromSelect()
4588 if (LHS.getOpcode() == ISD::FABS && CRHS->isNegative()) in foldFreeOpFromSelect()
4605 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); in foldFreeOpFromSelect()
4610 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, in foldFreeOpFromSelect()
4613 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect); in foldFreeOpFromSelect()
4625 SDValue Cond = N->getOperand(0); in performSelectCombine()
4629 EVT VT = N->getValueType(0); in performSelectCombine() local
4634 SDValue True = N->getOperand(1); in performSelectCombine()
4635 SDValue False = N->getOperand(2); in performSelectCombine()
4643 // select (setcc x, y), k, x -> select (setccinv x, y), x, k in performSelectCombine()
4647 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType()); in performSelectCombine()
4650 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True); in performSelectCombine()
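  // Worked example (illustrative): select (setcc x, y, lt), k, x becomes
  // select (setcc x, y, ge), x, k; inverting the condition swaps the select
  // arms, so the pass-through value x ends up in the true slot while the
  // result is unchanged.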
4653 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) { in performSelectCombine()
4655 = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI); in performSelectCombine()
4680 if (C->isZero()) in getConstantNegateCost()
4681 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive; in getConstantNegateCost()
4683 if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF())) in getConstantNegateCost()
4684 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive; in getConstantNegateCost()
4748 SDValue N0 = N->getOperand(0); in performFNegCombine()
4749 EVT VT = N->getValueType(0); in performFNegCombine() local
4762 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y)) in performFNegCombine()
4767 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); in performFNegCombine()
4772 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); in performFNegCombine()
4776 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags()); in performFNegCombine()
4780 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); in performFNegCombine()
4785 // (fneg (fmul x, y)) -> (fmul x, (fneg y)) in performFNegCombine()
4786 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y)) in performFNegCombine()
4795 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); in performFNegCombine()
4797 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags()); in performFNegCombine()
4801 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); in performFNegCombine()
4810 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z)) in performFNegCombine()
4820 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS); in performFNegCombine()
4823 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); in performFNegCombine()
4827 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS); in performFNegCombine()
4831 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); in performFNegCombine()
4842 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y) in performFNegCombine()
4843 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y) in performFNegCombine()
4844 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y) in performFNegCombine()
4845 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y) in performFNegCombine()
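  // Worked arithmetic check (illustrative): with x = 2.0 and y = -3.0,
  // fneg (fmaxnum 2.0, -3.0) = -2.0 and fminnum (-2.0, 3.0) = -2.0, so pushing
  // the fneg through flips max to min (and vice versa) without changing the
  // result.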
4855 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); in performFNegCombine()
4856 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); in performFNegCombine()
4859 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags()); in performFNegCombine()
4863 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); in performFNegCombine()
4869 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags()); in performFNegCombine()
4871 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags()); in performFNegCombine()
4876 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res); in performFNegCombine()
4879 for (SDNode *U : Neg->uses()) in performFNegCombine()
4888 case ISD::FNEARBYINT: // XXX - Should fround be handled? in performFNegCombine()
4898 // (fneg (fp_extend (fneg x))) -> (fp_extend x) in performFNegCombine()
4899 // (fneg (rcp (fneg x))) -> (rcp x) in performFNegCombine()
4900 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0)); in performFNegCombine()
4906 // (fneg (fp_extend x)) -> (fp_extend (fneg x)) in performFNegCombine()
4907 // (fneg (rcp x)) -> (rcp (fneg x)) in performFNegCombine()
4909 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags()); in performFNegCombine()
4915 // (fneg (fp_round (fneg x))) -> (fp_round x) in performFNegCombine()
4916 return DAG.getNode(ISD::FP_ROUND, SL, VT, in performFNegCombine()
4923 // (fneg (fp_round x)) -> (fp_round (fneg x)) in performFNegCombine()
4925 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1)); in performFNegCombine()
4928 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal f16. in performFNegCombine()
4936 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000) in performFNegCombine()
4939 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg); in performFNegCombine()
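  // Worked example (illustrative): the i16 half encoding of 1.0 is 0x3C00;
  // xor with 0x8000 gives 0xBC00, the encoding of -1.0, so flipping bit 15 of
  // the integer source negates the converted value.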
4942 // fneg (select c, a, b) -> select c, (fneg a), (fneg b) in performFNegCombine()
4950 SDValue HighBits = BCSrc.getOperand(BCSrc.getNumOperands() - 1); in performFNegCombine()
4955 // f64 fneg only really needs to operate on the high half of the value. in performFNegCombine()
4960 // fneg (f64 (bitcast (build_vector x, y))) -> in performFNegCombine()
4969 SmallVector<SDValue, 8> Ops(BCSrc->op_begin(), BCSrc->op_end()); in performFNegCombine()
4974 SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, Build); in performFNegCombine()
4977 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Result)); in performFNegCombine()
4981 if (BCSrc.getOpcode() == ISD::SELECT && VT == MVT::f32 && in performFNegCombine()
4983 // fneg (bitcast (f32 (select cond, i32:lhs, i32:rhs))) -> in performFNegCombine()
5010 SDValue N0 = N->getOperand(0); in performFAbsCombine()
5017 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal"); in performFAbsCombine()
5022 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff) in performFAbsCombine()
5025 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs); in performFAbsCombine()
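  // Worked example (illustrative): the half encoding of -1.0 is 0xBC00;
  // and with 0x7fff clears bit 15 and yields 0x3C00 (= 1.0), so masking the
  // integer source computes the absolute value of the converted result.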
5034 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); in performRcpCombine()
5038 // XXX - Should this flush denormals? in performRcpCombine()
5039 const APFloat &Val = CFP->getValueAPF(); in performRcpCombine()
5041 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0)); in performRcpCombine()
5049 switch(N->getOpcode()) { in PerformDAGCombine()
5053 EVT DestVT = N->getValueType(0); in PerformDAGCombine()
5061 SDValue Src = N->getOperand(0); in PerformDAGCombine()
5088 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k) in PerformDAGCombine()
5090 SDValue Src = N->getOperand(0); in PerformDAGCombine()
5093 uint64_t CVal = C->getZExtValue(); in PerformDAGCombine()
5101 const APInt &Val = C->getValueAPF().bitcastToAPInt(); in PerformDAGCombine()
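      // Worked example (illustrative, for the integer case described above):
      // bitcasting the i64 constant 0x1122334455667788 to v2i32 yields
      // build_vector (0x55667788, 0x11223344), with element 0 holding the low
      // 32 bits and element 1 the high 32 bits.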
5159 assert(!N->getValueType(0).isVector() && in PerformDAGCombine()
5161 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2)); in PerformDAGCombine()
5165 uint32_t WidthVal = Width->getZExtValue() & 0x1f; in PerformDAGCombine()
5169 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); in PerformDAGCombine()
5173 SDValue BitsFrom = N->getOperand(0); in PerformDAGCombine()
5174 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f; in PerformDAGCombine()
5176 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32; in PerformDAGCombine()
5180 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal); in PerformDAGCombine()
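      // Worked example (illustrative): a signed BFE of width 8 sign-extends
      // from bit 7, so bits 7..31 all match the sign bit and the result has
      // 32 - 8 + 1 = 25 known sign bits; the unsigned form zero-fills instead,
      // giving 32 - 8 = 24.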
5204 CVal->getSExtValue(), in PerformDAGCombine()
5211 CVal->getZExtValue(), in PerformDAGCombine()
5218 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) { in PerformDAGCombine()
5254 SDValue N0 = N->getOperand(0); in PerformDAGCombine()
5255 SDValue N1 = N->getOperand(1); in PerformDAGCombine()
5256 SDValue N2 = N->getOperand(2); in PerformDAGCombine()
5257 EVT VT = N->getValueType(0); in PerformDAGCombine() local
5268 return V.isNegative() ? -Zero : Zero; in PerformDAGCombine()
5273 APFloat V0 = FTZ(N0CFP->getValueAPF()); in PerformDAGCombine()
5274 APFloat V1 = FTZ(N1CFP->getValueAPF()); in PerformDAGCombine()
5275 APFloat V2 = FTZ(N2CFP->getValueAPF()); in PerformDAGCombine()
5279 return DAG.getConstantFP(FTZ(V0), DL, VT); in PerformDAGCombine()
5287 //===----------------------------------------------------------------------===//
5289 //===----------------------------------------------------------------------===//
5293 Register Reg, EVT VT, in CreateLiveInRegister() argument
5308 return DAG.getRegister(VReg, VT); in CreateLiveInRegister()
5310 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT); in CreateLiveInRegister()
5328 EVT VT, in loadStackInputValue() argument
5333 int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset); in loadStackInputValue()
5338 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4), in loadStackInputValue()
5355 DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32); in storeStackInputValue()
5364 EVT VT, const SDLoc &SL, in loadInputValue() argument
5369 CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) : in loadInputValue()
5370 loadStackInputValue(DAG, VT, SL, Arg.getStackOffset()); in loadInputValue()
5377 V = DAG.getNode(ISD::SRL, SL, VT, V, in loadInputValue()
5378 DAG.getShiftAmountConstant(Shift, VT, SL)); in loadInputValue()
5379 return DAG.getNode(ISD::AND, SL, VT, V, in loadInputValue()
5380 DAG.getConstant(Mask >> Shift, SL, VT)); in loadInputValue()
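  // Worked example (illustrative, with hypothetical packing values): an input
  // packed at bit offset 10 with Mask = 0x3ff << 10 is recovered as
  //   (x >> 10) & 0x3ff
  // i.e. shift the packed word right by Shift, then mask with Mask >> Shift.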
5385 unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset(); in getImplicitParameterOffset()
5386 const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr(); in getImplicitParameterOffset()
5405 return getImplicitParameterOffset(MFI->getExplicitKernArgSize(), Param); in getImplicitParameterOffset()
5582 EVT VT = Operand.getValueType(); in getSqrtEstimate() local
5584 if (VT == MVT::f32) { in getSqrtEstimate()
5586 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand); in getSqrtEstimate()
5598 EVT VT = Operand.getValueType(); in getRecipEstimate() local
5600 if (VT == MVT::f32) { in getRecipEstimate()
5607 return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand); in getRecipEstimate()
5652 uint32_t Width = CWidth->getZExtValue() & 0x1f; in computeKnownBitsForTargetNode()
5655 Known.Zero = APInt::getHighBitsSet(32, 32 - Width); in computeKnownBitsForTargetNode()
5662 // High bits are zero. in computeKnownBitsForTargetNode()
5663 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16); in computeKnownBitsForTargetNode()
5687 unsigned SignBits = 32 - MaxValBits + 1; in computeKnownBitsForTargetNode()
5716 unsigned Sel = CMask->getZExtValue(); in computeKnownBitsForTargetNode()
5747 Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout()); in computeKnownBitsForTargetNode()
5782 unsigned MaxValue = Subtarget->getMaxWorkitemID( in computeKnownBitsForTargetNode()
5803 unsigned SignBits = 32 - Width->getZExtValue() + 1; in ComputeNumSignBitsForTargetNode()
5807 // TODO: Could probably figure something out with non-0 offsets. in ComputeNumSignBitsForTargetNode()
5814 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1; in ComputeNumSignBitsForTargetNode()
5864 switch (MI->getOpcode()) { in computeNumSignBitsForTargetInstr()
5875 auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs(); in computeNumSignBitsForTargetInstr()
6013 switch (RMW->getOperation()) { in shouldExpandAtomicRMWInIR()
6021 const DataLayout &DL = RMW->getFunction()->getDataLayout(); in shouldExpandAtomicRMWInIR()
6022 unsigned ValSize = DL.getTypeSizeInBits(RMW->getType()); in shouldExpandAtomicRMWInIR()
6028 if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) { in shouldExpandAtomicRMWInIR()
6029 unsigned Size = IntTy->getBitWidth(); in shouldExpandAtomicRMWInIR()
6046 for (auto &Op : I->operands()) { in shouldSinkOperands()
6048 if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); })) in shouldSinkOperands()