Lines Matching +full:fsin +full:- +full:output

1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
35 "amdgpu-bypass-slow-div",
36 cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
56 // In order for this to be a signed 24-bit value, bit 23, must in numBitsSigned()
181 // There are no 64-bit extloads. These should be done as a 32-bit extload and in AMDGPUTargetLowering()
182 // an extension to 64-bit. in AMDGPUTargetLowering()
411 if (Subtarget->has16BitInsts()) in AMDGPUTargetLowering()
478 // The hardware supports 32-bit FSHR, but not FSHL. in AMDGPUTargetLowering()
481 // The hardware supports 32-bit ROTR, but not ROTL. in AMDGPUTargetLowering()
537 ISD::FSQRT, ISD::FSIN, ISD::FSUB, in AMDGPUTargetLowering()
609 // The expansion for 64-bit division is enormous. in AMDGPUTargetLowering()
633 const auto Flags = Op.getNode()->getFlags(); in mayIgnoreSignedZero()
640 //===----------------------------------------------------------------------===//
642 //===----------------------------------------------------------------------===//
659 case ISD::FSIN: in fnegFoldsIntoOpcode()
683 unsigned Opc = N->getOpcode(); in fnegFoldsIntoOp()
687 SDValue BCSrc = N->getOperand(0); in fnegFoldsIntoOp()
699 /// \returns true if the operation will definitely need to use a 64-bit
704 return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) || in opMustUseVOP3Encoding()
713 return N->getValueType(0) == MVT::f32; in selectSupportsSourceMods()
723 switch (N->getOpcode()) { in hasSourceMods()
738 switch (N->getConstantOperandVal(0)) { in hasSourceMods()
758 // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus in allUsesHaveSourceMods()
764 MVT VT = N->getValueType(0).getScalarType().getSimpleVT(); in allUsesHaveSourceMods()
766 assert(!N->use_empty()); in allUsesHaveSourceMods()
768 // XXX - Should this limit number of uses to check? in allUsesHaveSourceMods()
769 for (const SDNode *U : N->uses()) { in allUsesHaveSourceMods()
786 // Round to the next multiple of 32-bits. in getTypeForExtReturn()
807 (ScalarVT == MVT::f16 && Subtarget->has16BitInsts())); in isFPImmLegal()
825 // If we are reducing to a 32-bit load or a smaller multi-dword load, in shouldReduceLoadWidth()
830 EVT OldVT = N->getValueType(0); in shouldReduceLoadWidth()
834 unsigned AS = MN->getAddressSpace(); in shouldReduceLoadWidth()
835 // Do not shrink an aligned scalar load to sub-dword. in shouldReduceLoadWidth()
836 // Scalar engine cannot do sub-dword loads. in shouldReduceLoadWidth()
837 // TODO: Update this for GFX12 which does have scalar sub-dword loads. in shouldReduceLoadWidth()
838 if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) && in shouldReduceLoadWidth()
842 MN->isInvariant())) && in shouldReduceLoadWidth()
843 AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand())) in shouldReduceLoadWidth()
846 // Don't produce extloads from sub 32-bit types. SI doesn't have scalar in shouldReduceLoadWidth()
877 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
878 // profitable with the expansion for 64-bit since it's generally good to
889 switch (N->getOpcode()) { in isSDNodeAlwaysUniform()
894 unsigned IntrID = N->getConstantOperandVal(0); in isSDNodeAlwaysUniform()
898 if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() == in isSDNodeAlwaysUniform()
902 case AMDGPUISD::SETCC: // ballot-style instruction in isSDNodeAlwaysUniform()
928 return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags()); in getNegatedExpression()
939 //===---------------------------------------------------------------------===//
941 //===---------------------------------------------------------------------===//
948 (Subtarget->has16BitInsts() && (VT == MVT::f16 || VT == MVT::bf16)); in isFAbsFree()
988 unsigned SrcSize = Source->getScalarSizeInBits(); in isTruncateFree()
989 unsigned DestSize = Dest->getScalarSizeInBits(); in isTruncateFree()
991 if (DestSize == 16 && Subtarget->has16BitInsts()) in isTruncateFree()
998 unsigned SrcSize = Src->getScalarSizeInBits(); in isZExtFree()
999 unsigned DestSize = Dest->getScalarSizeInBits(); in isZExtFree()
1001 if (SrcSize == 16 && Subtarget->has16BitInsts()) in isZExtFree()
1008 // Any register load of a 64-bit value really requires 2 32-bit moves. For all in isZExtFree()
1009 // practical purposes, the extra mov 0 to load a 64-bit is free. As used, in isZExtFree()
1010 // this will enable reducing 64-bit operations to 32-bit, which is always in isZExtFree()
1020 // There aren't really 64-bit registers, but pairs of 32-bit ones and only a in isNarrowingProfitable()
1021 // limited number of native 64-bit operations. Shrinking an operation to fit in isNarrowingProfitable()
1022 // in a single 32-bit register should always be helpful. As currently used, in isNarrowingProfitable()
1024 // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is in isNarrowingProfitable()
1031 assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || in isDesirableToCommuteWithShift()
1032 N->getOpcode() == ISD::SRL) && in isDesirableToCommuteWithShift()
1034 // Always commute pre-type legalization and right shifts. in isDesirableToCommuteWithShift()
1037 N->getOpcode() != ISD::SHL || N->getOperand(0).getOpcode() != ISD::OR) in isDesirableToCommuteWithShift()
1040 // If the only user is an i32 right-shift, then don't destroy a BFE pattern. in isDesirableToCommuteWithShift()
1041 if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 && in isDesirableToCommuteWithShift()
1042 (N->use_begin()->getOpcode() == ISD::SRA || in isDesirableToCommuteWithShift()
1043 N->use_begin()->getOpcode() == ISD::SRL)) in isDesirableToCommuteWithShift()
1053 return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD && in isDesirableToCommuteWithShift()
1054 LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() && in isDesirableToCommuteWithShift()
1055 RHSLd->getExtensionType() == ISD::ZEXTLOAD; in isDesirableToCommuteWithShift()
1057 SDValue LHS = N->getOperand(0).getOperand(0); in isDesirableToCommuteWithShift()
1058 SDValue RHS = N->getOperand(0).getOperand(1); in isDesirableToCommuteWithShift()
1062 //===---------------------------------------------------------------------===//
1064 //===---------------------------------------------------------------------===//
1151 LLVMContext &Ctx = Fn.getParent()->getContext(); in analyzeFormalArgumentsCompute()
1175 // to get accurate in-memory offsets. The "PartOffset" is completely useless in analyzeFormalArgumentsCompute()
1208 // all the floating-point vector types. in analyzeFormalArgumentsCompute()
1276 //===---------------------------------------------------------------------===//
1278 //===---------------------------------------------------------------------===//
1297 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1; in addTokenForArgument()
1305 for (SDNode *U : DAG.getEntryNode().getNode()->uses()) { in addTokenForArgument()
1307 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) { in addTokenForArgument()
1308 if (FI->getIndex() < 0) { in addTokenForArgument()
1309 int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex()); in addTokenForArgument()
1311 InLastByte += MFI.getObjectSize(FI->getIndex()) - 1; in addTokenForArgument()
1336 FuncName = G->getSymbol(); in lowerUnhandledCall()
1338 FuncName = G->getGlobal()->getName(); in lowerUnhandledCall()
1342 DAG.getContext()->diagnose(NoCalls); in lowerUnhandledCall()
1363 DAG.getContext()->diagnose(NoDynamicAlloca); in LowerDYNAMIC_STACKALLOC()
1372 Op->print(errs(), &DAG); in LowerOperation()
1419 switch (N->getOpcode()) { in ReplaceNodeResults()
1462 const GlobalValue *GV = G->getGlobal(); in LowerGlobalAddress()
1464 if (!MFI->isModuleEntryFunction()) { in LowerGlobalAddress()
1471 if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || in LowerGlobalAddress()
1472 G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) { in LowerGlobalAddress()
1473 if (!MFI->isModuleEntryFunction() && in LowerGlobalAddress()
1474 GV->getName() != "llvm.amdgcn.module.lds") { in LowerGlobalAddress()
1478 Fn, "local memory global used by non-kernel function", in LowerGlobalAddress()
1480 DAG.getContext()->diagnose(BadLDSDecl); in LowerGlobalAddress()
1494 // XXX: What does the value of G->getOffset() mean? in LowerGlobalAddress()
1495 assert(G->getOffset() == 0 && in LowerGlobalAddress()
1496 "Do not know what to do with an non-zero offset"); in LowerGlobalAddress()
1501 unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV)); in LowerGlobalAddress()
1520 for (const SDUse &U : Op->ops()) { in LowerCONCAT_VECTORS()
1536 for (const SDUse &U : Op->ops()) in LowerCONCAT_VECTORS()
1555 // Extract 32-bit registers at a time. in LowerEXTRACT_SUBVECTOR()
1599 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get(); in combineFMinMaxLegacyImpl()
1684 // select (fcmp olt (lhs, K)), (fneg lhs), -K in combineFMinMaxLegacy()
1685 // -> fneg (fmin_legacy lhs, K) in combineFMinMaxLegacy()
1689 APFloat NegRHS = neg(CRHS->getValueAPF()); in combineFMinMaxLegacy()
1690 if (NegRHS == CFalse->getValueAPF()) { in combineFMinMaxLegacy()
1735 // otherwise be a 1-vector.
1743 HiVT = NumElts - LoNumElts == 1 in getSplitDestVTs()
1745 : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts); in getSplitDestVTs()
1782 SDValue BasePtr = Load->getBasePtr(); in SplitVectorLoad()
1783 EVT MemVT = Load->getMemoryVT(); in SplitVectorLoad()
1785 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); in SplitVectorLoad()
1796 Align BaseAlign = Load->getAlign(); in SplitVectorLoad()
1799 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT, in SplitVectorLoad()
1800 Load->getChain(), BasePtr, SrcValue, LoMemVT, in SplitVectorLoad()
1801 BaseAlign, Load->getMemOperand()->getFlags()); in SplitVectorLoad()
1804 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(), in SplitVectorLoad()
1806 HiMemVT, HiAlign, Load->getMemOperand()->getFlags()); in SplitVectorLoad()
1831 SDValue BasePtr = Load->getBasePtr(); in WidenOrSplitVectorLoad()
1832 EVT MemVT = Load->getMemoryVT(); in WidenOrSplitVectorLoad()
1834 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); in WidenOrSplitVectorLoad()
1835 Align BaseAlign = Load->getAlign(); in WidenOrSplitVectorLoad()
1838 // Widen from vec3 to vec4 when the load is at least 8-byte aligned in WidenOrSplitVectorLoad()
1839 // or 16-byte fully dereferenceable. Otherwise, split the vector load. in WidenOrSplitVectorLoad()
1852 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue, in WidenOrSplitVectorLoad()
1853 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags()); in WidenOrSplitVectorLoad()
1864 SDValue Val = Store->getValue(); in SplitVectorStore()
1872 EVT MemVT = Store->getMemoryVT(); in SplitVectorStore()
1873 SDValue Chain = Store->getChain(); in SplitVectorStore()
1874 SDValue BasePtr = Store->getBasePtr(); in SplitVectorStore()
1887 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo(); in SplitVectorStore()
1888 Align BaseAlign = Store->getAlign(); in SplitVectorStore()
1894 Store->getMemOperand()->getFlags()); in SplitVectorStore()
1897 HiMemVT, HiAlign, Store->getMemOperand()->getFlags()); in SplitVectorStore()
1902 // This is a shortcut for integer division because we have fast i32<->f32
1904 // float is enough to accurately represent up to a 24-bit signed integer.
1924 unsigned DivBits = BitSize - SignBits; in LowerDIVREM24()
1937 // jq = jq >> (bitsize - 2) in LowerDIVREM24()
1939 DAG.getConstant(BitSize - 2, DL, VT)); in LowerDIVREM24()
1963 // float fqneg = -fq; in LowerDIVREM24()
1969 if (Subtarget->isGCN()) { in LowerDIVREM24()
1972 MFI->getMode().FP32Denormals != DenormalMode::getPreserveSign(); in LowerDIVREM24()
1976 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA in LowerDIVREM24()
2012 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT); in LowerDIVREM24()
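
The LowerDIVREM24() comments above describe the trick: operands that fit in 24 bits (after discounting sign bits) are exactly representable in f32, so a reciprocal-based quotient estimate is within one of the exact answer, a single fix-up step makes it exact, and the result is finally masked with (1 << DivBits) - 1. A minimal host-side C++ sketch of the idea, not the DAG sequence itself; udiv24 is a made-up helper name and std::trunc with plain float math stands in for the hardware rcp/convert path:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>

static uint32_t udiv24(uint32_t a, uint32_t b) {
  assert(a < (1u << 24) && b != 0 && b < (1u << 24));
  float fa = static_cast<float>(a);      // exact: a fits in the f32 mantissa
  float fb = static_cast<float>(b);
  // Quotient estimate via the reciprocal; for 24-bit operands the truncated
  // estimate lands within one of the exact quotient.
  uint32_t q = static_cast<uint32_t>(std::trunc(fa * (1.0f / fb)));
  // Single fix-up step using the exact integer remainder, mirroring the
  // compare-and-select correction the DAG lowering emits.
  int64_t r = int64_t(a) - int64_t(q) * b;
  if (r < 0)
    --q;
  else if (r >= int64_t(b))
    ++q;
  return q;
}

int main() {
  std::printf("%u\n", udiv24(12345678u, 789u));  // prints 15647
}
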
2065 !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA in LowerUDIVREM64()
2066 : MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign() in LowerUDIVREM64()
2094 // First round of UNR (Unsigned integer Newton-Raphson). in LowerUDIVREM64()
2206 const unsigned bitPos = halfBitWidth - i - 1; in LowerUDIVREM64()
2298 SDValue NegOne = DAG.getConstant(-1, DL, VT); in LowerSDIVREM()
2349 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2353 auto Flags = Op->getFlags(); in LowerFREM()
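
The expansion quoted in the LowerFREM() comment is easy to replay on the host. Note this is the literal expansion, which relies on the fast-math flags carried on the node and is not a correctly rounded fmod in every case; fremLowered is a hypothetical helper name:

#include <cmath>
#include <cstdio>

static float fremLowered(float x, float y) {
  float q = std::trunc(x / y);   // ftrunc (fdiv x, y)
  return std::fma(-q, y, x);     // fma (fneg q), y, x
}

int main() {
  std::printf("%f %f\n", fremLowered(5.5f, 2.0f), std::fmod(5.5f, 2.0f));  // 1.5 both ways here
}
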
2385 // TODO: Should this propagate fast-math-flags? in LowerFCEIL()
2396 DAG.getConstant(FractBits - 32, SL, MVT::i32), in extractF64Exponent()
2424 // Extend back to 64-bits. in LowerFTRUNC()
2430 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); in LowerFTRUNC()
2439 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); in LowerFTRUNC()
2461 // TODO: Should this propagate fast-math-flags? in LowerFROUNDEVEN()
2493 // XXX - May require not supporting f32 denormals?
2505 // TODO: Should this propagate fast-math-flags? in LowerFROUND()
2531 // result += -1.0. in LowerFFLOOR()
2536 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64); in LowerFFLOOR()
2546 // TODO: Should this propagate fast-math-flags? in LowerFFLOOR()
2654 // log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0) in LowerFLOG2()
2659 SDNodeFlags Flags = Op->getFlags(); in LowerFLOG2()
2663 assert(!Subtarget->has16BitInsts()); in LowerFLOG2()
2694 SDNodeFlags Flags = Op->getFlags(); in LowerFLOGCommon()
2704 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) { in LowerFLOGCommon()
2710 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) { in LowerFLOGCommon()
2725 if (Subtarget->hasFastFMAF32()) { in LowerFLOGCommon()
2727 const float c_log10 = 0x1.344134p-2f; in LowerFLOGCommon()
2728 const float cc_log10 = 0x1.09f79ep-26f; in LowerFLOGCommon()
2731 const float c_log = 0x1.62e42ep-1f; in LowerFLOGCommon()
2732 const float cc_log = 0x1.efa39ep-25f; in LowerFLOGCommon()
2744 const float ch_log10 = 0x1.344000p-2f; in LowerFLOGCommon()
2745 const float ct_log10 = 0x1.3509f6p-18f; in LowerFLOGCommon()
2748 const float ch_log = 0x1.62e000p-1f; in LowerFLOGCommon()
2749 const float ct_log = 0x1.0bfbe8p-15f; in LowerFLOGCommon()
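
The constants above implement a split-constant rescaling in LowerFLOGCommon(): ln(x) (or log10(x)) is log2(x) times ln(2) (or log10(2)), with the conversion factor split into a head c and a tail cc so an FMA chain can recover the precision a single rounded multiply would lose. A rough host-side sketch of the fast-FMA path, with std::log2 standing in for the hardware log2 and lnViaLog2 a made-up name:

#include <cmath>
#include <cstdio>

static float lnViaLog2(float x) {
  const float c_log  = 0x1.62e42ep-1f;   // head of ln(2)
  const float cc_log = 0x1.efa39ep-25f;  // tail; c_log + cc_log ~= ln(2)
  float y = std::log2(x);                // stands in for the hardware log2
  float r = y * c_log;
  float e = std::fma(y, c_log, -r);      // exact rounding error of y*c_log
  float t = std::fma(y, cc_log, e);      // fold in the tail constant
  return r + t;
}

int main() {
  std::printf("%a vs %a\n", lnViaLog2(10.0f), std::log(10.0f));
}
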
2808 DAG.getConstantFP(-32.0 * Log2BaseInverted, SL, VT); in LowerFLOGUnsafe()
2817 if (Subtarget->hasFastFMAF32()) in LowerFLOGUnsafe()
2839 SDNodeFlags Flags = Op->getFlags(); in lowerFEXP2()
2843 assert(!Subtarget->has16BitInsts()); in lowerFEXP2()
2855 // bool needs_scaling = x < -0x1.f80000p+6f; in lowerFEXP2()
2856 // v_exp_f32(x + (s ? 0x1.0p+6f : 0.0f)) * (s ? 0x1.0p-64f : 1.0f); in lowerFEXP2()
2858 // -nextafter(128.0, -1) in lowerFEXP2()
2859 SDValue RangeCheckConst = DAG.getConstantFP(-0x1.f80000p+6f, SL, VT); in lowerFEXP2()
2875 SDValue TwoExpNeg64 = DAG.getConstantFP(0x1.0p-64f, SL, VT); in lowerFEXP2()
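
The lowerFEXP2() fragment above scales the input because the hardware exp flushes denormal results: for x below -0x1.f80000p+6f (-126.0), it evaluates exp2(x + 64) and rescales by 2^-64, using the identity exp2(x) = exp2(x + 64) * 2^-64. A host-side illustration, with std::exp2 standing in for v_exp_f32 and exp2Scaled a made-up name:

#include <cmath>
#include <cstdio>

static float exp2Scaled(float x) {
  bool needsScaling = x < -0x1.f80000p+6f;             // x < -126.0: result would be denormal
  float biased = x + (needsScaling ? 0x1.0p+6f : 0.0f);
  float r = std::exp2(biased);                         // stands in for v_exp_f32
  return r * (needsScaling ? 0x1.0p-64f : 1.0f);       // undo the bias: exp2(x) = exp2(x+64) * 2^-64
}

int main() {
  std::printf("%a vs %a\n", exp2Scaled(-140.0f), std::exp2(-140.0f));
}
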
2899 SDValue Threshold = DAG.getConstantFP(-0x1.5d58a0p+6f, SL, VT); in lowerFEXPUnsafe()
2913 SDValue ResultScaleFactor = DAG.getConstantFP(0x1.969d48p-93f, SL, VT); in lowerFEXPUnsafe()
2921 /// Emit approx-funcs appropriate lowering for exp10. inf/nan should still be
2930 // exp2(x * 0x1.a92000p+1f) * exp2(x * 0x1.4f0978p-11f); in lowerFEXP10Unsafe()
2932 SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT); in lowerFEXP10Unsafe()
2941 // bool s = x < -0x1.2f7030p+5f; in lowerFEXP10Unsafe()
2944 // exp2(x * 0x1.4f0978p-11f) * in lowerFEXP10Unsafe()
2945 // (s ? 0x1.9f623ep-107f : 1.0f); in lowerFEXP10Unsafe()
2949 SDValue Threshold = DAG.getConstantFP(-0x1.2f7030p+5f, SL, VT); in lowerFEXP10Unsafe()
2958 SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT); in lowerFEXP10Unsafe()
2967 SDValue ResultScaleFactor = DAG.getConstantFP(0x1.9f623ep-107f, SL, VT); in lowerFEXP10Unsafe()
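
lowerFEXP10Unsafe() uses the identity 10^x = 2^(x*log2(10)), with log2(10) split into K0 = 0x1.a92000p+1f (a short-mantissa head, so x*K0 loses little precision) and K1 = 0x1.4f0978p-11f (the remainder), then multiplies the two partial exponentials. A host-side sketch with std::exp2 in place of the hardware exp2; exp10Lowered is a made-up name, and the denormal-range scaling from the surrounding lines is omitted:

#include <cmath>
#include <cstdio>

static float exp10Lowered(float x) {
  const float K0 = 0x1.a92000p+1f;   // high part of log2(10)
  const float K1 = 0x1.4f0978p-11f;  // low part; K0 + K1 ~= 3.3219280949
  // 10^x = 2^(x*(K0+K1)) = 2^(x*K0) * 2^(x*K1)
  return std::exp2(x * K0) * std::exp2(x * K1);
}

int main() {
  std::printf("%a vs %a\n", exp10Lowered(3.0f), std::pow(10.0f, 3.0f));  // ~1000 both
}
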
2979 SDNodeFlags Flags = Op->getFlags(); in lowerFEXP()
2990 // exp(f16 x) -> in lowerFEXP()
3003 // library behavior. Also, is known-not-daz source sufficient? in lowerFEXP()
3020 // f = x*(64/ln(2)) - n in lowerFEXP()
3021 // r = f*(ln(2)/64) = x - n*(ln(2)/64) in lowerFEXP()
3037 if (Subtarget->hasFastFMAF32()) { in lowerFEXP()
3039 const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits in lowerFEXP()
3041 const float cc_exp10 = 0x1.2f346ep-24f; in lowerFEXP()
3052 const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits in lowerFEXP()
3055 const float cl_exp10 = 0x1.4f0978p-11f; in lowerFEXP()
3085 DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT); in lowerFEXP()
3130 SDValue NumExtBits = DAG.getConstant(32u - NumBits, SL, MVT::i32); in lowerCTLZResults()
3156 bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64; in LowerCTLZ_CTTZ()
3159 // (ctlz hi:lo) -> (umin (ffbh src), 32) in LowerCTLZ_CTTZ()
3160 // (cttz hi:lo) -> (umin (ffbl src), 32) in LowerCTLZ_CTTZ()
3161 // (ctlz_zero_undef src) -> (ffbh src) in LowerCTLZ_CTTZ()
3162 // (cttz_zero_undef src) -> (ffbl src) in LowerCTLZ_CTTZ()
3164 // The 64-bit scalar version produces a 32-bit result in LowerCTLZ_CTTZ()
3165 // (ctlz hi:lo) -> (umin (S_FLBIT_I32_B64 src), 64) in LowerCTLZ_CTTZ()
3166 // (cttz hi:lo) -> (umin (S_FF1_I32_B64 src), 64) in LowerCTLZ_CTTZ()
3167 // (ctlz_zero_undef src) -> (S_FLBIT_I32_B64 src) in LowerCTLZ_CTTZ()
3168 // (cttz_zero_undef src) -> (S_FF1_I32_B64 src) in LowerCTLZ_CTTZ()
3184 // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64) in LowerCTLZ_CTTZ()
3185 // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64) in LowerCTLZ_CTTZ()
3186 // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32)) in LowerCTLZ_CTTZ()
3187 // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo)) in LowerCTLZ_CTTZ()
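
The pattern list above builds a 64-bit count from two 32-bit scans: the low-half (for ctlz) or high-half (for cttz) count is offset by 32 with a saturating add, and everything is clamped to 64 via umin so a zero input yields 64. The hardware ffbh/ffbl return -1 on a zero input, which is what makes the unsigned min work. A host-side model using GCC/Clang builtins; all helper names here are made up:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint32_t ffbh(uint32_t v) { return v ? __builtin_clz(v) : 0xFFFFFFFFu; }  // models ffbh: -1 on zero
static uint32_t ffbl(uint32_t v) { return v ? __builtin_ctz(v) : 0xFFFFFFFFu; }  // models ffbl: -1 on zero

static uint32_t uaddsat32(uint32_t a, uint32_t b) {
  uint64_t s = uint64_t(a) + b;
  return s > 0xFFFFFFFFull ? 0xFFFFFFFFu : uint32_t(s);
}

static uint32_t ctlz64(uint64_t x) {
  uint32_t hi = uint32_t(x >> 32), lo = uint32_t(x);
  // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
  return std::min<uint32_t>({ffbh(hi), uaddsat32(ffbh(lo), 32), 64});
}

static uint32_t cttz64(uint64_t x) {
  uint32_t hi = uint32_t(x >> 32), lo = uint32_t(x);
  // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
  return std::min<uint32_t>({uaddsat32(ffbl(hi), 32), ffbl(lo), 64});
}

int main() {
  std::printf("%u %u\n", ctlz64(0x10000ull), cttz64(0xFFFF000000000000ull));  // prints 47 48
}
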
3208 // The regular method converting a 64-bit integer to float roughly consists of in LowerINT_TO_FP32()
3210 // conversion from a 64-bit integer to a float is essentially the same as the in LowerINT_TO_FP32()
3211 // one from a 32-bit integer. The only difference is that it has more in LowerINT_TO_FP32()
3212 // trailing bits to be rounded. To leverage the native 32-bit conversion, a in LowerINT_TO_FP32()
3213 // 64-bit integer could be preprocessed and fit into a 32-bit integer then in LowerINT_TO_FP32()
3221 // // reduced to a 32-bit one automatically. in LowerINT_TO_FP32()
3226 // // convert it as a 32-bit integer and scale the result back. in LowerINT_TO_FP32()
3227 // return uitofp(hi) * 2^(32 - shamt); in LowerINT_TO_FP32()
3241 if (Signed && Subtarget->isGCN()) { in LowerINT_TO_FP32()
3243 // i.e. Hi is 0 or -1. However, that only needs to take the MSB into in LowerINT_TO_FP32()
3245 // - 32 if Lo and Hi have opposite signs; in LowerINT_TO_FP32()
3246 // - 33 if Lo and Hi have the same sign. in LowerINT_TO_FP32()
3251 // - -1 if Lo and Hi have opposite signs; and in LowerINT_TO_FP32()
3252 // - 0 otherwise. in LowerINT_TO_FP32()
3256 // umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1. in LowerINT_TO_FP32()
3260 // umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31). in LowerINT_TO_FP32()
3291 // Normalize the given 64-bit integer. in LowerINT_TO_FP32()
3299 // Get the 32-bit normalized integer. in LowerINT_TO_FP32()
3301 // Convert the normalized 32-bit integer into f32. in LowerINT_TO_FP32()
3303 (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP; in LowerINT_TO_FP32()
3307 // 64-bit integer is converted as a 32-bit one. in LowerINT_TO_FP32()
3311 if (Subtarget->isGCN()) in LowerINT_TO_FP32()
3315 // part directly to emulate the multiplication of 2^ShAmt. That 8-bit in LowerINT_TO_FP32()
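
Putting the LowerINT_TO_FP32() comments together for the unsigned case: count the leading zeros of the high half, left-normalize, fold every shifted-out low bit into a sticky bit, convert the surviving 32-bit value with the native conversion, and scale back by 2^(32 - shamt), which is exact, so the only rounding is the native 32-bit one. A host-side sketch rather than the DAG sequence; uitofp64 is a made-up name and __builtin_clz is a GCC/Clang builtin:

#include <cmath>
#include <cstdint>
#include <cstdio>

static float uitofp64(uint64_t u) {
  uint32_t hi = uint32_t(u >> 32);
  int shamt = hi ? __builtin_clz(hi) : 32;  // "return 32 on all 0s"
  uint64_t norm = u << shamt;               // normalized: significant bits moved to the top
  uint32_t hi32 = uint32_t(norm >> 32);
  uint32_t lo32 = uint32_t(norm);
  hi32 |= (lo32 != 0);                      // sticky bit: shifted-out bits only affect rounding
  // Convert as a 32-bit integer and scale the result back; scaling by a
  // power of two is exact, so only the 32-bit conversion rounds.
  return std::ldexp(static_cast<float>(hi32), 32 - shamt);
}

int main() {
  uint64_t u = 0x123456789ABCDEFull;
  std::printf("%a vs %a\n", uitofp64(u), static_cast<float>(u));  // identical values
}
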
3347 // TODO: Should this propagate fast-math-flags? in LowerINT_TO_FP64()
3378 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { in LowerUINT_TO_FP()
3426 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { in LowerSINT_TO_FP()
3455 // The basic idea of converting a floating point number into a pair of 32-bit in LowerFP_TO_INT64()
3459 // hif := floor(tf * 2^-32); in LowerFP_TO_INT64()
3460 // lof := tf - hif * 2^32; // lof is always positive due to floor. in LowerFP_TO_INT64()
3467 // However, a 32-bit floating point number has only 23 bits mantissa and in LowerFP_TO_INT64()
3481 llvm::bit_cast<double>(UINT64_C(/*2^-32*/ 0x3df0000000000000)), SL, in LowerFP_TO_INT64()
3484 llvm::bit_cast<double>(UINT64_C(/*-2^32*/ 0xc1f0000000000000)), SL, in LowerFP_TO_INT64()
3488 llvm::bit_cast<float>(UINT32_C(/*2^-32*/ 0x2f800000)), SL, SrcVT); in LowerFP_TO_INT64()
3490 llvm::bit_cast<float>(UINT32_C(/*-2^32*/ 0xcf800000)), SL, SrcVT); in LowerFP_TO_INT64()
3492 // TODO: Should this propagate fast-math-flags? in LowerFP_TO_INT64()
3512 // r := xor(r, sign) - sign; in LowerFP_TO_INT64()
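
The LowerFP_TO_INT64() pseudocode above can be replayed on the host: truncate |x|, peel the high 32 bits off with a multiply by 2^-32 and a floor, recover the low 32 bits with an FMA against -2^32 (always non-negative thanks to the floor), then restore the sign with the branchless r = (r ^ sign) - sign. Sketch only, for magnitudes that fit in 63 bits; fptosi64 is a made-up name:

#include <cmath>
#include <cstdint>
#include <cstdio>

static int64_t fptosi64(double x) {
  double tf  = std::trunc(std::fabs(x));
  double hif = std::floor(tf * 0x1.0p-32);     // hif := floor(tf * 2^-32)
  double lof = std::fma(hif, -0x1.0p+32, tf);  // lof := tf - hif * 2^32, >= 0 due to floor
  uint64_t hi = static_cast<uint32_t>(hif);
  uint64_t lo = static_cast<uint32_t>(lof);
  int64_t r = static_cast<int64_t>((hi << 32) | lo);
  int64_t sign = (x < 0.0) ? -1 : 0;           // all-ones mask for negative inputs
  return (r ^ sign) - sign;                    // r := xor(r, sign) - sign
}

int main() {
  std::printf("%lld\n", (long long)fptosi64(-123456789012345.0));  // prints -123456789012345
}
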
3536 // f64 -> f16 conversion using round-to-nearest-even rounding mode. in LowerFP_TO_FP16()
3554 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); in LowerFP_TO_FP16()
3578 // B = clamp(1-E, 0, 13); in LowerFP_TO_FP16()
3666 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); in LowerSIGN_EXTEND_INREG()
3687 //===----------------------------------------------------------------------===//
3689 //===----------------------------------------------------------------------===//
3697 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated in isI24()
3698 // as unsigned 24-bit values. in isI24()
3706 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN; in simplifyMul24()
3708 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0); in simplifyMul24()
3709 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1); in simplifyMul24()
3710 unsigned NewOpcode = Node24->getOpcode(); in simplifyMul24()
3712 unsigned IID = Node24->getConstantOperandVal(0); in simplifyMul24()
3727 llvm_unreachable("Expected 24-bit mul intrinsic"); in simplifyMul24()
3739 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(), in simplifyMul24()
3757 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width); in constantFoldBFE()
3758 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width); in constantFoldBFE()
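
The two constantFoldBFE() lines above are the classic shift-pair bitfield extract: move the field up against bit 31, then shift it back down, arithmetically for BFE_I32 and logically for BFE_U32. A stand-alone version, assuming Width > 0, Offset + Width <= 32, and an arithmetic right shift of signed values as on the usual targets:

#include <cstdint>
#include <cstdio>

// IntTy = int32_t models BFE_I32 (sign-extending), uint32_t models BFE_U32.
template <typename IntTy>
static IntTy bfe(uint32_t Src0, uint32_t Offset, uint32_t Width) {
  uint32_t Shl = Src0 << (32 - Offset - Width);     // field now starts at bit 31
  return static_cast<IntTy>(Shl) >> (32 - Width);   // shift back; sign fills for signed IntTy
}

int main() {
  std::printf("%d %u\n", bfe<int32_t>(0xF0u, 4, 4),   // -1: the 0b1111 field, sign-extended
              bfe<uint32_t>(0xF0u, 4, 4));            // 15: same field, zero-extended
}
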
3766 for (SDNode *U : Val->uses()) { in hasVolatileUser()
3768 if (M->isVolatile()) in hasVolatileUser()
3803 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN)) in performLoadCombine()
3808 EVT VT = LN->getMemoryVT(); in performLoadCombine()
3811 Align Alignment = LN->getAlign(); in performLoadCombine()
3814 unsigned AS = LN->getAddressSpace(); in performLoadCombine()
3820 VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) { in performLoadCombine()
3840 = DAG.getLoad(NewVT, SL, LN->getChain(), in performLoadCombine()
3841 LN->getBasePtr(), LN->getMemOperand()); in performLoadCombine()
3856 if (!SN->isSimple() || !ISD::isNormalStore(SN)) in performStoreCombine()
3859 EVT VT = SN->getMemoryVT(); in performStoreCombine()
3864 Align Alignment = SN->getAlign(); in performStoreCombine()
3867 unsigned AS = SN->getAddressSpace(); in performStoreCombine()
3874 VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) { in performStoreCombine()
3889 SDValue Val = SN->getValue(); in performStoreCombine()
3900 return DAG.getStore(SN->getChain(), SL, CastVal, in performStoreCombine()
3901 SN->getBasePtr(), SN->getMemOperand()); in performStoreCombine()
3910 SDValue N0 = N->getOperand(0); in performAssertSZExtCombine()
3912 // (vt2 (assertzext (truncate vt0:x), vt1)) -> in performAssertSZExtCombine()
3915 SDValue N1 = N->getOperand(1); in performAssertSZExtCombine()
3916 EVT ExtVT = cast<VTSDNode>(N1)->getVT(); in performAssertSZExtCombine()
3922 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1); in performAssertSZExtCombine()
3923 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg); in performAssertSZExtCombine()
3932 unsigned IID = N->getConstantOperandVal(0); in performIntrinsicWOChainCombine()
3945 SDValue Src = N->getOperand(1); in performIntrinsicWOChainCombine()
3949 // frexp_exp (fneg x) -> frexp_exp x in performIntrinsicWOChainCombine()
3950 // frexp_exp (fabs x) -> frexp_exp x in performIntrinsicWOChainCombine()
3951 // frexp_exp (fneg (fabs x)) -> frexp_exp x in performIntrinsicWOChainCombine()
3952 SDValue Src = N->getOperand(1); in performIntrinsicWOChainCombine()
3956 return SDValue(DCI.DAG.UpdateNodeOperands(N, N->getOperand(0), PeekSign), in performIntrinsicWOChainCombine()
3964 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3980 // Re-visit the ands. It's possible we eliminated one of them and it could in splitBinaryBitConstantOpImpl()
3991 EVT VT = N->getValueType(0); in performShlCombine()
3993 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); in performShlCombine()
3997 SDValue LHS = N->getOperand(0); in performShlCombine()
3998 unsigned RHSVal = RHS->getZExtValue(); in performShlCombine()
4005 switch (LHS->getOpcode()) { in performShlCombine()
4011 SDValue X = LHS->getOperand(0); in performShlCombine()
4016 // (shl ([asz]ext i16:x), 16) -> build_vector 0, x in performShlCombine()
4018 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) }); in performShlCombine()
4038 // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32)) in performShlCombine()
4040 // On some subtargets, 64-bit shift is a quarter rate instruction. In the in performShlCombine()
4041 // common case, splitting this into a move and a 32-bit shift is faster and in performShlCombine()
4046 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); in performShlCombine()
4059 if (N->getValueType(0) != MVT::i64) in performSraCombine()
4062 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); in performSraCombine()
4068 unsigned RHSVal = RHS->getZExtValue(); in performSraCombine()
4070 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) in performSraCombine()
4072 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); in performSraCombine()
4080 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) in performSraCombine()
4082 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); in performSraCombine()
4094 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); in performSrlCombine()
4098 EVT VT = N->getValueType(0); in performSrlCombine()
4099 SDValue LHS = N->getOperand(0); in performSrlCombine()
4100 unsigned ShiftAmt = RHS->getZExtValue(); in performSrlCombine()
4104 // fold (srl (and x, (c1 << c2)), c2) -> (and (srl x, c2), c1) in performSrlCombine()
4109 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) && in performSrlCombine()
4113 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)), in performSrlCombine()
4114 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1))); in performSrlCombine()
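
The fold in the performSrlCombine() comment above rests on the identity (x & (c1 << c2)) >> c2 == (x >> c2) & c1 for logical shifts, which is what lets the srl be pushed through the and so the narrower mask c1 survives. A quick host-side check:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t c1 = 0xFFFFull;  // contiguous mask before shifting
  const unsigned c2 = 16;
  for (uint64_t x : {0x0ull, 0x123456789ABCDEF0ull, ~0ull})
    assert(((x & (c1 << c2)) >> c2) == ((x >> c2) & c1));
  return 0;
}
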
4127 // build_pair (srl hi_32(x), C - 32), 0 in performSrlCombine()
4132 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); in performSrlCombine()
4144 EVT VT = N->getValueType(0); in performTruncateCombine()
4145 SDValue Src = N->getOperand(0); in performTruncateCombine()
4147 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x) in performTruncateCombine()
4166 // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y) in performTruncateCombine()
4169 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) { in performTruncateCombine()
4186 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit. in performTruncateCombine()
4188 // i16 (trunc (srl i64:x, K)), K <= 16 -> in performTruncateCombine()
4199 // - For left shifts, do the transform as long as the shift in performTruncateCombine()
4201 // - For right shift, do it if ShiftAmt <= (32 - Size) to avoid in performTruncateCombine()
4204 (Src.getOpcode() == ISD::SHL) ? 31 : (32 - VT.getScalarSizeInBits()); in performTruncateCombine()
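
The shrink rule sketched in the performTruncateCombine() comments holds because only 16 result bits survive the truncate: for a logical right shift by K <= 32 - 16, those bits come from positions K..K+15, all within the low 32 bits of the source. A quick check of the srl case (the shl and sra bounds quoted above differ):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t x = 0xDEADBEEFCAFEBABEull;
  for (unsigned k = 0; k <= 16; ++k) {
    uint16_t wide   = static_cast<uint16_t>(x >> k);                         // i16 (trunc (srl i64:x, K))
    uint16_t narrow = static_cast<uint16_t>(static_cast<uint32_t>(x) >> k);  // i16 (trunc (srl i32:(trunc x), K))
    assert(wide == narrow);
  }
  return 0;
}
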
4253 if (V->getOpcode() != ISD::ADD) in getAddOneOp()
4256 return isOneConstant(V->getOperand(1)) ? V->getOperand(0) : SDValue(); in getAddOneOp()
4261 assert(N->getOpcode() == ISD::MUL); in performMulCombine()
4262 EVT VT = N->getValueType(0); in performMulCombine()
4264 // Don't generate 24-bit multiplies on values that are in SGPRs, since in performMulCombine()
4265 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs in performMulCombine()
4268 if (!N->isDivergent()) in performMulCombine()
4278 SDValue N0 = N->getOperand(0); in performMulCombine()
4279 SDValue N1 = N->getOperand(1); in performMulCombine()
4281 // Undo InstCombine canonicalize X * (Y + 1) -> X * Y + X to enable mad in performMulCombine()
4284 // mul x, (add y, 1) -> add (mul x, y), x in performMulCombine()
4285 auto IsFoldableAdd = [](SDValue V) -> SDValue { in performMulCombine()
4290 if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool { in performMulCombine()
4291 return U->getOpcode() == ISD::MUL; in performMulCombine()
4301 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N1, MulOper); in performMulCombine()
4306 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N0, MulOper); in performMulCombine()
4311 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16)) in performMulCombine()
4326 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { in performMulCombine()
4330 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { in performMulCombine()
4339 // for signed multiply of 8 and 16-bit types. in performMulCombine()
4346 if (N->getValueType(0) != MVT::i32) in performMulLoHiCombine()
4352 bool Signed = N->getOpcode() == ISD::SMUL_LOHI; in performMulLoHiCombine()
4353 SDValue N0 = N->getOperand(0); in performMulLoHiCombine()
4354 SDValue N1 = N->getOperand(1); in performMulLoHiCombine()
4365 // Try to use two fast 24-bit multiplies (one for each half of the result) in performMulLoHiCombine()
4370 if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { in performMulLoHiCombine()
4377 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { in performMulLoHiCombine()
4395 EVT VT = N->getValueType(0); in performMulhsCombine()
4397 if (!Subtarget->hasMulI24() || VT.isVector()) in performMulhsCombine()
4400 // Don't generate 24-bit multiplies on values that are in SGPRs, since in performMulhsCombine()
4401 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs in performMulhsCombine()
4406 if (Subtarget->hasSMulHi() && !N->isDivergent()) in performMulhsCombine()
4412 SDValue N0 = N->getOperand(0); in performMulhsCombine()
4413 SDValue N1 = N->getOperand(1); in performMulhsCombine()
4428 EVT VT = N->getValueType(0); in performMulhuCombine()
4430 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32) in performMulhuCombine()
4433 // Don't generate 24-bit multiplies on values that are in SGPRs, since in performMulhuCombine()
4434 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs in performMulhuCombine()
4439 if (Subtarget->hasSMulHi() && !N->isDivergent()) in performMulhuCombine()
4445 SDValue N0 = N->getOperand(0); in performMulhuCombine()
4446 SDValue N1 = N->getOperand(1); in performMulhuCombine()
4465 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() && in getFFBX_U32()
4479 // The native instructions return -1 on 0 input. Optimize out a select that
4480 // produces -1 on 0.
4482 // TODO: If zero is not undef, we could also do this if the output is compared
4493 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); in performCtlz_CttzCombine()
4496 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x in performCtlz_CttzCombine()
4497 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x in performCtlz_CttzCombine()
4506 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x in performCtlz_CttzCombine()
4507 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x in performCtlz_CttzCombine()
4537 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
4538 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
4540 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
4541 // select c, (fabs x), +k -> fabs (select c, x, k)
4572 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative. in foldFreeOpFromSelect()
4588 if (LHS.getOpcode() == ISD::FABS && CRHS->isNegative()) in foldFreeOpFromSelect()
4625 SDValue Cond = N->getOperand(0); in performSelectCombine()
4629 EVT VT = N->getValueType(0); in performSelectCombine()
4634 SDValue True = N->getOperand(1); in performSelectCombine()
4635 SDValue False = N->getOperand(2); in performSelectCombine()
4643 // select (setcc x, y), k, x -> select (setccinv x, y), x, k in performSelectCombine()
4647 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType()); in performSelectCombine()
4653 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) { in performSelectCombine()
4680 if (C->isZero()) in getConstantNegateCost()
4681 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive; in getConstantNegateCost()
4683 if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF())) in getConstantNegateCost()
4684 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive; in getConstantNegateCost()
4748 SDValue N0 = N->getOperand(0); in performFNegCombine()
4749 EVT VT = N->getValueType(0); in performFNegCombine()
4762 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y)) in performFNegCombine()
4776 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags()); in performFNegCombine()
4785 // (fneg (fmul x, y)) -> (fmul x, (fneg y)) in performFNegCombine()
4786 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y)) in performFNegCombine()
4797 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags()); in performFNegCombine()
4810 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z)) in performFNegCombine()
4842 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y) in performFNegCombine()
4843 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y) in performFNegCombine()
4844 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y) in performFNegCombine()
4845 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y) in performFNegCombine()
4859 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags()); in performFNegCombine()
4869 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags()); in performFNegCombine()
4871 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags()); in performFNegCombine()
4879 for (SDNode *U : Neg->uses()) in performFNegCombine()
4888 case ISD::FNEARBYINT: // XXX - Should fround be handled? in performFNegCombine()
4890 case ISD::FSIN: in performFNegCombine()
4898 // (fneg (fp_extend (fneg x))) -> (fp_extend x) in performFNegCombine()
4899 // (fneg (rcp (fneg x))) -> (rcp x) in performFNegCombine()
4906 // (fneg (fp_extend x)) -> (fp_extend (fneg x)) in performFNegCombine()
4907 // (fneg (rcp x)) -> (rcp (fneg x)) in performFNegCombine()
4909 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags()); in performFNegCombine()
4915 // (fneg (fp_round (fneg x))) -> (fp_round x) in performFNegCombine()
4923 // (fneg (fp_round x)) -> (fp_round (fneg x)) in performFNegCombine()
4928 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal in performFNegCombine()
4936 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000) in performFNegCombine()
4939 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg); in performFNegCombine()
4942 // fneg (select c, a, b) -> select c, (fneg a), (fneg b) in performFNegCombine()
4950 SDValue HighBits = BCSrc.getOperand(BCSrc.getNumOperands() - 1); in performFNegCombine()
4960 // fneg (f64 (bitcast (build_vector x, y))) -> in performFNegCombine()
4969 SmallVector<SDValue, 8> Ops(BCSrc->op_begin(), BCSrc->op_end()); in performFNegCombine()
4983 // fneg (bitcast (f32 (select cond, i32:lhs, i32:rhs))) -> in performFNegCombine()
5010 SDValue N0 = N->getOperand(0); in performFAbsCombine()
5017 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal"); in performFAbsCombine()
5022 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff) in performFAbsCombine()
5025 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs); in performFAbsCombine()
5034 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); in performRcpCombine()
5038 // XXX - Should this flush denormals? in performRcpCombine()
5039 const APFloat &Val = CFP->getValueAPF(); in performRcpCombine()
5041 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0)); in performRcpCombine()
5049 switch(N->getOpcode()) { in PerformDAGCombine()
5053 EVT DestVT = N->getValueType(0); in PerformDAGCombine()
5061 SDValue Src = N->getOperand(0); in PerformDAGCombine()
5088 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k) in PerformDAGCombine()
5090 SDValue Src = N->getOperand(0); in PerformDAGCombine()
5093 uint64_t CVal = C->getZExtValue(); in PerformDAGCombine()
5101 const APInt &Val = C->getValueAPF().bitcastToAPInt(); in PerformDAGCombine()
5159 assert(!N->getValueType(0).isVector() && in PerformDAGCombine()
5161 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2)); in PerformDAGCombine()
5165 uint32_t WidthVal = Width->getZExtValue() & 0x1f; in PerformDAGCombine()
5169 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); in PerformDAGCombine()
5173 SDValue BitsFrom = N->getOperand(0); in PerformDAGCombine()
5174 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f; in PerformDAGCombine()
5176 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32; in PerformDAGCombine()
5180 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal); in PerformDAGCombine()
5204 CVal->getSExtValue(), in PerformDAGCombine()
5211 CVal->getZExtValue(), in PerformDAGCombine()
5218 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) { in PerformDAGCombine()
5254 SDValue N0 = N->getOperand(0); in PerformDAGCombine()
5255 SDValue N1 = N->getOperand(1); in PerformDAGCombine()
5256 SDValue N2 = N->getOperand(2); in PerformDAGCombine()
5257 EVT VT = N->getValueType(0); in PerformDAGCombine()
5260 // We flush the inputs, the intermediate step, and the output. in PerformDAGCombine()
5268 return V.isNegative() ? -Zero : Zero; in PerformDAGCombine()
5273 APFloat V0 = FTZ(N0CFP->getValueAPF()); in PerformDAGCombine()
5274 APFloat V1 = FTZ(N1CFP->getValueAPF()); in PerformDAGCombine()
5275 APFloat V2 = FTZ(N2CFP->getValueAPF()); in PerformDAGCombine()
5287 //===----------------------------------------------------------------------===//
5289 //===----------------------------------------------------------------------===//
5355 DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32); in storeStackInputValue()
5385 unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset(); in getImplicitParameterOffset()
5386 const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr(); in getImplicitParameterOffset()
5405 return getImplicitParameterOffset(MFI->getExplicitKernArgSize(), Param); in getImplicitParameterOffset()
5652 uint32_t Width = CWidth->getZExtValue() & 0x1f; in computeKnownBitsForTargetNode()
5655 Known.Zero = APInt::getHighBitsSet(32, 32 - Width); in computeKnownBitsForTargetNode()
5663 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16); in computeKnownBitsForTargetNode()
5687 unsigned SignBits = 32 - MaxValBits + 1; in computeKnownBitsForTargetNode()
5716 unsigned Sel = CMask->getZExtValue(); in computeKnownBitsForTargetNode()
5747 Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout()); in computeKnownBitsForTargetNode()
5782 unsigned MaxValue = Subtarget->getMaxWorkitemID( in computeKnownBitsForTargetNode()
5803 unsigned SignBits = 32 - Width->getZExtValue() + 1; in ComputeNumSignBitsForTargetNode()
5807 // TODO: Could probably figure something out with non-0 offsets. in ComputeNumSignBitsForTargetNode()
5814 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1; in ComputeNumSignBitsForTargetNode()
5864 switch (MI->getOpcode()) { in computeNumSignBitsForTargetInstr()
5875 auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs(); in computeNumSignBitsForTargetInstr()
6013 switch (RMW->getOperation()) { in shouldExpandAtomicRMWInIR()
6021 const DataLayout &DL = RMW->getFunction()->getDataLayout(); in shouldExpandAtomicRMWInIR()
6022 unsigned ValSize = DL.getTypeSizeInBits(RMW->getType()); in shouldExpandAtomicRMWInIR()
6028 if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) { in shouldExpandAtomicRMWInIR()
6029 unsigned Size = IntTy->getBitWidth(); in shouldExpandAtomicRMWInIR()
6046 for (auto &Op : I->operands()) { in shouldSinkOperands()
6048 if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); })) in shouldSinkOperands()