Lines Matching +full:32 +full:kb
56 GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
112 // 33 bits are sign extended and with S_MUL_U64_U32_PSEUDO if the higher 32
132 GISelKnownBits &KB, GISelCSEInfo *CSEInfo, in AMDGPUPostLegalizerCombinerImpl() argument
135 : Combiner(MF, CInfo, TPC, &KB, CSEInfo), RuleConfig(RuleConfig), STI(STI), in AMDGPUPostLegalizerCombinerImpl()
137 Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI), in AMDGPUPostLegalizerCombinerImpl()
153 // common case, splitting this into a move and a 32-bit shift is faster and in tryCombineAll()
155 return Helper.tryCombineShiftToUnmerge(MI, 32); in tryCombineAll()
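The two lines above come from the shift narrowing in tryCombineAll: a 64-bit shift whose amount is at least 32 is handed to CombinerHelper::tryCombineShiftToUnmerge with a target width of 32. Below is a minimal sketch of the underlying idea for a logical right shift by a constant; the function name lowerWideLShrByConstant is hypothetical, and the in-tree helper covers more opcodes and shapes than this.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

// Illustrative sketch only: rewrite a 64-bit G_LSHR with a constant amount
// >= 32 as an unmerge plus a 32-bit shift of the high half, so the work is
// done by a 32-bit shift instead of a 64-bit one.
static void lowerWideLShrByConstant(MachineIRBuilder &B, Register Dst,
                                    Register Src, uint64_t ShAmt) {
  const LLT S32 = LLT::scalar(32);
  assert(ShAmt >= 32 && ShAmt < 64 && "sketch only handles the >= 32 case");

  // Split the 64-bit source into its low and high 32-bit halves.
  auto Unmerge = B.buildUnmerge(S32, Src);
  Register Hi = Unmerge.getReg(1);

  // Only the high half can reach the result; shift it by the leftover amount.
  auto Amt = B.buildConstant(S32, ShAmt - 32);
  auto Lo = B.buildLShr(S32, Hi, Amt);

  // Reassemble the 64-bit destination with a zero high half.
  auto Zero = B.buildConstant(S32, 0);
  B.buildMergeLikeInstrs(Dst, {Lo.getReg(0), Zero.getReg(0)});
}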
216 if (Ty == LLT::scalar(32) || Ty == LLT::scalar(16)) { in matchUCharToFloat()
219 assert(SrcSize == 16 || SrcSize == 32 || SrcSize == 64); in matchUCharToFloat()
229 const LLT S32 = LLT::scalar(32); in applyUCharToFloat()
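Lines 216-229 belong to the uchar-to-float combine: the destination must be s16 or s32, and the conversion can only be narrowed to a byte conversion when the source is provably an unsigned byte. A hedged sketch of that kind of known-bits check follows; the helper name sourceFitsInUnsignedByte is illustrative, not the in-tree one.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Illustrative sketch: the integer-to-float conversion may be replaced by a
// "convert byte 0" operation only if every bit above the low byte of the
// source is known to be zero, which GISelKnownBits can prove.
static bool sourceFitsInUnsignedByte(GISelKnownBits &KB,
                                     MachineRegisterInfo &MRI, Register Src) {
  unsigned SrcSize = MRI.getType(Src).getSizeInBits();
  const APInt HighBits = APInt::getHighBitsSet(SrcSize, SrcSize - 8);
  return KB.maskedValueIsZero(Src, HighBits);
}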
338 return ShiftOffset < 32 && ShiftOffset >= 8 && (ShiftOffset % 8) == 0; in matchCvtF32UByteN()
349 const LLT S32 = LLT::scalar(32); in applyCvtF32UByteN()
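The guard at line 338 and the 32-bit scalar at line 349 come from the cvt_f32_ubyte combine, which folds a constant shift of the source into the byte index of the conversion. A small arithmetic sketch of that rule, assuming a right shift and a hypothetical helper name:

#include <cstdint>

// Illustrative arithmetic only: a right shift by a constant feeding
// G_AMDGPU_CVT_F32_UBYTE<Idx> can be folded away by bumping the byte index,
// provided the combined bit offset stays byte aligned and still selects one
// of the four bytes of the 32-bit source.
static bool foldShiftIntoByteIndex(unsigned Idx, int64_t ShiftAmt,
                                   unsigned &NewIdx) {
  int64_t ShiftOffset = 8 * int64_t(Idx) + ShiftAmt; // effective bit offset
  if (ShiftOffset < 8 || ShiftOffset >= 32 || (ShiftOffset % 8) != 0)
    return false; // mirrors the guard shown at line 338
  NewIdx = unsigned(ShiftOffset / 8); // select G_AMDGPU_CVT_F32_UBYTE<NewIdx>
  return true;
}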
424 if (KB->getKnownBits(Src1).countMinLeadingZeros() >= 32 && in matchCombine_s_mul_u64()
425 KB->getKnownBits(Src0).countMinLeadingZeros() >= 32) { in matchCombine_s_mul_u64()
430 if (KB->computeNumSignBits(Src1) >= 33 && in matchCombine_s_mul_u64()
431 KB->computeNumSignBits(Src0) >= 33) { in matchCombine_s_mul_u64()
495 GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF); in runOnMachineFunction() local
503 AMDGPUPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, /*CSEInfo*/ nullptr, in runOnMachineFunction()
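Lines 495 and 503 show the legacy-pass wiring: runOnMachineFunction pulls GISelKnownBits out of the GISelKnownBitsAnalysis wrapper and hands it to the Impl. A minimal sketch of that pattern for a hypothetical pass named MyCombiner (everything except the GISelKnownBitsAnalysis accessors is illustrative):

#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
// Hypothetical pass, shown only to illustrate how a GlobalISel combiner
// obtains the known-bits analysis it passes to its Impl.
class MyCombiner : public MachineFunctionPass {
public:
  static char ID;
  MyCombiner() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<GISelKnownBitsAnalysis>();
    AU.addPreserved<GISelKnownBitsAnalysis>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Same access pattern as line 495: the analysis wrapper hands back a
    // per-function GISelKnownBits instance.
    GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
    (void)KB; // a real pass would construct its Impl with *KB and run it here
    return false;
  }
};
} // end anonymous namespace

char MyCombiner::ID = 0;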