Lines Matching refs:XLen

354 // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
8835 // We need to special case these when the scalar is larger than XLen.
12338 unsigned XLen = Subtarget.getXLen();
12340 if (Size > XLen) {
12341 assert(Size == (XLen * 2) && "Unexpected custom legalisation");
12344 APInt HighMask = APInt::getHighBitsSet(Size, XLen);
12361 bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
12362 bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
12816 // Extend inputs to XLen, and shift by 32. This will add 64 trailing zeros
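One reading of the comment at source line 12816 (an assumption about the surrounding lowering, not a statement of it): shifting each 32-bit input into the upper half of an XLen register makes the 128-bit product equal to the full 32x32 product shifted left by 64, so a high multiply recovers it. A quick standalone check of that identity (uses the __int128 compiler extension):

```cpp
#include <cassert>
#include <cstdint>

// Illustrative check only, not the lowering itself: with both 32-bit inputs
// shifted into the high halves of 64-bit registers, the 128-bit product is
// (a*b) << 64, so its high 64 bits are exactly the full 32x32 product.
int main() {
  uint32_t a = 0x89abcdef, b = 0x12345678;
  unsigned __int128 prod = (unsigned __int128)((uint64_t)a << 32) *
                           (unsigned __int128)((uint64_t)b << 32);
  uint64_t high = (uint64_t)(prod >> 64);      // what a high multiply returns
  assert(high == (uint64_t)a * (uint64_t)b);   // full 32x32 product
  return 0;
}
```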
13224 // Maybe harmful when VT is wider than XLen.
15364 // Only handle XLen or i32 types. Other types narrower than XLen will
15394 // Only handle XLen types. Other types narrower than XLen will eventually be
15818 // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, XLen-1-C), 0, ge/lt)
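The fold at source line 15818 rests on a bit trick: testing bit C of X via srl/and and an eq/ne against zero is equivalent to shifting bit C into the sign position and using a signed ge/lt compare against zero. A minimal standalone check of that equivalence (illustrative only, not the DAG combine itself):

```cpp
#include <cassert>
#include <cstdint>

// Verify: ((X & (1<<C)) >> C) == 0  <=>  (signed)(X << (XLen-1-C)) >= 0,
// i.e. moving bit C into the sign bit turns the eq/ne test into ge/lt.
int main() {
  const unsigned XLen = 64;
  for (unsigned C = 0; C < XLen; ++C) {
    uint64_t X = 0x0123456789abcdefULL ^ (1ULL << C);
    bool EqZero = (((X & (1ULL << C)) >> C) == 0);        // srl (and X, 1<<C), C
    bool GeZero = ((int64_t)(X << (XLen - 1 - C)) >= 0);  // shl X, XLen-1-C, signed ge
    assert(EqZero == GeZero);
  }
  return 0;
}
```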
17343 // any power-of-two size up to XLen bits, provided that they aren't too
17998 // element type is wider than XLen, the least-significant XLEN bits are
18000 unsigned XLen = Subtarget.getXLen();
18002 if (EltBits <= XLen)
18003 return XLen - EltBits + 1;
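Source lines 18000-18003 compute known sign bits: a value sign-extended from EltBits into an XLen-wide register carries at least XLen - EltBits + 1 copies of its sign bit. A small check of that count, using a hypothetical helper that is not from the source:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper: count how many copies of the sign bit a 64-bit value
// carries in its most-significant bits (the sign bit itself counts as one).
static unsigned numSignBits(int64_t V) {
  unsigned N = 1;
  while (N < 64 && (((V >> (63 - N)) & 1) == ((uint64_t)V >> 63)))
    ++N;
  return N;
}

int main() {
  const unsigned XLen = 64, EltBits = 16;
  int64_t V = (int64_t)(int16_t)0x8001;          // an i16 element sign-extended to XLen
  assert(numSignBits(V) >= XLen - EltBits + 1);  // at least 64 - 16 + 1 = 49 sign bits
  return 0;
}
```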
18949 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
18953 unsigned XLenInBytes = XLen / 8;
18967 if (!EABI || XLen != 32)
18999 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
19000 assert(XLen == 32 || XLen == 64);
19001 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
19057 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
19074 unsigned TwoXLenInBytes = (2 * XLen) / 8;
19093 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
19151 XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
19157 unsigned StoreSizeBytes = XLen / 8;
19158 Align StackAlign = Align(XLen / 8);
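The UseGPRForF64 && XLen == 32 path above covers f64 values assigned to a pair of GPRs on RV32. A minimal sketch of how such a value splits into two 32-bit halves (illustrative only, not the calling-convention code; on little-endian RISC-V the low half goes in the first register of the pair):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative split of an f64 into the two 32-bit halves an RV32 GPR pair
// would carry: bit-copy the double, then take low and high words.
int main() {
  double d = 3.141592653589793;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = (uint32_t)bits;          // first GPR of the pair
  uint32_t hi = (uint32_t)(bits >> 32);  // second GPR of the pair
  std::printf("lo=0x%08x hi=0x%08x\n", lo, hi);
  return 0;
}
```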
20672 // TODO: Support fixed vectors up to XLen for P extension?
20986 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
20987 if (XLen == 32) {
21010 if (XLen == 64) {
21033 llvm_unreachable("Unexpected XLen\n");
21055 unsigned XLen = Subtarget.getXLen();
21057 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
21061 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
21063 if (XLen == 64) {
21073 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
21082 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
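The amount built at source lines 21073/21082, XLen - ShiftAmt - ValWidth, can be read as the left shift that brings the field's top bit to bit XLen-1; a shift-left/arithmetic-shift-right pair by that amount then sign-extends the ValWidth-bit field in place. A minimal sketch of that arithmetic with made-up constants (illustrative only, not the intrinsic expansion; relies on arithmetic right shift of signed values):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const unsigned XLen = 64, ValWidth = 8, ShiftAmt = 16;
  const unsigned SextShamt = XLen - ShiftAmt - ValWidth;    // 40
  uint64_t Word = 0xF5ULL << ShiftAmt;                      // i8 value 0xF5 sitting at bit 16
  // Shift the field's sign bit up to bit XLen-1, then arithmetic-shift it back.
  int64_t InPlace = (int64_t)(Word << SextShamt) >> SextShamt;
  assert(InPlace == ((int64_t)(int8_t)0xF5) << ShiftAmt);   // field sign-extended in place
  return 0;
}
```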
21090 if (XLen == 64)
21112 unsigned XLen = Subtarget.getXLen();
21113 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
21115 if (XLen == 64) {
21126 if (XLen == 64)
21355 // size exceeds XLen.