/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/
  ScheduleDAGSDNodes.cpp
    247  int64_t Offset1, Offset2;    [in ClusterNeighboringLoads(), local]
    248  if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||    [in ClusterNeighboringLoads()]
    249  Offset1 == Offset2 ||    [in ClusterNeighboringLoads()]
    257  O2SMap.insert(std::make_pair(Offset2, User));    [in ClusterNeighboringLoads()]
    258  Offsets.push_back(Offset2);    [in ClusterNeighboringLoads()]
    259  if (Offset2 < Offset1)    [in ClusterNeighboringLoads()]
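The ClusterNeighboringLoads hits collect loads that share a base pointer into an offset-keyed map (O2SMap) so they can later be clustered in increasing offset order. A minimal, self-contained sketch of that bookkeeping, with a placeholder Load type standing in for the scheduler's SDNode/SUnit machinery:

    #include <cstdint>
    #include <map>
    #include <vector>

    struct Load { int Id; int64_t Offset; };   // stand-in, not an LLVM type

    // Key same-base loads by their constant offset; walking the map then yields
    // them in increasing offset order, the order the scheduler clusters them in.
    std::vector<Load> orderForClustering(const std::vector<Load> &SameBaseLoads) {
      std::map<int64_t, Load> OffsetToLoad;              // role of O2SMap above
      for (const Load &L : SameBaseLoads)
        OffsetToLoad.insert({L.Offset, L});              // equal offsets: keep first
      std::vector<Load> Ordered;
      for (const auto &Entry : OffsetToLoad)
        Ordered.push_back(Entry.second);
      return Ordered;
    }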
/freebsd/contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/
  AggressiveInstCombine.cpp
    682  APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);    [in foldLoadsRecursive(), local]
    684  Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2,    [in foldLoadsRecursive()]
    721  if (Offset2.slt(Offset1)) {    [in foldLoadsRecursive()]
    724  std::swap(Offset1, Offset2);    [in foldLoadsRecursive()]
    755  if ((Shift2 - Shift1) != ShiftDiff || (Offset2 - Offset1) != PrevSize)    [in foldLoadsRecursive()]
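In foldLoadsRecursive the two pointers are first reduced to a common base with stripAndAccumulateConstantOffsets, the pair is normalised so the lower offset comes first, and the merge is rejected unless the second load starts exactly where the first one ends. A standalone restatement of that final test, assuming a little-endian layout; FirstLoadSizeInBytes is a stand-in for the PrevSize/ShiftDiff values the real code computes:

    #include <algorithm>
    #include <cstdint>

    // True when two loads (already known to share a base) are byte-adjacent and
    // their shift amounts place them back-to-back in the combined wide value.
    bool loadsLookMergeable(int64_t Offset1, int64_t Offset2,
                            uint64_t Shift1, uint64_t Shift2,
                            uint64_t FirstLoadSizeInBytes) {
      if (Offset2 < Offset1) {               // keep the lower-addressed load first
        std::swap(Offset1, Offset2);
        std::swap(Shift1, Shift2);
      }
      return Shift2 - Shift1 == FirstLoadSizeInBytes * 8 &&
             static_cast<uint64_t>(Offset2 - Offset1) == FirstLoadSizeInBytes;
    }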
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/
  MicroMipsSizeReduction.cpp
    400  int64_t Offset1, Offset2;    [in ConsecutiveInstr(), local]
    403  if (!GetImm(MI2, 2, Offset2))    [in ConsecutiveInstr()]
    409  return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2)));    [in ConsecutiveInstr()]
/freebsd/contrib/llvm-project/llvm/lib/IR/
  Value.cpp
    1033  APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);    [in getPointerOffsetFrom(), local]
    1035  Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);    [in getPointerOffsetFrom()]
    1039  return Offset2.getSExtValue() - Offset1.getSExtValue();    [in getPointerOffsetFrom()]
    1063  return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -    [in getPointerOffsetFrom()]
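getPointerOffsetFrom answers "how many bytes past Ptr1 does Ptr2 point?" by accumulating the constant offsets on both pointers back to a common base and subtracting. A toy model of that idea; the Ptr struct here is hypothetical and not the llvm::Value API:

    #include <cstdint>
    #include <optional>

    struct Ptr { const Ptr *Base; int64_t ConstOffset; };   // hypothetical chain node

    static const Ptr *stripConstantOffsets(const Ptr *P, int64_t &Accum) {
      while (P->Base) {                  // fold every constant offset on the chain
        Accum += P->ConstOffset;
        P = P->Base;
      }
      return P;
    }

    std::optional<int64_t> pointerOffsetFrom(const Ptr *Ptr1, const Ptr *Ptr2) {
      int64_t Offset1 = 0, Offset2 = 0;
      const Ptr *Base1 = stripConstantOffsets(Ptr1, Offset1);
      const Ptr *Base2 = stripConstantOffsets(Ptr2, Offset2);
      if (Base1 != Base2)
        return std::nullopt;             // only meaningful for a shared base
      return Offset2 - Offset1;          // signed byte distance, Ptr1 -> Ptr2
    }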
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
  X86InstrInfo.h
    477  int64_t &Offset2) const override;
    495  int64_t Offset2,
  X86InstrInfo.cpp
    8653  int64_t &Offset2) const {    [in areLoadsFromSameBasePtr()]
    8773  Offset2 = Disp2->getSExtValue();    [in areLoadsFromSameBasePtr()]
    8778  int64_t Offset1, int64_t Offset2,    [in shouldScheduleLoadsNear(), argument]
    8780  assert(Offset2 > Offset1);    [in shouldScheduleLoadsNear()]
    8781  if ((Offset2 - Offset1) / 8 > 64)    [in shouldScheduleLoadsNear()]
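The X86 shouldScheduleLoadsNear hits show the distance gate applied after areLoadsFromSameBasePtr has extracted the two displacements: loads more than roughly 512 bytes apart are not pulled together. A self-contained restatement of just that gate (the real hook applies further checks on top of it):

    #include <cassert>
    #include <cstdint>

    bool closeEnoughToScheduleNear(int64_t Offset1, int64_t Offset2) {
      assert(Offset2 > Offset1 && "caller passes offsets in increasing order");
      // Mirrors the (Offset2 - Offset1) / 8 > 64 rejection above.
      return (Offset2 - Offset1) / 8 <= 64;
    }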
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/
  TargetInstrInfo.h
    1446  int64_t &Offset2) const {    [in areLoadsFromSameBasePtr(), argument]
    1459  int64_t Offset1, int64_t Offset2,    [in shouldScheduleLoadsNear(), argument]
    1564  int64_t Offset2, bool OffsetIsScalable2,    [in shouldClusterMemOps(), argument]
/freebsd/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/
  ContainerModeling.cpp
    132  SymbolRef Offset2,
    970  SymbolRef Offset2,    [in invalidateIteratorPositions(), argument]
    974  compare(State, Pos.getOffset(), Offset2, Opc2);    [in invalidateIteratorPositions()]
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/
  RISCVInstrInfo.h
    177  int64_t Offset2, bool OffsetIsScalable2,
  RISCVInstrInfo.cpp
    2684  shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const    [in shouldClusterMemOps(), argument]
    2691  int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,    [in shouldClusterMemOps()]
    2712  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;    [in shouldClusterMemOps()]
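Line 2712 is the whole RISC-V clustering policy: at most four memory operations per cluster, and the two accesses must fall within one cache line of each other. Restated standalone (the 64-byte CacheLineSize default here is an assumption; the real value comes from the subtarget):

    #include <cstdint>
    #include <cstdlib>

    bool shouldClusterLikeRISCV(int64_t Offset1, int64_t Offset2,
                                unsigned ClusterSize,
                                unsigned CacheLineSize = 64 /* assumed */) {
      return ClusterSize <= 4 &&
             std::abs(Offset1 - Offset2) < static_cast<int64_t>(CacheLineSize);
    }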
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
  ARMBaseInstrInfo.h
    253  int64_t &Offset2) const override;
    264  int64_t Offset1, int64_t Offset2,
  ARMBaseInstrInfo.cpp
    1950  int64_t &Offset2) const {    [in areLoadsFromSameBasePtr()]
    1997  Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();    [in areLoadsFromSameBasePtr()]
    2016  int64_t Offset1, int64_t Offset2,    [in shouldScheduleLoadsNear(), argument]
    2021  assert(Offset2 > Offset1);    [in shouldScheduleLoadsNear()]
    2023  if ((Offset2 - Offset1) / 8 > 64)    [in shouldScheduleLoadsNear()]
/freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/
  SeparateConstOffsetFromGEP.cpp
    1369  Value *Offset2 = Second->getOperand(1);    [in swapGEPOperand(), local]
    1370  First->setOperand(1, Offset2);    [in swapGEPOperand()]
  ConstraintElimination.cpp
    667  int64_t Offset2 = BDec.Offset;    [in getConstraint(), local]
    718  if (AddOverflow(Offset1, Offset2, OffsetSum))    [in getConstraint()]
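Here the offsets of two decomposed constraints are combined, and the fold is abandoned if the addition would overflow. A minimal sketch of that guard, using the GCC/Clang builtin in place of LLVM's AddOverflow helper:

    #include <cstdint>
    #include <optional>

    std::optional<int64_t> addConstraintOffsets(int64_t Offset1, int64_t Offset2) {
      int64_t OffsetSum;
      if (__builtin_add_overflow(Offset1, Offset2, &OffsetSum))
        return std::nullopt;             // combined constraint would overflow
      return OffsetSum;
    }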
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
  AArch64InstrInfo.h
    327  int64_t Offset2, bool OffsetIsScalable2,
  AArch64InstrInfo.cpp
    4284  int64_t Offset2, unsigned Opcode2) {    [in shouldClusterFI(), argument]
    4301  ObjectOffset2 += Offset2;    [in shouldClusterFI()]
    4355  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();    [in shouldClusterMemOps(), local]
    4356  if (hasUnscaledLdStOffset(SecondOpc) && !scaleOffset(SecondOpc, Offset2))    [in shouldClusterMemOps()]
    4366  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&    [in shouldClusterMemOps()]
    4372  BaseOp2.getIndex(), Offset2, SecondOpc);    [in shouldClusterMemOps()]
    4375  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");    [in shouldClusterMemOps()]
    4377  return Offset1 + 1 == Offset2;    [in shouldClusterMemOps()]
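On AArch64 the immediates are first brought to a common scale (scaleOffset), after which two accesses pair up for LDP/STP formation only when the second is exactly the next access-sized slot, i.e. Offset1 + 1 == Offset2. A standalone sketch of that scaled adjacency test; the divisibility that scaleOffset checks is asserted here instead of reported as failure:

    #include <cassert>
    #include <cstdint>

    bool adjacentScaledAccesses(int64_t ByteOffset1, int64_t ByteOffset2,
                                int64_t AccessBytes) {
      assert(AccessBytes > 0 && ByteOffset1 % AccessBytes == 0 &&
             ByteOffset2 % AccessBytes == 0 && "offsets must be naturally aligned");
      int64_t Offset1 = ByteOffset1 / AccessBytes;    // scale into access units
      int64_t Offset2 = ByteOffset2 / AccessBytes;
      assert(Offset1 <= Offset2 && "caller should have ordered offsets");
      return Offset1 + 1 == Offset2;                  // exactly the next slot
    }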
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/
  PPCInstrInfo.h
    578  int64_t Offset2, bool OffsetIsScalable2,
  PPCInstrInfo.cpp
    2930  int64_t Offset1 = 0, Offset2 = 0;    [in shouldClusterMemOps(), local]
    2934  !getMemOperandWithOffsetWidth(SecondLdSt, Base2, Offset2, Width2, TRI) ||    [in shouldClusterMemOps()]
    2941  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");    [in shouldClusterMemOps()]
    2942  return Offset1 + (int64_t)Width1.getValue() == Offset2;    [in shouldClusterMemOps()]
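PowerPC expresses the same adjacency in bytes rather than scaled slots: the cluster test asks whether the second access begins exactly where the first one ends. Sketched standalone:

    #include <cassert>
    #include <cstdint>

    bool byteAdjacentAccesses(int64_t Offset1, uint64_t Width1, int64_t Offset2) {
      assert(Offset1 <= Offset2 && "caller should have ordered offsets");
      return Offset1 + static_cast<int64_t>(Width1) == Offset2;
    }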
  PPCISelLowering.cpp
    13835  int64_t Offset1 = 0, Offset2 = 0;    [in isConsecutiveLSLoc(), local]
    13837  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);    [in isConsecutiveLSLoc()]
    13838  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))    [in isConsecutiveLSLoc()]
    13845  Offset2 = 0;    [in isConsecutiveLSLoc()]
    13847  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);    [in isConsecutiveLSLoc()]
    13849  return Offset1 == (Offset2 + Dist*Bytes);    [in isConsecutiveLSLoc()]
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/
  SystemZInstrInfo.cpp
    1898  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);    [in getOpcodeForOffset(), local]
    1899  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {    [in getOpcodeForOffset()]
    1909  if (isInt<20>(Offset) && isInt<20>(Offset2)) {    [in getOpcodeForOffset()]
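getOpcodeForOffset must also account for the high half of a 128-bit access: both Offset and Offset + 8 have to be encodable in the instruction's displacement field, trying the unsigned 12-bit forms before the signed 20-bit ones. A simplified, standalone sketch of that decision:

    #include <cstdint>

    enum class DispForm { Short12, Long20, None };

    bool fitsUnsigned12(int64_t V) { return V >= 0 && V < (1 << 12); }
    bool fitsSigned20(int64_t V)   { return V >= -(1 << 19) && V < (1 << 19); }

    DispForm displacementFormFor(int64_t Offset, bool Is128Bit) {
      int64_t Offset2 = Is128Bit ? Offset + 8 : Offset;   // high half, if any
      if (fitsUnsigned12(Offset) && fitsUnsigned12(Offset2))
        return DispForm::Short12;
      if (fitsSigned20(Offset) && fitsSigned20(Offset2))
        return DispForm::Long20;
      return DispForm::None;              // offset must be materialised separately
    }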
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/
  MachinePipeliner.cpp
    873  int64_t Offset1, Offset2;    [in addLoopCarriedDependences(), local]
    877  TII->getMemOperandWithOffset(MI, BaseOp2, Offset2,    [in addLoopCarriedDependences()]
    881  (int)Offset1 < (int)Offset2) {    [in addLoopCarriedDependences()]
  CodeGenPrepare.cpp
    2409  uint64_t Offset2 = Offset.getLimitedValue();    [in optimizeCallInst(), local]
    2410  if (!isAligned(PrefAlign, Offset2))    [in optimizeCallInst()]
    2414  DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)    [in optimizeCallInst()]
    2423  DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)    [in optimizeCallInst()]
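The CodeGenPrepare hits form the legality test for raising the alignment of the alloca or global behind a memory intrinsic: the pointer's constant offset into the object must itself be PrefAlign-aligned, and the object must extend at least MinSize bytes past that offset. A placeholder-level sketch of the condition:

    #include <cstdint>

    bool canRaiseObjectAlignment(uint64_t Offset2, uint64_t PrefAlign,
                                 uint64_t ObjectSize, uint64_t MinSize) {
      if (Offset2 % PrefAlign != 0)               // isAligned(PrefAlign, Offset2)
        return false;
      return ObjectSize >= MinSize + Offset2;     // large enough beyond the offset
    }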
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
  SIInstrInfo.h
    249  int64_t Offset2, bool OffsetIsScalable2,
/freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/
  InstCombineAndOrXor.cpp
    1281  const APInt *Offset1 = nullptr, *Offset2 = nullptr;    [in foldAndOrOfICmpsUsingRanges(), local]
    1286  if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))    [in foldAndOrOfICmpsUsingRanges()]
    1300  if (Offset2)    [in foldAndOrOfICmpsUsingRanges()]
    1301  CR2 = CR2.subtract(*Offset2);    [in foldAndOrOfICmpsUsingRanges()]
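foldAndOrOfICmpsUsingRanges normalises "icmp (X + Offset), C" so both compares constrain the same X: the satisfying range is shifted back by Offset before the two ranges are combined. A toy, non-wrapping version of that shift (LLVM's ConstantRange also handles wrapped ranges, which this plain-integer stand-in does not):

    #include <cstdint>

    struct Range { int64_t Lo, Hi; };   // half-open [Lo, Hi), stand-in for ConstantRange

    // Range satisfied by X when (X + Offset) is known to lie in CR.
    Range subtractOffset(Range CR, int64_t Offset) {
      // Example: if X + 3 lies in [0, 10), then X lies in [-3, 7).
      return {CR.Lo - Offset, CR.Hi - Offset};
    }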
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
  CombinerHelper.cpp
    7039  std::optional<APInt> Offset2;    [in tryFoldAndOrOrICmpsUsingRanges(), local]
    7054  Offset2 = MaybeOffset2->Value;    [in tryFoldAndOrOrICmpsUsingRanges()]
    7070  if (Offset2)    [in tryFoldAndOrOrICmpsUsingRanges()]
    7071  CR2 = CR2.subtract(*Offset2);    [in tryFoldAndOrOrICmpsUsingRanges()]