Home
last modified time | relevance | path

Searched refs:Offset2 (Results 1 – 25 of 30) sorted by relevance

12

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/
ScheduleDAGSDNodes.cpp:247 int64_t Offset1, Offset2; in ClusterNeighboringLoads() local
248 if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) || in ClusterNeighboringLoads()
249 Offset1 == Offset2 || in ClusterNeighboringLoads()
257 O2SMap.insert(std::make_pair(Offset2, User)); in ClusterNeighboringLoads()
258 Offsets.push_back(Offset2); in ClusterNeighboringLoads()
259 if (Offset2 < Offset1) in ClusterNeighboringLoads()
/freebsd/contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/
AggressiveInstCombine.cpp:690 APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0); in foldLoadsRecursive() local
692 Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2, in foldLoadsRecursive()
728 if (Offset2.slt(Offset1)) { in foldLoadsRecursive()
731 std::swap(Offset1, Offset2); in foldLoadsRecursive()
762 if ((Shift2 - Shift1) != ShiftDiff || (Offset2 - Offset1) != PrevSize) in foldLoadsRecursive()
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/
MicroMipsSizeReduction.cpp:400 int64_t Offset1, Offset2; in ConsecutiveInstr() local
403 if (!GetImm(MI2, 2, Offset2)) in ConsecutiveInstr()
409 return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2))); in ConsecutiveInstr()
/freebsd/contrib/llvm-project/llvm/lib/IR/
Value.cpp:1061 APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0); in getPointerOffsetFrom() local
1063 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true); in getPointerOffsetFrom()
1067 return Offset2.getSExtValue() - Offset1.getSExtValue(); in getPointerOffsetFrom()
1091 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() - in getPointerOffsetFrom()
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
X86InstrInfo.h:525 int64_t &Offset2) const override;
543 int64_t Offset2,
X86InstrInfo.cpp:8748 int64_t &Offset2) const { in areLoadsFromSameBasePtr()
8868 Offset2 = Disp2->getSExtValue(); in areLoadsFromSameBasePtr()
8873 int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
8875 assert(Offset2 > Offset1); in shouldScheduleLoadsNear()
8876 if ((Offset2 - Offset1) / 8 > 64) in shouldScheduleLoadsNear()
/freebsd/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/
ContainerModeling.cpp:132 SymbolRef Offset2,
975 SymbolRef Offset2, in invalidateIteratorPositions() argument
979 compare(State, Pos.getOffset(), Offset2, Opc2); in invalidateIteratorPositions()
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/
TargetInstrInfo.h:1512 int64_t &Offset2) const { in areLoadsFromSameBasePtr() argument
1525 int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
1630 int64_t Offset2, bool OffsetIsScalable2, in shouldClusterMemOps() argument
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
ARMBaseInstrInfo.h:250 int64_t &Offset2) const override;
261 int64_t Offset1, int64_t Offset2,
ARMBaseInstrInfo.cpp:1793 int64_t &Offset2) const { in areLoadsFromSameBasePtr()
1840 Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue(); in areLoadsFromSameBasePtr()
1859 int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
1864 assert(Offset2 > Offset1); in shouldScheduleLoadsNear()
1866 if ((Offset2 - Offset1) / 8 > 64) in shouldScheduleLoadsNear()
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/
RISCVInstrInfo.h:181 int64_t Offset2, bool OffsetIsScalable2,
RISCVInstrInfo.cpp:3235 int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, in shouldClusterMemOps() argument
3256 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize; in shouldClusterMemOps()
/freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/
SeparateConstOffsetFromGEP.cpp:1435 Value *Offset2 = Second->getOperand(1); in swapGEPOperand() local
1436 First->setOperand(1, Offset2); in swapGEPOperand()
ConstraintElimination.cpp:725 int64_t Offset2 = BDec.Offset; in getConstraint() local
776 if (AddOverflow(Offset1, Offset2, OffsetSum)) in getConstraint()
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/
PPCInstrInfo.h:688 int64_t Offset2, bool OffsetIsScalable2,
PPCInstrInfo.cpp:2953 int64_t Offset1 = 0, Offset2 = 0; in shouldClusterMemOps() local
2958 !getMemOperandWithOffsetWidth(SecondLdSt, Base2, Offset2, Width2, TRI) || in shouldClusterMemOps()
2965 assert(Offset1 <= Offset2 && "Caller should have ordered offsets."); in shouldClusterMemOps()
2966 return Offset1 + (int64_t)Width1.getValue() == Offset2; in shouldClusterMemOps()
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64InstrInfo.h:333 int64_t Offset2, bool OffsetIsScalable2,
AArch64InstrInfo.cpp:4869 int64_t Offset2, unsigned Opcode2) { in shouldClusterFI() argument
4886 ObjectOffset2 += Offset2; in shouldClusterFI()
4940 int64_t Offset2 = SecondLdSt.getOperand(2).getImm(); in shouldClusterMemOps() local
4941 if (hasUnscaledLdStOffset(SecondOpc) && !scaleOffset(SecondOpc, Offset2)) in shouldClusterMemOps()
4951 assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) && in shouldClusterMemOps()
4957 BaseOp2.getIndex(), Offset2, SecondOpc); in shouldClusterMemOps()
4960 assert(Offset1 <= Offset2 && "Caller should have ordered offsets."); in shouldClusterMemOps()
4962 return Offset1 + 1 == Offset2; in shouldClusterMemOps()
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/
SystemZInstrInfo.cpp:1942 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset); in getOpcodeForOffset() local
1943 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) { in getOpcodeForOffset()
1953 if (isInt<20>(Offset) && isInt<20>(Offset2)) { in getOpcodeForOffset()
/freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/
InstCombineAddSub.cpp:2165 Value *Offset2 = EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, RewriteGEPs); in OptimizePointerDifference() local
2170 if (IsNUW && match(Offset2, m_Zero()) && Base.LHSNW.isInBounds() && in OptimizePointerDifference()
2181 if (!match(Offset2, m_Zero())) { in OptimizePointerDifference()
2183 Builder.CreateSub(Result, Offset2, "gepdiff", in OptimizePointerDifference()
InstCombineAndOrXor.cpp:1329 const APInt *Offset1 = nullptr, *Offset2 = nullptr; in foldAndOrOfICmpsUsingRanges() local
1334 if (match(V2, m_Add(m_Value(X), m_APInt(Offset2)))) in foldAndOrOfICmpsUsingRanges()
1348 if (Offset2) in foldAndOrOfICmpsUsingRanges()
1349 CR2 = CR2.subtract(*Offset2); in foldAndOrOfICmpsUsingRanges()
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/
MachinePipeliner.cpp:1016 int64_t Offset1, Offset2; in hasLoopCarriedMemDep() local
1020 TII->getMemOperandWithOffset(DstMI, BaseOp2, Offset2, Offset2IsScalable, in hasLoopCarriedMemDep()
1024 (int)Offset1 < (int)Offset2) { in hasLoopCarriedMemDep()
CodeGenPrepare.cpp:2656 uint64_t Offset2 = Offset.getLimitedValue(); in optimizeCallInst() local
2657 if (!isAligned(PrefAlign, Offset2)) in optimizeCallInst()
2661 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) in optimizeCallInst()
2670 DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2) in optimizeCallInst()
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
SIInstrInfo.h:260 int64_t Offset2, bool OffsetIsScalable2,
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
CombinerHelper.cpp:7376 std::optional<APInt> Offset2; in tryFoldAndOrOrICmpsUsingRanges() local
7391 Offset2 = MaybeOffset2->Value; in tryFoldAndOrOrICmpsUsingRanges()
7407 if (Offset2) in tryFoldAndOrOrICmpsUsingRanges()
7408 CR2 = CR2.subtract(*Offset2); in tryFoldAndOrOrICmpsUsingRanges()

12