/freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/

MergeICmps.cpp
    76  BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)  in BCEAtom()
    77      : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(std::move(Offset)) {}  in BCEAtom()
    87  LoadI = that.LoadI;  in operator=()
   108  LoadInst *LoadI = nullptr;  member
   136  auto *const LoadI = dyn_cast<LoadInst>(Val);  in visitICmpLoadOperand()  local
   137  if (!LoadI)  in visitICmpLoadOperand()
   140  if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {  in visitICmpLoadOperand()
   145  if (!LoadI->isSimple()) {  in visitICmpLoadOperand()
   149  Value *Addr = LoadI->getOperand(0);  in visitICmpLoadOperand()
   154  const auto &DL = LoadI->getDataLayout();  in visitICmpLoadOperand()
        [all …]
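
A minimal sketch of the screening pattern visible in these hits: an icmp operand qualifies for comparison merging only if it is a load, does not escape its block, and is simple (neither volatile nor atomic). The helper name is hypothetical; only calls shown in the hits are used.

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical helper mirroring visitICmpLoadOperand's early checks.
static LoadInst *getComparableLoad(Value *Val) {
  auto *LoadI = dyn_cast<LoadInst>(Val);
  if (!LoadI)
    return nullptr; // operand is not a load at all
  // A load whose value escapes its block cannot be folded into a memcmp.
  if (LoadI->isUsedOutsideOfBlock(LoadI->getParent()))
    return nullptr;
  // Volatile or atomic loads must not be merged or reordered.
  if (!LoadI->isSimple())
    return nullptr;
  return LoadI;
}
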
JumpThreading.cpp
  1112  if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue))  in processBlock()  local
  1113  if (simplifyPartiallyRedundantLoad(LoadI))  in processBlock()
  1223  bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {  in simplifyPartiallyRedundantLoad()  argument
  1225  if (!LoadI->isUnordered()) return false;  in simplifyPartiallyRedundantLoad()
  1229  BasicBlock *LoadBB = LoadI->getParent();  in simplifyPartiallyRedundantLoad()
  1239  Value *LoadedPtr = LoadI->getOperand(0);  in simplifyPartiallyRedundantLoad()
  1248  BasicBlock::iterator BBIt(LoadI);  in simplifyPartiallyRedundantLoad()
  1254  LoadI, LoadBB, BBIt, DefMaxInstsToScan, &BatchAA, &IsLoadCSE)) {  in simplifyPartiallyRedundantLoad()
  1260  combineMetadataForCSE(NLoadI, LoadI, false);  in simplifyPartiallyRedundantLoad()
  1266  if (AvailableVal == LoadI)  in simplifyPartiallyRedundantLoad()
        [all …]
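
A hedged sketch of the availability query driving simplifyPartiallyRedundantLoad: scan backwards from an unordered load for an earlier load or store that already produces the value. FindAvailableLoadedValue and DefMaxInstsToScan come from llvm/Analysis/Loads.h; the exact AA parameter type has varied across LLVM releases, so treat the signature as an assumption matching the hit at line 1254.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical wrapper: returns an equivalent earlier value, or nullptr.
static Value *findEarlierLoadedValue(LoadInst *LoadI, BatchAAResults &BatchAA) {
  if (!LoadI->isUnordered())
    return nullptr; // volatile/ordered loads are not safe to thread
  BasicBlock *LoadBB = LoadI->getParent();
  BasicBlock::iterator BBIt(LoadI);
  bool IsLoadCSE = false;
  // Scans at most DefMaxInstsToScan instructions backwards within LoadBB.
  return FindAvailableLoadedValue(LoadI, LoadBB, BBIt, DefMaxInstsToScan,
                                  &BatchAA, &IsLoadCSE);
}
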
DeadStoreElimination.cpp
  2010  if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {  in storeIsNoop()  local
  2011  if (LoadI->getPointerOperand() == Store->getOperand(1)) {  in storeIsNoop()
  2013  auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();  in storeIsNoop()
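
The shape of the no-op-store test above, as a sketch: a store whose value was just loaded from the same pointer writes back what memory already holds. The real code additionally walks MemorySSA (line 2013) to prove nothing clobbers the location in between; that part is omitted here, so the predicate is only a necessary condition.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical predicate: true if Store *may* be a no-op store-of-load.
static bool mayBeNoopStoreOfLoad(StoreInst *Store) {
  if (auto *LoadI = dyn_cast<LoadInst>(Store->getValueOperand()))
    return LoadI->getPointerOperand() == Store->getPointerOperand();
  return false;
}
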
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64LoadStoreOptimizer.cpp
   166  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
  1139  AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,  in promoteLoadFromStore()  argument
  1142  next_nodbg(LoadI, LoadI->getParent()->end());  in promoteLoadFromStore()
  1144  int LoadSize = TII->getMemScale(*LoadI);  in promoteLoadFromStore()
  1146  Register LdRt = getLdStRegOp(*LoadI).getReg();  in promoteLoadFromStore()
  1161  LoadI->getIterator())) {  in promoteLoadFromStore()
  1168  LLVM_DEBUG(LoadI->print(dbgs()));  in promoteLoadFromStore()
  1170  LoadI->eraseFromParent();  in promoteLoadFromStore()
  1175  BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),  in promoteLoadFromStore()
  1180  .setMIFlags(LoadI->getFlags());  in promoteLoadFromStore()
        [all …]
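
A hedged MIR-level sketch of the final promotion step, assuming the stored register StRt has already been proven to cover the loaded register LdRt: the load is replaced by a target-independent COPY at its own position and then erased, echoing the BuildMI/eraseFromParent pair in the hits. The real pass instead emits ORR/UBFM forms sized by getMemScale; all names here are hypothetical.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Hypothetical helper: forward the stored register into the load's users.
static void replaceLoadWithCopy(MachineBasicBlock::iterator LoadI,
                                Register LdRt, Register StRt,
                                const TargetInstrInfo *TII) {
  BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
          TII->get(TargetOpcode::COPY), LdRt)
      .addReg(StRt)
      .setMIFlags(LoadI->getFlags()); // preserve MI flags, as the pass does
  LoadI->eraseFromParent();           // the load is now dead
}
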
AArch64FalkorHWPFFix.cpp
   153  LoadInst *LoadI = dyn_cast<LoadInst>(&I);  in runOnLoop()  local
   154  if (!LoadI)  in runOnLoop()
   157  Value *PtrValue = LoadI->getPointerOperand();  in runOnLoop()
   166  LoadI->setMetadata(FALKOR_STRIDED_ACCESS_MD,  in runOnLoop()
   167  MDNode::get(LoadI->getContext(), {}));  in runOnLoop()
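
Sketch of the tagging step: an empty MDNode is attached under a named metadata kind so a later ISel-time fix-up can recognize strided loads. FALKOR_STRIDED_ACCESS_MD is a macro local to that file; the literal string below is an assumption standing in for it.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Hypothetical helper: mark LoadI so a later pass can find it again.
static void tagStridedAccess(LoadInst *LoadI) {
  // Empty tuple metadata: the presence of the kind is the whole signal.
  LoadI->setMetadata("falkor.strided.access",
                     MDNode::get(LoadI->getContext(), {}));
}
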
/freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/

InstCombinePHI.cpp
   120  if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {  in foldIntegerTypedPHI()  local
   121  Ptr = LoadI->getPointerOperand();  in foldIntegerTypedPHI()
   176  auto *LoadI = dyn_cast<LoadInst>(Arg);  in foldIntegerTypedPHI()  local
   177  if (!LoadI)  in foldIntegerTypedPHI()
   180  if (!LoadI->hasOneUse())  in foldIntegerTypedPHI()
   186  AvailablePtrVals.emplace_back(LoadI);  in foldIntegerTypedPHI()
   263  LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);  in foldIntegerTypedPHI()  local
   266  (LoadI && LoadI->hasOneUse())) &&  in foldIntegerTypedPHI()
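
Sketch of the single-use screen in the middle hits (lines 176-180): an integer-typed PHI is only rewritten when an incoming load has exactly one use, so rewriting cannot duplicate the load. The helper below is a hypothetical simplification; the real foldIntegerTypedPHI accepts other incoming value kinds besides loads.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical predicate over a PHI's incoming values.
static bool allIncomingAreSingleUseLoads(const PHINode &PN) {
  for (const Value *Arg : PN.incoming_values()) {
    const auto *LoadI = dyn_cast<LoadInst>(Arg);
    if (!LoadI || !LoadI->hasOneUse())
      return false; // not a load, or its value is shared elsewhere
  }
  return true;
}
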
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/

HexagonVLIWPacketizer.cpp
  1531  bool LoadI = I.mayLoad(), StoreI = I.mayStore();  in isLegalToPacketizeTogether()  local
  1539  if (LoadJ && LoadI && HII->isPureSlot0(J)) {  in isLegalToPacketizeTogether()
  1546  (StoreJ && LoadI && !NVStoreJ)) &&  in isLegalToPacketizeTogether()
  1554  if (StoreJ && LoadI && alias(J, I)) {  in isLegalToPacketizeTogether()
  1560  if (!LoadJ || (!LoadI && !StoreI)) {  in isLegalToPacketizeTogether()
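
Here LoadI is a bool, not a LoadInst: the pass buckets both candidate instructions by mayLoad()/mayStore() before applying Hexagon's slot and new-value-store rules. A hypothetical sketch of just the bucketing, reduced to the question of whether the pair can conflict on memory at all:

#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Hypothetical helper: does the (J before I) pair need an alias query?
static bool pairNeedsAliasCheck(const MachineInstr &I, const MachineInstr &J) {
  bool LoadI = I.mayLoad(), StoreI = I.mayStore();
  bool LoadJ = J.mayLoad(), StoreJ = J.mayStore();
  // A store paired with any other memory access can conflict; two pure
  // loads never do.
  return (StoreJ && (LoadI || StoreI)) || (LoadJ && StoreI);
}
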
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/

SelectOptimize.cpp
  1014  static bool isSafeToSinkLoad(Instruction *LoadI, Instruction *SI) {  in isSafeToSinkLoad()  argument
  1016  if (LoadI->getParent() != SI->getParent())  in isSafeToSinkLoad()
  1018  auto It = LoadI->getIterator();  in isSafeToSinkLoad()
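
The whole check fits in a few lines; a sketch consistent with the three hits above, assuming the select SI follows LoadI in their shared block: sinking the load below the select is safe only when nothing in between may write memory.

#include "llvm/IR/Instruction.h"

using namespace llvm;

// Sketch: conservative same-block scan between the load and the select.
static bool isSafeToSinkLoadSketch(Instruction *LoadI, Instruction *SI) {
  if (LoadI->getParent() != SI->getParent())
    return false; // only reason about straight-line code in one block
  auto It = LoadI->getIterator();
  while (&*It != SI) { // assumes SI comes after LoadI in the block
    if (It->mayWriteToMemory())
      return false; // an intervening write could change the loaded value
    ++It;
  }
  return true;
}
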
/freebsd/contrib/llvm-project/llvm/lib/Transforms/IPO/

OpenMPOpt.cpp
  1158  LoadInst *LoadI = new LoadInst(I.getType(), AllocaI,  in mergeParallelRegions()  local
  1161  UsrI->replaceUsesOfWith(&I, LoadI);  in mergeParallelRegions()
  4031  LoadInst *LoadI = new LoadInst(  in insertInstructionGuardsHelper()  local
  4037  A.changeUseAfterManifest(*U, *LoadI);  in insertInstructionGuardsHelper()
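
Hedged sketch of the reload-and-replace step behind the mergeParallelRegions hits: the value is reloaded from its stack slot right before the user, and the user is redirected to the reload. The LoadInst constructor taking an insert position is real LLVM API, though its parameter type (Instruction* versus an iterator) has shifted between releases; the helper name is hypothetical.

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: make UsrI consume a reload of I's stashed value.
static void redirectThroughReload(Instruction &I, AllocaInst *AllocaI,
                                  Instruction *UsrI) {
  // Reload the value immediately before the user.
  LoadInst *LoadI =
      new LoadInst(I.getType(), AllocaI, I.getName() + ".reload", UsrI);
  UsrI->replaceUsesOfWith(&I, LoadI);
}
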
AttributorAttributes.cpp
  1740  if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {  in updateImpl()  local
  1748  if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,  in updateImpl()
  1750  *LoadI->getType()))  in updateImpl()
  1770  BasicBlock *BB = LoadI->getParent();  in updateImpl()
  1776  if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), &IntrI))  in updateImpl()
  1793  if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(),  in updateImpl()
  1803  for (const Use &LoadU : LoadI->uses()) {  in updateImpl()
  1826  << *Assumption.second << ": " << *LoadI  in updateImpl()
  1836  OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());  in updateImpl()
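
The densest hit list here; one recognizable piece is the walk over every use of the loaded value (line 1803) to see what consumes it. A hedged sketch of just that walk, with a far simpler predicate than the assumption tracking the real updateImpl performs:

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical predicate: is the loaded value consumed only by icmps?
static bool onlyFeedsComparisons(const LoadInst *LoadI) {
  for (const Use &LoadU : LoadI->uses())
    if (!isa<ICmpInst>(LoadU.getUser()))
      return false;
  return true;
}
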
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/

SystemZISelLowering.cpp
  1040  if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))  in supportedAddressingMode()  local
  1041  if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())  in supportedAddressingMode()
  1043  return getLoadStoreAddrMode(HasVector, LoadI->getType());  in supportedAddressingMode()
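
Sketch of the store-of-load screen above: when the stored value is a single-use load in the same block, the pair behaves like a memory-to-memory move, so the addressing mode should be chosen for the load's type rather than the store's. The helper name is hypothetical.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: return the load fused with this store, if any.
static LoadInst *getFusableLoad(StoreInst *StoreI) {
  if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
    if (LoadI->hasOneUse() && LoadI->getParent() == StoreI->getParent())
      return LoadI;
  return nullptr;
}
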