//===- FunctionAttrs.cpp - Pass which marks function attributes -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements interprocedural passes which walk the
/// call-graph deducing and/or propagating function attributes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <iterator>
#include <map>
#include <optional>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "function-attrs"

STATISTIC(NumMemoryAttr, "Number of functions with improved memory attribute");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumWriteOnlyArg, "Number of arguments marked writeonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");
STATISTIC(NumNoFree, "Number of functions marked as nofree");
STATISTIC(NumWillReturn, "Number of functions marked as willreturn");
STATISTIC(NumNoSync, "Number of functions marked as nosync");

STATISTIC(NumThinLinkNoRecurse,
          "Number of functions marked as norecurse during thinlink");
thinlink"); 87 STATISTIC(NumThinLinkNoUnwind, 88 "Number of functions marked as nounwind during thinlink"); 89 90 static cl::opt<bool> EnableNonnullArgPropagation( 91 "enable-nonnull-arg-prop", cl::init(true), cl::Hidden, 92 cl::desc("Try to propagate nonnull argument attributes from callsites to " 93 "caller functions.")); 94 95 static cl::opt<bool> DisableNoUnwindInference( 96 "disable-nounwind-inference", cl::Hidden, 97 cl::desc("Stop inferring nounwind attribute during function-attrs pass")); 98 99 static cl::opt<bool> DisableNoFreeInference( 100 "disable-nofree-inference", cl::Hidden, 101 cl::desc("Stop inferring nofree attribute during function-attrs pass")); 102 103 static cl::opt<bool> DisableThinLTOPropagation( 104 "disable-thinlto-funcattrs", cl::init(true), cl::Hidden, 105 cl::desc("Don't propagate function-attrs in thinLTO")); 106 107 namespace { 108 109 using SCCNodeSet = SmallSetVector<Function *, 8>; 110 111 } // end anonymous namespace 112 113 /// Returns the memory access attribute for function F using AAR for AA results, 114 /// where SCCNodes is the current SCC. 115 /// 116 /// If ThisBody is true, this function may examine the function body and will 117 /// return a result pertaining to this copy of the function. If it is false, the 118 /// result will be based only on AA results for the function declaration; it 119 /// will be assumed that some other (perhaps less optimized) version of the 120 /// function may be selected at link time. 121 static MemoryEffects checkFunctionMemoryAccess(Function &F, bool ThisBody, 122 AAResults &AAR, 123 const SCCNodeSet &SCCNodes) { 124 MemoryEffects OrigME = AAR.getMemoryEffects(&F); 125 if (OrigME.doesNotAccessMemory()) 126 // Already perfect! 127 return OrigME; 128 129 if (!ThisBody) 130 return OrigME; 131 132 MemoryEffects ME = MemoryEffects::none(); 133 // Inalloca and preallocated arguments are always clobbered by the call. 134 if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca) || 135 F.getAttributes().hasAttrSomewhere(Attribute::Preallocated)) 136 ME |= MemoryEffects::argMemOnly(ModRefInfo::ModRef); 137 138 auto AddLocAccess = [&](const MemoryLocation &Loc, ModRefInfo MR) { 139 // Ignore accesses to known-invariant or local memory. 140 MR &= AAR.getModRefInfoMask(Loc, /*IgnoreLocal=*/true); 141 if (isNoModRef(MR)) 142 return; 143 144 const Value *UO = getUnderlyingObject(Loc.Ptr); 145 assert(!isa<AllocaInst>(UO) && 146 "Should have been handled by getModRefInfoMask()"); 147 if (isa<Argument>(UO)) { 148 ME |= MemoryEffects::argMemOnly(MR); 149 return; 150 } 151 152 // If it's not an identified object, it might be an argument. 153 if (!isIdentifiedObject(UO)) 154 ME |= MemoryEffects::argMemOnly(MR); 155 ME |= MemoryEffects(IRMemLocation::Other, MR); 156 }; 157 // Scan the function body for instructions that may read or write memory. 158 for (Instruction &I : instructions(F)) { 159 // Some instructions can be ignored even if they read or write memory. 160 // Detect these now, skipping to the next instruction if one is found. 161 if (auto *Call = dyn_cast<CallBase>(&I)) { 162 // Ignore calls to functions in the same SCC, as long as the call sites 163 // don't have operand bundles. Calls with operand bundles are allowed to 164 // have memory effects not described by the memory effects of the call 165 // target. 
      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
          SCCNodes.count(Call->getCalledFunction()))
        continue;
      MemoryEffects CallME = AAR.getMemoryEffects(Call);

      // If the call doesn't access memory, we're done.
      if (CallME.doesNotAccessMemory())
        continue;

      // A pseudo probe call shouldn't change any function attribute since it
      // doesn't translate to a real instruction. It carries a memory access
      // tag so that it is not removed by optimizations, without blocking the
      // optimization of other instructions.
      if (isa<PseudoProbeInst>(I))
        continue;

      ME |= CallME.getWithoutLoc(IRMemLocation::ArgMem);

      // If the call accesses captured memory (currently part of "other") and
      // an argument is captured (currently not tracked), then it may also
      // access argument memory.
      ModRefInfo OtherMR = CallME.getModRef(IRMemLocation::Other);
      ME |= MemoryEffects::argMemOnly(OtherMR);

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      ModRefInfo ArgMR = CallME.getModRef(IRMemLocation::ArgMem);
      if (ArgMR != ModRefInfo::NoModRef) {
        for (const Use &U : Call->args()) {
          const Value *Arg = U;
          if (!Arg->getType()->isPtrOrPtrVectorTy())
            continue;

          AddLocAccess(
              MemoryLocation::getBeforeOrAfter(Arg, I.getAAMetadata()), ArgMR);
        }
      }
      continue;
    }

    ModRefInfo MR = ModRefInfo::NoModRef;
    if (I.mayWriteToMemory())
      MR |= ModRefInfo::Mod;
    if (I.mayReadFromMemory())
      MR |= ModRefInfo::Ref;
    if (MR == ModRefInfo::NoModRef)
      continue;

    std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(&I);
    if (!Loc) {
      // If no location is known, conservatively assume anything can be
      // accessed.
      ME |= MemoryEffects(MR);
      continue;
    }

    // Volatile operations may access inaccessible memory.
    if (I.isVolatile())
      ME |= MemoryEffects::inaccessibleMemOnly(MR);

    AddLocAccess(*Loc, MR);
  }

  return OrigME & ME;
}

MemoryEffects llvm::computeFunctionBodyMemoryAccess(Function &F,
                                                    AAResults &AAR) {
  return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {});
}

/// Deduce readonly/readnone/writeonly attributes for the SCC.
template <typename AARGetterT>
static void addMemoryAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
                           SmallSet<Function *, 8> &Changed) {
  MemoryEffects ME = MemoryEffects::none();
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);
    // Non-exact function definitions may not be selected at link time, and an
    // alternative version that writes to memory may be selected. See the
    // comment on GlobalValue::isDefinitionExact for more details.
    ME |= checkFunctionMemoryAccess(*F, F->hasExactDefinition(), AAR, SCCNodes);
    // We have reached the bottom of the lattice and will not be able to
    // improve the result.
    if (ME == MemoryEffects::unknown())
      return;
  }

  for (Function *F : SCCNodes) {
    MemoryEffects OldME = F->getMemoryEffects();
    MemoryEffects NewME = ME & OldME;
    if (NewME != OldME) {
      ++NumMemoryAttr;
      F->setMemoryEffects(NewME);
      Changed.insert(F);
    }
  }
}
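// For illustration, a minimal hypothetical example (not from this file): in an
// SCC consisting only of
//   define void @f(ptr %p) {
//     store i32 0, ptr %p
//     ret void
//   }
// the scan in checkFunctionMemoryAccess sees a single store whose underlying
// object is an argument, so the SCC's effects meet at memory(argmem: write)
// and @f's memory attribute is refined accordingly.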
// Compute definitive function attributes for a function taking into account
// prevailing definitions and linkage types.
static FunctionSummary *calculatePrevailingSummary(
    ValueInfo VI,
    DenseMap<ValueInfo, FunctionSummary *> &CachedPrevailingSummary,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        IsPrevailing) {

  if (CachedPrevailingSummary.count(VI))
    return CachedPrevailingSummary[VI];

  /// At this point, prevailing symbols have been resolved. The following leads
  /// to returning a conservative result:
  /// - Multiple instances with local linkage. Normally local linkage would be
  ///   unique per module, as the GUID includes the module path. We could have
  ///   a GUID alias if there wasn't any distinguishing path when each file was
  ///   compiled, but that should be rare so we'll punt on those.
  ///
  /// These next 2 cases should not happen and will assert:
  /// - Multiple instances with external linkage. This should be caught in
  ///   symbol resolution.
  /// - Non-existent FunctionSummary for Aliasee. This presents a hole in our
  ///   knowledge meaning we have to go conservative.
  ///
  /// Otherwise, we calculate attributes for a function as:
  /// 1. If we have a local linkage, take its attributes. If there's somehow
  ///    multiple, bail and go conservative.
  /// 2. If we have an external/WeakODR/LinkOnceODR linkage check that it is
  ///    prevailing, take its attributes.
  /// 3. If we have a Weak/LinkOnce linkage the copies can have semantic
  ///    differences. However, if the prevailing copy is known it will be used
  ///    so take its attributes. If the prevailing copy is in a native file
  ///    all IR copies will be dead and propagation will go conservative.
  /// 4. AvailableExternally summaries without a prevailing copy are known to
  ///    occur in a couple of circumstances:
  ///    a. An internal function gets imported due to its caller getting
  ///       imported; it becomes AvailableExternally but no prevailing
  ///       definition exists. Because it has to get imported along with its
  ///       caller the attributes will be captured by propagating on its
  ///       caller.
  ///    b. C++11 [temp.explicit]p10 can generate AvailableExternally
  ///       definitions of explicitly instantiated template declarations
  ///       for inlining which are ultimately dropped from the TU. Since this
  ///       is localized to the TU the attributes will have already made it to
  ///       the callers.
  ///    These are edge cases and already captured by their callers so we
  ///    ignore these for now. If they become relevant to optimize in the
  ///    future this can be revisited.
  /// 5. Otherwise, go conservative.

  CachedPrevailingSummary[VI] = nullptr;
  FunctionSummary *Local = nullptr;
  FunctionSummary *Prevailing = nullptr;

  for (const auto &GVS : VI.getSummaryList()) {
    if (!GVS->isLive())
      continue;

    FunctionSummary *FS = dyn_cast<FunctionSummary>(GVS->getBaseObject());
    // Virtual and unknown (e.g. indirect) calls require going conservative.
    if (!FS || FS->fflags().HasUnknownCall)
      return nullptr;

    const auto &Linkage = GVS->linkage();
    if (GlobalValue::isLocalLinkage(Linkage)) {
      if (Local) {
        LLVM_DEBUG(
            dbgs()
            << "ThinLTO FunctionAttrs: Multiple Local Linkage, bailing on "
               "function "
            << VI.name() << " from " << FS->modulePath() << ". Previous module "
            << Local->modulePath() << "\n");
        return nullptr;
      }
      Local = FS;
    } else if (GlobalValue::isExternalLinkage(Linkage)) {
      assert(IsPrevailing(VI.getGUID(), GVS.get()));
      Prevailing = FS;
      break;
    } else if (GlobalValue::isWeakODRLinkage(Linkage) ||
               GlobalValue::isLinkOnceODRLinkage(Linkage) ||
               GlobalValue::isWeakAnyLinkage(Linkage) ||
               GlobalValue::isLinkOnceAnyLinkage(Linkage)) {
      if (IsPrevailing(VI.getGUID(), GVS.get())) {
        Prevailing = FS;
        break;
      }
    } else if (GlobalValue::isAvailableExternallyLinkage(Linkage)) {
      // TODO: Handle these cases if they become meaningful.
      continue;
    }
  }

  if (Local) {
    assert(!Prevailing);
    CachedPrevailingSummary[VI] = Local;
  } else if (Prevailing) {
    assert(!Local);
    CachedPrevailingSummary[VI] = Prevailing;
  }

  return CachedPrevailingSummary[VI];
}

bool llvm::thinLTOPropagateFunctionAttrs(
    ModuleSummaryIndex &Index,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        IsPrevailing) {
  // TODO: implement addNoAliasAttrs once there's more information about the
  // return type in the summary.
  if (DisableThinLTOPropagation)
    return false;

  DenseMap<ValueInfo, FunctionSummary *> CachedPrevailingSummary;
  bool Changed = false;

  auto PropagateAttributes = [&](std::vector<ValueInfo> &SCCNodes) {
    // Assume we can propagate unless we discover otherwise.
    FunctionSummary::FFlags InferredFlags;
    InferredFlags.NoRecurse = (SCCNodes.size() == 1);
    InferredFlags.NoUnwind = true;

    for (auto &V : SCCNodes) {
      FunctionSummary *CallerSummary =
          calculatePrevailingSummary(V, CachedPrevailingSummary, IsPrevailing);

      // Function summaries can lack information, e.g. for declarations.
      if (!CallerSummary)
        return;

      if (CallerSummary->fflags().MayThrow)
        InferredFlags.NoUnwind = false;

      for (const auto &Callee : CallerSummary->calls()) {
        FunctionSummary *CalleeSummary = calculatePrevailingSummary(
            Callee.first, CachedPrevailingSummary, IsPrevailing);

        if (!CalleeSummary)
          return;

        if (!CalleeSummary->fflags().NoRecurse)
          InferredFlags.NoRecurse = false;

        if (!CalleeSummary->fflags().NoUnwind)
          InferredFlags.NoUnwind = false;

        if (!InferredFlags.NoUnwind && !InferredFlags.NoRecurse)
          break;
      }
    }

    if (InferredFlags.NoUnwind || InferredFlags.NoRecurse) {
      Changed = true;
      for (auto &V : SCCNodes) {
        if (InferredFlags.NoRecurse) {
          LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoRecurse to "
                            << V.name() << "\n");
          ++NumThinLinkNoRecurse;
        }

        if (InferredFlags.NoUnwind) {
          LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoUnwind to "
                            << V.name() << "\n");
          ++NumThinLinkNoUnwind;
        }

        for (const auto &S : V.getSummaryList()) {
          if (auto *FS = dyn_cast<FunctionSummary>(S.get())) {
            if (InferredFlags.NoRecurse)
              FS->setNoRecurse();

            if (InferredFlags.NoUnwind)
              FS->setNoUnwind();
          }
        }
      }
    }
  };

  // Call the propagation function on each SCC in the Index.
  for (scc_iterator<ModuleSummaryIndex *> I = scc_begin(&Index); !I.isAtEnd();
       ++I) {
    std::vector<ValueInfo> Nodes(*I);
    PropagateAttributes(Nodes);
  }
  return Changed;
}
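// For illustration, a hypothetical two-module ThinLTO input:
//   module A: define void @f() { call void @g()  ret void }
//   module B: define void @g() { ret void }
// The index SCCs are visited callees-first, so {@g} is processed before {@f}:
// @g's summary gains norecurse and nounwind (it makes no calls and its summary
// is not MayThrow), and @f then inherits both flags from @g's prevailing
// summary at thin-link time.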
namespace {

/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  using iterator = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallBase *CB = dyn_cast<CallBase>(U->getUser());
    if (!CB) {
      Captured = true;
      return true;
    }

    Function *F = CB->getCalledFunction();
    if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
      Captured = true;
      return true;
    }

    assert(!CB->isCallee(U) && "callee operand reported captured?");
    const unsigned UseIndex = CB->getDataOperandNo(U);
    if (UseIndex >= CB->arg_size()) {
      // Data operand, but not an argument operand -- must be a bundle operand.
      assert(CB->hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use. In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  // True only if certainly captured (used outside our SCC).
  bool Captured = false;

  // Uses within our SCC.
  SmallVector<Argument *, 4> Uses;

  const SCCNodeSet &SCCNodes;
};

} // end anonymous namespace
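// For illustration, a hypothetical SCC-internal use that is not a capture:
//   define void @f(ptr %p) { call void @g(ptr %p)  ret void }
//   define void @g(ptr %q) { ret void }
// If @f and @g are in the same SCC, the tracker records @g's formal %q in
// Uses instead of reporting %p captured, so the argument-SCC analysis below
// can reason about %p and %q together.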
namespace llvm {

template <> struct GraphTraits<ArgumentGraphNode *> {
  using NodeRef = ArgumentGraphNode *;
  using ChildIteratorType = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  static NodeRef getEntryNode(NodeRef A) { return A; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
};

template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }

  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }

  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};

} // end namespace llvm
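// With the GraphTraits specializations above, the argument graph can be walked
// by the generic SCC iterator, e.g. (as addArgumentAttrs below does):
//   for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I)
//     /* process *I, a std::vector<ArgumentGraphNode *> */;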
/// Returns Attribute::None, Attribute::ReadOnly, Attribute::WriteOnly or
/// Attribute::ReadNone.
static Attribute::AttrKind
determinePointerAccessAttrs(Argument *A,
                            const SmallPtrSet<Argument *, 8> &SCCNodes) {
  SmallVector<Use *, 32> Worklist;
  SmallPtrSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr() || A->hasPreallocatedAttr())
    return Attribute::None;

  bool IsRead = false;
  bool IsWrite = false;

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    if (IsWrite && IsRead)
      // No point in searching further.
      return Attribute::None;

    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value
      // isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      CallBase &CB = cast<CallBase>(*I);
      if (CB.isCallee(U)) {
        IsRead = true;
        // Note that indirect calls do not capture; see the comment in
        // CaptureTracking for context.
        continue;
      }

      // Given we've explicitly handled the callee operand above, what's left
      // must be a data operand (e.g. argument or operand bundle).
      const unsigned UseIndex = CB.getDataOperandNo(U);

      if (!CB.doesNotCapture(UseIndex)) {
        if (!CB.onlyReadsMemory())
          // If the callee can save a copy into other memory, then simply
          // scanning uses of the call is insufficient. We have no way
          // of tracking copies of the pointer through memory to see
          // if a reloaded copy is written to, thus we must give up.
          return Attribute::None;
        // Push users for processing once we finish this one.
        if (!I->getType()->isVoidTy())
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      }

      if (CB.doesNotAccessMemory())
        continue;

      if (Function *F = CB.getCalledFunction())
        if (CB.isArgOperand(U) && UseIndex < F->arg_size() &&
            SCCNodes.count(F->getArg(UseIndex)))
          // This is an argument which is part of the speculative SCC. Note
          // that only operands corresponding to formal arguments of the
          // callee can participate in the speculation.
          break;

      // The accessors used on call site here do the right thing for calls and
      // invokes with operand bundles.
      if (CB.doesNotAccessMemory(UseIndex)) {
        /* nop */
      } else if (CB.onlyReadsMemory() || CB.onlyReadsMemory(UseIndex)) {
        IsRead = true;
      } else if (CB.hasFnAttr(Attribute::WriteOnly) ||
                 CB.dataOperandHasImpliedAttr(UseIndex, Attribute::WriteOnly)) {
        IsWrite = true;
      } else {
        return Attribute::None;
      }
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::Store:
      if (cast<StoreInst>(I)->getValueOperand() == *U)
        // untrackable capture
        return Attribute::None;

      // A volatile store has side effects beyond what writeonly can be relied
      // upon.
      if (cast<StoreInst>(I)->isVolatile())
        return Attribute::None;

      IsWrite = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  if (IsWrite && IsRead)
    return Attribute::None;
  else if (IsRead)
    return Attribute::ReadOnly;
  else if (IsWrite)
    return Attribute::WriteOnly;
  else
    return Attribute::ReadNone;
}
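// For illustration, hypothetical inputs and the classification the walk above
// produces:
//   define void @r(ptr %p) { %v = load i32, ptr %p  ret void }   ; ReadOnly
//   define void @w(ptr %p) { store i32 0, ptr %p  ret void }     ; WriteOnly
//   define void @n(ptr %p) { ret void }                          ; ReadNone
// A volatile access, a store of %p itself, or any unhandled user yields
// Attribute::None instead.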
/// Deduce returned attributes for the SCC.
static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
                                     SmallSet<Function *, 8> &Changed) {
  // Check each function in turn, determining if an argument is always
  // returned.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (F->getReturnType()->isVoidTy())
      continue;

    // There is nothing to do if an argument is already marked as 'returned'.
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned))
      continue;

    auto FindRetArg = [&]() -> Argument * {
      Argument *RetArg = nullptr;
      for (BasicBlock &BB : *F)
        if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
          // Note that stripPointerCasts should look through functions with
          // returned arguments.
          auto *RetVal =
              dyn_cast<Argument>(Ret->getReturnValue()->stripPointerCasts());
          if (!RetVal || RetVal->getType() != F->getReturnType())
            return nullptr;

          if (!RetArg)
            RetArg = RetVal;
          else if (RetArg != RetVal)
            return nullptr;
        }

      return RetArg;
    };

    if (Argument *RetArg = FindRetArg()) {
      RetArg->addAttr(Attribute::Returned);
      ++NumReturned;
      Changed.insert(F);
    }
  }
}

/// If a callsite has arguments that are also arguments to the parent function,
/// try to propagate attributes from the callsite's arguments to the parent's
/// arguments. This may be important because inlining can cause information
/// loss when attribute knowledge disappears with the inlined call.
static bool addArgumentAttrsFromCallsites(Function &F) {
  if (!EnableNonnullArgPropagation)
    return false;

  bool Changed = false;

  // For an argument attribute to transfer from a callsite to the parent, the
  // call must be guaranteed to execute every time the parent is called.
  // Conservatively, just check for calls in the entry block that are
  // guaranteed to execute.
  // TODO: This could be enhanced by testing if the callsite post-dominates the
  // entry block or by doing simple forward walks or backward walks to the
  // callsite.
  BasicBlock &Entry = F.getEntryBlock();
  for (Instruction &I : Entry) {
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (auto *CalledFunc = CB->getCalledFunction()) {
        for (auto &CSArg : CalledFunc->args()) {
          if (!CSArg.hasNonNullAttr(/* AllowUndefOrPoison */ false))
            continue;

          // If the non-null callsite argument operand is an argument to 'F'
          // (the caller) and the call is guaranteed to execute, then the value
          // must be non-null throughout 'F'.
          auto *FArg = dyn_cast<Argument>(CB->getArgOperand(CSArg.getArgNo()));
          if (FArg && !FArg->hasNonNullAttr()) {
            FArg->addAttr(Attribute::NonNull);
            Changed = true;
          }
        }
      }
    }
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
  }

  return Changed;
}
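// For illustration, a hypothetical caller/callee pair:
//   declare void @callee(ptr noundef nonnull)
//   define void @caller(ptr %p) {
//   entry:
//     call void @callee(ptr %p)
//     ret void
//   }
// The call is in the entry block and guaranteed to execute, and @callee's
// formal parameter is nonnull (and may not be undef/poison), so %p can be
// marked nonnull on @caller as well.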
static bool addAccessAttr(Argument *A, Attribute::AttrKind R) {
  assert((R == Attribute::ReadOnly || R == Attribute::ReadNone ||
          R == Attribute::WriteOnly) &&
         "Must be an access attribute.");
  assert(A && "Argument must not be null.");

  // If the argument already has the attribute, nothing needs to be done.
  if (A->hasAttribute(R))
    return false;

  // Otherwise, remove potentially conflicting attributes, add the new one,
  // and update statistics.
  A->removeAttr(Attribute::WriteOnly);
  A->removeAttr(Attribute::ReadOnly);
  A->removeAttr(Attribute::ReadNone);
  A->addAttr(R);
  if (R == Attribute::ReadOnly)
    ++NumReadOnlyArg;
  else if (R == Attribute::WriteOnly)
    ++NumWriteOnlyArg;
  else
    ++NumReadNoneArg;
  return true;
}

/// Deduce nocapture attributes for the SCC.
static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
                             SmallSet<Function *, 8> &Changed) {
  ArgumentGraph AG;

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (addArgumentAttrsFromCallsites(*F))
      Changed.insert(F);

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Argument &A : F->args()) {
        if (A.getType()->isPointerTy() && !A.hasNoCaptureAttr()) {
          A.addAttr(Attribute::NoCapture);
          ++NumNoCapture;
          Changed.insert(F);
        }
      }
      continue;
    }

    for (Argument &A : F->args()) {
      if (!A.getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A.hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A.addAttr(Attribute::NoCapture);
            ++NumNoCapture;
            Changed.insert(F);
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&A];
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A.onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone/writeonly without doing
        // an SCC? Note that we don't allow any calls at all here, or else our
        // result will be dependent on the iteration order through the
        // functions in the SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&A);
        Attribute::AttrKind R = determinePointerAccessAttrs(&A, Self);
        if (R != Attribute::None)
          if (addAccessAttr(&A, R))
            Changed.insert(F);
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // e.g. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(Attribute::NoCapture);
        ++NumNoCapture;
        Changed.insert(A->getParent());

        // Infer the access attributes given the new nocapture one.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerAccessAttrs(&*A, Self);
        if (R != Attribute::None)
          addAccessAttr(A, R);
      }
      continue;
    }

    bool SCCCaptured = false;
    for (ArgumentGraphNode *Node : ArgumentSCC) {
      if (Node->Uses.empty() && !Node->Definition->hasNoCaptureAttr()) {
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    for (ArgumentGraphNode *N : ArgumentSCC) {
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
      if (SCCCaptured)
        break;
    }
    if (SCCCaptured)
      continue;

    for (ArgumentGraphNode *N : ArgumentSCC) {
      Argument *A = N->Definition;
      A->addAttr(Attribute::NoCapture);
      ++NumNoCapture;
      Changed.insert(A->getParent());
    }
    // We also want to compute readonly/readnone/writeonly. With a small number
    // of false negatives, we can assume that any pointer which is captured
    // isn't going to be provably readonly or readnone, since by definition
    // we can't analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    auto meetAccessAttr = [](Attribute::AttrKind A, Attribute::AttrKind B) {
      if (A == B)
        return A;
      if (A == Attribute::ReadNone)
        return B;
      if (B == Attribute::ReadNone)
        return A;
      return Attribute::None;
    };

    Attribute::AttrKind AccessAttr = Attribute::ReadNone;
    for (ArgumentGraphNode *N : ArgumentSCC) {
      Argument *A = N->Definition;
      Attribute::AttrKind K = determinePointerAccessAttrs(A, ArgumentSCCNodes);
      AccessAttr = meetAccessAttr(AccessAttr, K);
      if (AccessAttr == Attribute::None)
        break;
    }

    if (AccessAttr != Attribute::None) {
      for (ArgumentGraphNode *N : ArgumentSCC) {
        Argument *A = N->Definition;
        if (addAccessAttr(A, AccessAttr))
          Changed.insert(A->getParent());
      }
    }
  }
}

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallBase &CB = cast<CallBase>(*RVI);
        if (CB.hasRetAttr(Attribute::NoAlias))
          break;
        if (CB.getCalledFunction() && SCCNodes.count(CB.getCalledFunction()))
          break;
        [[fallthrough]];
      }
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, /*ReturnCaptures=*/false,
                             /*StoreCaptures=*/false))
      return false;
  }

  return true;
}
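// For illustration, a hypothetical malloc-like function:
//   declare noalias ptr @malloc(i64)
//   define ptr @make() {
//     %m = call ptr @malloc(i64 8)
//     ret ptr %m
//   }
// Every value flowing to the return is a noalias call result, so
// isFunctionMallocLike returns true and @make's return can be marked noalias
// by addNoAliasAttrs below.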
/// Deduce noalias attributes for the SCC.
static void addNoAliasAttrs(const SCCNodeSet &SCCNodes,
                            SmallSet<Function *, 8> &Changed) {
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->returnDoesNotAlias())
      continue;

    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return;
  }

  for (Function *F : SCCNodes) {
    if (F->returnDoesNotAlias() || !F->getReturnType()->isPointerTy())
      continue;

    F->setReturnDoesNotAlias();
    ++NumNoAlias;
    Changed.insert(F);
  }
}

/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return a null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  auto &DL = F->getParent()->getDataLayout();

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good.
    if (isKnownNonZero(RetVal, DL))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallBase &CB = cast<CallBase>(*RVI);
      Function *Callee = CB.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise.
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null.
    }
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}
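// For illustration, a hypothetical function with a provably non-null return:
//   define ptr @p() {
//     %a = alloca i32
//     ret ptr %a
//   }
// isKnownNonZero proves the alloca non-null, so isReturnNonNull succeeds
// without speculation and addNonNullAttrs below can mark @p's return nonnull
// eagerly.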
/// Deduce nonnull attributes for the SCC.
static void addNonNullAttrs(const SCCNodeSet &SCCNodes,
                            SmallSet<Function *, 8> &Changed) {
  // Speculate that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasRetAttr(Attribute::NonNull))
      continue;

    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC.
        LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
                          << " as nonnull\n");
        F->addRetAttr(Attribute::NonNull);
        ++NumNonNullReturn;
        Changed.insert(F);
      }
      continue;
    }
    // At least one function returns something which could be null; we can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasRetAttr(Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addRetAttr(Attribute::NonNull);
      ++NumNonNullReturn;
      Changed.insert(F);
    }
  }
}

namespace {

/// Collects a set of attribute inference requests and performs them all in one
/// go on a single SCC Node. Inference involves scanning function bodies
/// looking for instructions that violate attribute assumptions.
/// As soon as all the bodies are fine we are free to set the attribute.
/// Customization of inference for individual attributes is performed by
/// providing a handful of predicates for each attribute.
class AttributeInferer {
public:
  /// Describes a request for inference of a single attribute.
  struct InferenceDescriptor {

    /// Returns true if this function does not have to be handled.
    /// General intent for this predicate is to provide an optimization
    /// for functions that do not need this attribute inference at all
    /// (say, for functions that already have the attribute).
    std::function<bool(const Function &)> SkipFunction;

    /// Returns true if this instruction violates attribute assumptions.
    std::function<bool(Instruction &)> InstrBreaksAttribute;

    /// Sets the inferred attribute for this function.
    std::function<void(Function &)> SetAttribute;

    /// Attribute we derive.
    Attribute::AttrKind AKind;

    /// If true, only "exact" definitions can be used to infer this attribute.
    /// See GlobalValue::isDefinitionExact.
    bool RequiresExactDefinition;

    InferenceDescriptor(Attribute::AttrKind AK,
                        std::function<bool(const Function &)> SkipFunc,
                        std::function<bool(Instruction &)> InstrScan,
                        std::function<void(Function &)> SetAttr,
                        bool ReqExactDef)
        : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
          SetAttribute(SetAttr), AKind(AK),
          RequiresExactDefinition(ReqExactDef) {}
  };

private:
  SmallVector<InferenceDescriptor, 4> InferenceDescriptors;

public:
  void registerAttrInference(InferenceDescriptor AttrInference) {
    InferenceDescriptors.push_back(AttrInference);
  }

  void run(const SCCNodeSet &SCCNodes, SmallSet<Function *, 8> &Changed);
};
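// A typical registration looks like the following sketch (see inferConvergent
// and inferAttrsFromFunctionBodies below for the real uses):
//   AttributeInferer AI;
//   AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
//       Attribute::NoUnwind, SkipFn, InstrBreaksFn, SetAttrFn,
//       /*RequiresExactDefinition=*/true});
//   AI.run(SCCNodes, Changed);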
/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
void AttributeInferer::run(const SCCNodeSet &SCCNodes,
                           SmallSet<Function *, 8> &Changed) {
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
    SmallVector<InferenceDescriptor, 4> InferInThisFunc;
    llvm::copy_if(
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return;

  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only attributes whose assumptions
    // held throughout the SCC: every function was either explicitly skipped
    // from scan/inference or verified to have no instructions that break the
    // attribute's assumptions. Hence we just go and force the attribute for
    // all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed.insert(F);
      ID.SetAttribute(*F);
    }
}

struct SCCNodesResult {
  SCCNodeSet SCCNodes;
  bool HasUnknownCall;
};

} // end anonymous namespace

/// Helper for non-Convergent inference predicate InstrBreaksAttribute.
static bool InstrBreaksNonConvergent(Instruction &I,
                                     const SCCNodeSet &SCCNodes) {
  const CallBase *CB = dyn_cast<CallBase>(&I);
  // Breaks the non-convergent assumption if I is a convergent call to a
  // function not in the SCC.
  return CB && CB->isConvergent() &&
         !SCCNodes.contains(CB->getCalledFunction());
}

/// Helper for NoUnwind inference predicate InstrBreaksAttribute.
static bool InstrBreaksNonThrowing(Instruction &I,
                                   const SCCNodeSet &SCCNodes) {
  if (!I.mayThrow(/* IncludePhaseOneUnwind */ true))
    return false;
  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (Function *Callee = CI->getCalledFunction()) {
      // I is a may-throw call to a function inside our SCC. This doesn't
      // invalidate our current working assumption that the SCC is no-throw;
      // we just have to scan that other function.
      if (SCCNodes.contains(Callee))
        return false;
    }
  }
  return true;
}

/// Helper for NoFree inference predicate InstrBreaksAttribute.
static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
  CallBase *CB = dyn_cast<CallBase>(&I);
  if (!CB)
    return false;

  if (CB->hasFnAttr(Attribute::NoFree))
    return false;

  // Speculatively assume in SCC.
  if (Function *Callee = CB->getCalledFunction())
    if (SCCNodes.contains(Callee))
      return false;

  return true;
}

// Return true if this is an atomic which has an ordering stronger than
// unordered. Note that this is different than the predicate we use in
// Attributor. Here we chose to be conservative and consider monotonic
// operations potentially synchronizing. We generally don't do much with
// monotonic operations, so this is simply risk reduction.
static bool isOrderedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  else if (isa<AtomicCmpXchgInst>(I) || isa<AtomicRMWInst>(I))
    return true;
  else if (auto *SI = dyn_cast<StoreInst>(I))
    return !SI->isUnordered();
  else if (auto *LI = dyn_cast<LoadInst>(I))
    return !LI->isUnordered();
  else {
    llvm_unreachable("unknown atomic instruction?");
  }
}

static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
  // Volatile may synchronize.
  if (I.isVolatile())
    return true;

  // An ordered atomic may synchronize. (See the comment about monotonic
  // operations above.)
  if (isOrderedAtomic(&I))
    return true;

  auto *CB = dyn_cast<CallBase>(&I);
  if (!CB)
    // Non-call cases are covered by the two checks above.
    return false;

  if (CB->hasFnAttr(Attribute::NoSync))
    return false;

  // Non-volatile memset/memcpy/memmoves are nosync.
  // NOTE: Only intrinsics with volatile flags should be handled here. All
  // others should be marked in Intrinsics.td.
  if (auto *MI = dyn_cast<MemIntrinsic>(&I))
    if (!MI->isVolatile())
      return false;

  // Speculatively assume in SCC.
  if (Function *Callee = CB->getCalledFunction())
    if (SCCNodes.contains(Callee))
      return false;

  return true;
}

/// Attempt to remove the convergent function attribute when possible.
///
/// Any functions whose attributes were modified are added to \p Changed.
static void inferConvergent(const SCCNodeSet &SCCNodes,
                            SmallSet<Function *, 8> &Changed) {
  AttributeInferer AI;

  // Request to remove the convergent attribute from all functions in the SCC
  // if every callsite within the SCC is not convergent (except for calls
  // to functions within the SCC).
  // Note: Removal of the attr from the callsites will happen in
  // InstCombineCalls separately.
  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
      Attribute::Convergent,
      // Skip non-convergent functions.
      [](const Function &F) { return !F.isConvergent(); },
      // Instructions that break non-convergent assumption.
      [SCCNodes](Instruction &I) {
        return InstrBreaksNonConvergent(I, SCCNodes);
      },
      [](Function &F) {
        LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
                          << "\n");
        F.setNotConvergent();
      },
      /* RequiresExactDefinition= */ false});
  // Perform all the requested attribute inference actions.
  AI.run(SCCNodes, Changed);
}

/// Infer attributes from all functions in the SCC by scanning every
/// instruction for compliance to the attribute assumptions.
///
/// Any functions whose attributes were modified are added to \p Changed.
static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
                                         SmallSet<Function *, 8> &Changed) {
  AttributeInferer AI;

  if (!DisableNoUnwindInference)
    // Request to infer nounwind attribute for all the functions in the SCC if
    // every callsite within the SCC is not throwing (except for calls to
    // functions within the SCC). Note that the nounwind attribute suffers from
    // derefinement - results may change depending on how functions are
    // optimized. Thus it can be inferred only from exact definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoUnwind,
        // Skip non-throwing functions.
        [](const Function &F) { return F.doesNotThrow(); },
        // Instructions that break non-throwing assumption.
        [&SCCNodes](Instruction &I) {
          return InstrBreaksNonThrowing(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nounwind attr to fn " << F.getName() << "\n");
          F.setDoesNotThrow();
          ++NumNoUnwind;
        },
        /* RequiresExactDefinition= */ true});

  if (!DisableNoFreeInference)
    // Request to infer nofree attribute for all the functions in the SCC if
    // every callsite within the SCC does not directly or indirectly free
    // memory (except for calls to functions within the SCC). Note that the
    // nofree attribute suffers from derefinement - results may change
    // depending on how functions are optimized. Thus it can be inferred only
    // from exact definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoFree,
        // Skip functions known not to free memory.
        [](const Function &F) { return F.doesNotFreeMemory(); },
        // Instructions that break non-deallocating assumption.
        [&SCCNodes](Instruction &I) {
          return InstrBreaksNoFree(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nofree attr to fn " << F.getName() << "\n");
          F.setDoesNotFreeMemory();
          ++NumNoFree;
        },
        /* RequiresExactDefinition= */ true});

  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
      Attribute::NoSync,
      // Skip already marked functions.
      [](const Function &F) { return F.hasNoSync(); },
      // Instructions that break nosync assumption.
      [&SCCNodes](Instruction &I) {
        return InstrBreaksNoSync(I, SCCNodes);
      },
      [](Function &F) {
        LLVM_DEBUG(dbgs()
                   << "Adding nosync attr to fn " << F.getName() << "\n");
        F.setNoSync();
        ++NumNoSync;
      },
      /* RequiresExactDefinition= */ true});

  // Perform all the requested attribute inference actions.
  AI.run(SCCNodes, Changed);
}

static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes,
                              SmallSet<Function *, 8> &Changed) {
  // Try to identify functions that do not recurse.

  // If the SCC contains multiple nodes we know for sure there is recursion.
  if (SCCNodes.size() != 1)
    return;

  Function *F = *SCCNodes.begin();
  if (!F || !F->hasExactDefinition() || F->doesNotRecurse())
    return;

  // If all of the calls in F are identifiable and are to norecurse functions,
  // F is norecurse. This check also detects self-recursion, as F is not
  // currently marked norecurse, so any call from F to F will not be treated
  // as norecurse.
  for (auto &BB : *F)
    for (auto &I : BB.instructionsWithoutDebug())
      if (auto *CB = dyn_cast<CallBase>(&I)) {
        Function *Callee = CB->getCalledFunction();
        if (!Callee || Callee == F || !Callee->doesNotRecurse())
          // Function calls a potentially recursive function.
          return;
      }

  // Every call was to a non-recursive function other than this function, and
  // we have no indirect recursion as the SCC size is one. This function cannot
  // recurse.
  F->setDoesNotRecurse();
  ++NumNoRecurse;
  Changed.insert(F);
}

static bool instructionDoesNotReturn(Instruction &I) {
  if (auto *CB = dyn_cast<CallBase>(&I))
    return CB->hasFnAttr(Attribute::NoReturn);
  return false;
}

// A basic block can only return if it terminates with a ReturnInst and does
// not contain calls to noreturn functions.
static bool basicBlockCanReturn(BasicBlock &BB) {
  if (!isa<ReturnInst>(BB.getTerminator()))
    return false;
  return none_of(BB, instructionDoesNotReturn);
}

// FIXME: this doesn't handle recursion.
static bool canReturn(Function &F) {
  SmallVector<BasicBlock *, 16> Worklist;
  SmallPtrSet<BasicBlock *, 16> Visited;

  Visited.insert(&F.front());
  Worklist.push_back(&F.front());

  do {
    BasicBlock *BB = Worklist.pop_back_val();
    if (basicBlockCanReturn(*BB))
      return true;
    for (BasicBlock *Succ : successors(BB))
      if (Visited.insert(Succ).second)
        Worklist.push_back(Succ);
  } while (!Worklist.empty());

  return false;
}
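// For illustration, a hypothetical function that can never return:
//   define void @spin() {
//   entry:
//     br label %loop
//   loop:
//     br label %loop
//   }
// No reachable block terminates in a ReturnInst, so canReturn is false and
// addNoReturnAttrs below marks @spin noreturn.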
static void addNoReturnAttrs(const SCCNodeSet &SCCNodes,
                             SmallSet<Function *, 8> &Changed) {
  for (Function *F : SCCNodes) {
    if (!F || !F->hasExactDefinition() ||
        F->hasFnAttribute(Attribute::Naked) || F->doesNotReturn())
      continue;

    if (!canReturn(*F)) {
      F->setDoesNotReturn();
      Changed.insert(F);
    }
  }
}

static bool functionWillReturn(const Function &F) {
  // We can infer and propagate function attributes only when we know that the
  // definition we'll get at link time is *exactly* the definition we see now.
  // For more details, see GlobalValue::mayBeDerefined.
  if (!F.hasExactDefinition())
    return false;

  // A must-progress function without side effects must return.
  if (F.mustProgress() && F.onlyReadsMemory())
    return true;

  // We can only analyze functions with a definition.
  if (F.isDeclaration())
    return false;

  // Functions with loops require more sophisticated analysis, as the loop
  // may be infinite. For now, don't try to handle them.
  SmallVector<std::pair<const BasicBlock *, const BasicBlock *>> Backedges;
  FindFunctionBackedges(F, Backedges);
  if (!Backedges.empty())
    return false;

  // If there are no loops, then the function is willreturn if all calls in
  // it are willreturn.
  return all_of(instructions(F),
                [](const Instruction &I) { return I.willReturn(); });
}

// Set the willreturn function attribute if possible.
static void addWillReturn(const SCCNodeSet &SCCNodes,
                          SmallSet<Function *, 8> &Changed) {
  for (Function *F : SCCNodes) {
    if (!F || F->willReturn() || !functionWillReturn(*F))
      continue;

    F->setWillReturn();
    NumWillReturn++;
    Changed.insert(F);
  }
}

static SCCNodesResult createSCCNodeSet(ArrayRef<Function *> Functions) {
  SCCNodesResult Res;
  Res.HasUnknownCall = false;
  for (Function *F : Functions) {
    if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked) ||
        F->isPresplitCoroutine()) {
      // Treat any function we're trying not to optimize as if it were an
      // indirect call and omit it from the node set used below.
      Res.HasUnknownCall = true;
      continue;
    }
    // Track whether any functions in this SCC have an unknown call edge.
    // Note: if this is ever a performance hit, we can common it with
    // subsequent routines which also do scans over the instructions of the
    // function.
    if (!Res.HasUnknownCall) {
      for (Instruction &I : instructions(*F)) {
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (!CB->getCalledFunction()) {
            Res.HasUnknownCall = true;
            break;
          }
        }
      }
    }
    Res.SCCNodes.insert(F);
  }
  return Res;
}

template <typename AARGetterT>
static SmallSet<Function *, 8>
deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter) {
  SCCNodesResult Nodes = createSCCNodeSet(Functions);

  // Bail if the SCC only contains optnone functions.
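  // (createSCCNodeSet also filtered out naked functions and presplit
  // coroutines, treating each as an unknown call, so an empty node set means
  // there is nothing left to infer attributes for.)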
  if (Nodes.SCCNodes.empty())
    return {};

  SmallSet<Function *, 8> Changed;

  addArgumentReturnedAttrs(Nodes.SCCNodes, Changed);
  addMemoryAttrs(Nodes.SCCNodes, AARGetter, Changed);
  addArgumentAttrs(Nodes.SCCNodes, Changed);
  inferConvergent(Nodes.SCCNodes, Changed);
  addNoReturnAttrs(Nodes.SCCNodes, Changed);
  addWillReturn(Nodes.SCCNodes, Changed);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!Nodes.HasUnknownCall) {
    addNoAliasAttrs(Nodes.SCCNodes, Changed);
    addNonNullAttrs(Nodes.SCCNodes, Changed);
    inferAttrsFromFunctionBodies(Nodes.SCCNodes, Changed);
    addNoRecurseAttrs(Nodes.SCCNodes, Changed);
  }

  // Finally, infer the maximal set of attributes from the ones we've inferred
  // above. This handles the cases where one attribute on a signature implies
  // another, but for implementation reasons the inference rule for the latter
  // is missing (or simply less sophisticated).
  for (Function *F : Nodes.SCCNodes)
    if (F)
      if (inferAttributesFromOthers(*F))
        Changed.insert(F);

  return Changed;
}

PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM,
                                                  LazyCallGraph &CG,
                                                  CGSCCUpdateResult &) {
  // Skip non-recursive functions if requested.
  if (C.size() == 1 && SkipNonRecursive) {
    LazyCallGraph::Node &N = *C.begin();
    if (!N->lookup(N))
      return PreservedAnalyses::all();
  }

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // We pass a lambda into the helper functions to wire them up to the
  // analysis manager for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  SmallVector<Function *, 8> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.push_back(&N.getFunction());

  auto ChangedFunctions = deriveAttrsInPostOrder(Functions, AARGetter);
  if (ChangedFunctions.empty())
    return PreservedAnalyses::all();

  // Invalidate analyses for modified functions so that we don't have to
  // invalidate all analyses for all functions in this SCC.
  PreservedAnalyses FuncPA;
  // We haven't changed the CFG for modified functions.
  FuncPA.preserveSet<CFGAnalyses>();
  for (Function *Changed : ChangedFunctions) {
    FAM.invalidate(*Changed, FuncPA);
    // Also invalidate any direct callers of changed functions, since analyses
    // may care about attributes of direct callees. For example, MemorySSA
    // cares about whether or not a call's callee modifies memory and queries
    // that through function attributes.
    for (auto *U : Changed->users()) {
      if (auto *Call = dyn_cast<CallBase>(U)) {
        if (Call->getCalledFunction() == Changed)
          FAM.invalidate(*Call->getFunction(), FuncPA);
      }
    }
  }

  PreservedAnalyses PA;
  // We have not added or removed functions.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We already invalidated all relevant function analyses above.
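  // Declaring them all preserved here keeps the CGSCC pass manager's function
  // analysis proxy from invalidating them wholesale a second time.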
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}

void PostOrderFunctionAttrsPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<PostOrderFunctionAttrsPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  if (SkipNonRecursive)
    OS << "<skip-non-recursive>";
}

template <typename AARGetterT>
static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
  SmallVector<Function *, 8> Functions;
  for (CallGraphNode *I : SCC)
    Functions.push_back(I->getFunction());

  return !deriveAttrsInPostOrder(Functions, AARGetter).empty();
}

static bool addNoRecurseAttrsTopDown(Function &F) {
  // We check the preconditions for the function prior to calling this to avoid
  // the cost of building up a reversible post-order list. We assert them here
  // to make sure none of the invariants this relies on were violated.
  assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
  assert(!F.doesNotRecurse() &&
         "This function has already been deduced as norecurse!");
  assert(F.hasInternalLinkage() &&
         "Can only do top-down deduction for internal linkage functions!");

  // If F is internal and all of its uses are calls from non-recursive
  // functions, then none of its calls could in fact recurse without going
  // through a function marked norecurse, and so we can mark this function too
  // as norecurse. Note that the uses must actually be calls -- otherwise
  // a pointer to this function could be returned from a norecurse function but
  // this function could be recursively (indirectly) called. Note that this
  // also detects if F is directly recursive, as F is not yet marked as
  // a norecurse function.
  for (auto &U : F.uses()) {
    auto *I = dyn_cast<Instruction>(U.getUser());
    if (!I)
      return false;
    CallBase *CB = dyn_cast<CallBase>(I);
    if (!CB || !CB->isCallee(&U) ||
        !CB->getParent()->getParent()->doesNotRecurse())
      return false;
  }
  F.setDoesNotRecurse();
  ++NumNoRecurse;
  return true;
}

static bool deduceFunctionAttributeInRPO(Module &M, LazyCallGraph &CG) {
  // We only have a post-order SCC traversal (because SCCs are inherently
  // discovered in post-order), so we accumulate them in a vector and then walk
  // it in reverse. This is simpler than using the RPO iterator infrastructure
  // because we need to combine SCC detection and the PO walk of the call
  // graph. We can also cheat egregiously because we're primarily interested in
  // synthesizing norecurse and so we only need to save the singleton SCCs, as
  // SCCs with multiple functions in them will clearly be recursive.
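  //
  // For illustration, a minimal sketch (assuming @main was already deduced
  // norecurse by the post-order pass above):
  //
  //   define i32 @main() norecurse {   ; already marked
  //     call void @helper()
  //     ret i32 0
  //   }
  //   define internal void @helper() { ; deduced norecurse here
  //     ret void
  //   }
  //
  // Only the singleton SCC for @helper lands on the worklist; when it is
  // visited, its sole use is a direct call from the norecurse @main, so
  // addNoRecurseAttrsTopDown can mark it. Walking callers before callees
  // lets chains of such internal helpers be marked in a single sweep.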

  SmallVector<Function *, 16> Worklist;
  CG.buildRefSCCs();
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
    for (LazyCallGraph::SCC &SCC : RC) {
      if (SCC.size() != 1)
        continue;
      Function &F = SCC.begin()->getFunction();
      if (!F.isDeclaration() && !F.doesNotRecurse() && F.hasInternalLinkage())
        Worklist.push_back(&F);
    }
  }
  bool Changed = false;
  for (auto *F : llvm::reverse(Worklist))
    Changed |= addNoRecurseAttrsTopDown(*F);

  return Changed;
}

PreservedAnalyses
ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &CG = AM.getResult<LazyCallGraphAnalysis>(M);

  if (!deduceFunctionAttributeInRPO(M, CG))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<LazyCallGraphAnalysis>();
  return PA;
}