//===- ArgumentPromotion.cpp - Promote by-reference arguments ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments. In
// practice, this means looking for internal functions that have pointer
// arguments. If it can prove, through the use of alias analysis, that an
// argument is *only* loaded, then it can pass the value into the function
// instead of the address of the value. This can cause recursive simplification
// of code and lead to the elimination of allocas (especially in C++ template
// code like the STL).
//
// This pass also handles aggregate arguments that are passed into a function,
// scalarizing them if the elements of the aggregate are only loaded. Note that
// by default it refuses to scalarize aggregates which would require passing in
// more than three operands to the function, because passing thousands of
// operands for a large array or structure is unprofitable! This limit can be
// configured or disabled, however.
//
// Note that this transformation could also be done for arguments that are only
// stored to (returning the value instead), but this is not currently done.
// This case would be best handled when and if LLVM begins supporting multiple
// return values from functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "argpromotion"

STATISTIC(NumArgumentsPromoted, "Number of pointer arguments promoted");
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
STATISTIC(NumByValArgsPromoted, "Number of byval arguments promoted");
STATISTIC(NumArgumentsDead, "Number of dead pointer args eliminated");

/// A vector used to hold the indices of a single GEP instruction
using IndicesVector = std::vector<uint64_t>;

/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
static Function *
doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
            SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
            Optional<function_ref<void(CallBase &OldCS, CallBase &NewCS)>>
                ReplaceCallSite) {
  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type *> Params;

  using ScalarizeTable = std::set<std::pair<Type *, IndicesVector>>;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  std::map<Argument *, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  // We need to keep the original loads for each argument and the elements
  // of the argument that are accessed.
  std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;

  // ArgAttrVec - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the
  // parameter attributes are lost.
  SmallVector<AttributeSet, 8> ArgAttrVec;
  AttributeList PAL = F->getAttributes();

  // First, determine the new argument list.
  unsigned ArgNo = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgNo) {
    if (ByValArgsToTransform.count(&*I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = I->getParamByValType();
      StructType *STy = cast<StructType>(AgTy);
      llvm::append_range(Params, STy->elements());
      ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
                        AttributeSet());
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(&*I)) {
      // Unchanged argument.
      Params.push_back(I->getType());
      ArgAttrVec.push_back(PAL.getParamAttributes(ArgNo));
    } else if (I->use_empty()) {
      // Dead argument (dead arguments are always marked as promotable).
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads.

      // In this table, we will track which indices are loaded from the
      // argument (where direct loads are tracked as no indices).
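      //
      // For example (an illustrative sketch; the type and value names are
      // assumed, not taken from a real test case): a direct
      // "load i32, i32* %arg" is recorded with an empty index list, while a
      // "getelementptr %struct.ss, %struct.ss* %arg, i64 0, i32 1" whose
      // result is loaded is recorded with the index list {0, 1}.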
      ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
      for (User *U : make_early_inc_range(I->users())) {
        Instruction *UI = cast<Instruction>(U);
        Type *SrcTy;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          SrcTy = L->getType();
        else
          SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
        // Skip dead GEPs and remove them.
        if (isa<GetElementPtrInst>(UI) && UI->use_empty()) {
          UI->eraseFromParent();
          continue;
        }

        IndicesVector Indices;
        Indices.reserve(UI->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any
        // indices, and gep+loads with the GEP indices.
        for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads.
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(std::make_pair(SrcTy, Indices));
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          OrigLoad = L;
        else
          // Take any load; we will use it only to update Alias Analysis.
          OrigLoad = cast<LoadInst>(UI->user_back());
        OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (const auto &ArgIndex : ArgIndices) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(
            cast<PointerType>(I->getType())->getElementType(),
            ArgIndex.second));
        ArgAttrVec.push_back(AttributeSet());
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  Type *RetTy = FTy->getReturnType();

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getAddressSpace(),
                                  F->getName());
  NF->copyAttributesFrom(F);
  NF->copyMetadata(F, 0);

  // The new function will have the !dbg metadata copied from the original
  // function. The original function may not be deleted, and dbg metadata need
  // to be unique, so we need to drop it.
  F->setSubprogram(nullptr);

  LLVM_DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
                    << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttributeList::get(F->getContext(), PAL.getFnAttributes(),
                                       PAL.getRetAttributes(), ArgAttrVec));
  ArgAttrVec.clear();

  F->getParent()->getFunctionList().insert(F->getIterator(), NF);
  NF->takeName(F);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value *, 16> Args;
  const DataLayout &DL = F->getParent()->getDataLayout();
  while (!F->use_empty()) {
    CallBase &CB = cast<CallBase>(*F->user_back());
    assert(CB.getCalledFunction() == F);
    const AttributeList &CallPAL = CB.getAttributes();
    IRBuilder<NoFolder> IRB(&CB);

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
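    //
    // As an illustrative sketch (the types and value names here are assumed,
    // not taken from a real test case), a caller such as
    //   call void @f(%struct.ss* %p)
    // where @f only loads element 1 of its argument is rewritten to
    //   %p.idx = getelementptr %struct.ss, %struct.ss* %p, i64 0, i32 1
    //   %p.idx.val = load i32, i32* %p.idx
    //   call void @f(i32 %p.idx.val)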
    auto AI = CB.arg_begin();
    ArgNo = 0;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
         ++I, ++AI, ++ArgNo)
      if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
        Args.push_back(*AI); // Unmodified argument
        ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
      } else if (ByValArgsToTransform.count(&*I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = I->getParamByValType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr};
        const StructLayout *SL = DL.getStructLayout(STy);
        Align StructAlign = *I->getParamAlign();
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          auto *Idx =
              IRB.CreateGEP(STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i));
          // TODO: Tell AA about the new values?
          Align Alignment =
              commonAlignment(StructAlign, SL->getElementOffset(i));
          Args.push_back(IRB.CreateAlignedLoad(
              STy->getElementType(i), Idx, Alignment, Idx->getName() + ".val"));
          ArgAttrVec.push_back(AttributeSet());
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value *> Ops;
        for (const auto &ArgIndex : ArgIndices) {
          Value *V = *AI;
          LoadInst *OrigLoad =
              OriginalLoads[std::make_pair(&*I, ArgIndex.second)];
          if (!ArgIndex.second.empty()) {
            Ops.reserve(ArgIndex.second.size());
            Type *ElTy = V->getType();
            for (auto II : ArgIndex.second) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy =
                  (ElTy->isStructTy() ? Type::getInt32Ty(F->getContext())
                                      : Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, II));
              // Keep track of the type we're currently indexing.
              if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
                ElTy = ElPTy->getElementType();
              else
                ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, II);
            }
            // And create a GEP to extract those indices.
            V = IRB.CreateGEP(ArgIndex.first, V, Ops, V->getName() + ".idx");
            Ops.clear();
          }
          // Since we're replacing a load, make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad =
              IRB.CreateLoad(OrigLoad->getType(), V, V->getName() + ".val");
          newLoad->setAlignment(OrigLoad->getAlign());
          // Transfer the AA info too.
          AAMDNodes AAInfo;
          OrigLoad->getAAMetadata(AAInfo);
          newLoad->setAAMetadata(AAInfo);

          Args.push_back(newLoad);
          ArgAttrVec.push_back(AttributeSet());
        }
      }

    // Push any varargs arguments on the list.
    for (; AI != CB.arg_end(); ++AI, ++ArgNo) {
      Args.push_back(*AI);
      ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
    }

    SmallVector<OperandBundleDef, 1> OpBundles;
    CB.getOperandBundlesAsDefs(OpBundles);

    CallBase *NewCS = nullptr;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                                 Args, OpBundles, "", &CB);
    } else {
      auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", &CB);
      NewCall->setTailCallKind(cast<CallInst>(&CB)->getTailCallKind());
      NewCS = NewCall;
    }
    NewCS->setCallingConv(CB.getCallingConv());
    NewCS->setAttributes(
        AttributeList::get(F->getContext(), CallPAL.getFnAttributes(),
                           CallPAL.getRetAttributes(), ArgAttrVec));
    NewCS->copyMetadata(CB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
    Args.clear();
    ArgAttrVec.clear();

    // Update the callgraph to know that the callsite has been transformed.
    if (ReplaceCallSite)
      (*ReplaceCallSite)(CB, *NewCS);

    if (!CB.use_empty()) {
      CB.replaceAllUsesWith(NewCS);
      NewCS->takeName(&CB);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    CB.eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over
  // to the new arguments, also transferring over the names as well.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
                              I2 = NF->arg_begin();
       I != E; ++I) {
    if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(&*I2);
      I2->takeName(&*I);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(&*I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = &NF->begin()->front();

      // Just add all the struct element types.
      Type *AgTy = I->getParamByValType();
      Align StructAlign = *I->getParamAlign();
      Value *TheAlloca = new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr,
                                        StructAlign, "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0),
                        nullptr};
      const StructLayout *SL = DL.getStructLayout(STy);

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx = GetElementPtrInst::Create(
            AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i),
            InsertPt);
        I2->setName(I->getName() + "." + Twine(i));
        Align Alignment = commonAlignment(StructAlign, SL->getElementOffset(i));
        new StoreInst(&*I2++, Idx, false, Alignment, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(&*I);
      continue;
    }

    // There potentially are metadata uses for things like llvm.dbg.value.
    // Replace them with undef, after handling the other regular uses.
    auto RauwUndefMetadata = make_scope_exit(
        [&]() { I->replaceAllUsesWith(UndefValue::get(I->getType())); });

    if (I->use_empty())
      continue;

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[&*I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
        assert(ArgIndices.begin()->second.empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName() + ".val");
        LI->replaceAllUsesWith(&*I2);
        LI->eraseFromParent();
        LLVM_DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
                          << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
        assert(!GEP->use_empty() &&
               "GEPs without uses should be cleaned up already");
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (const Use &Idx : GEP->indices())
          Operands.push_back(cast<ConstantInt>(Idx)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads.
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             It->second != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        TheArg->setName(formatv("{0}.{1:$[.]}.val", I->getName(),
                                make_range(Operands.begin(), Operands.end())));

        LLVM_DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
                          << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions. Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->user_back());
          L->replaceAllUsesWith(&*TheArg);
          L->eraseFromParent();
        }
        GEP->eraseFromParent();
      }
    }
    // Increment I2 past all of the arguments added for this promoted pointer.
    std::advance(I2, ArgIndices.size());
  }

  return NF;
}

/// Return true if we can prove that all callers pass in a valid pointer for
/// the specified function argument.
static bool allCallersPassValidPointerForArgument(Argument *Arg, Type *Ty) {
  Function *Callee = Arg->getParent();
  const DataLayout &DL = Callee->getParent()->getDataLayout();

  unsigned ArgNo = Arg->getArgNo();

  // Look at all call sites of the function. At this point we know we only have
  // direct callers.
  for (User *U : Callee->users()) {
    CallBase &CB = cast<CallBase>(*U);

    if (!isDereferenceablePointer(CB.getArgOperand(ArgNo), Ty, DL))
      return false;
  }
  return true;
}

/// Returns true if Prefix is a prefix of Longer. That means, Longer has a size
/// that is greater than or equal to the size of Prefix, and each of the
/// elements in Prefix is the same as the corresponding elements in Longer.
///
/// This means it also returns true when Prefix and Longer are equal!
static bool isPrefix(const IndicesVector &Prefix, const IndicesVector &Longer) {
  if (Prefix.size() > Longer.size())
    return false;
  return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
}

/// Checks if Indices, or a prefix of Indices, is in Set.
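/// For example (illustrative), with Set = {{0}, {1, 2}}, the query {0, 3} is
/// covered by the prefix {0}, whereas the query {1} is not covered by any
/// entry.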
static bool prefixIn(const IndicesVector &Indices,
                     std::set<IndicesVector> &Set) {
  std::set<IndicesVector>::iterator Low;
  Low = Set.upper_bound(Indices);
  if (Low != Set.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This means
  // it points to a prefix of Indices (possibly Indices itself), if such
  // prefix exists.
  //
  // This load is safe if any prefix of its operands is safe to load.
  return Low != Set.end() && isPrefix(*Low, Indices);
}

/// Mark the given indices (ToMark) as safe in the given set of indices
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
/// is already a prefix of Indices in Safe, Indices are implicitly marked safe
/// already. Furthermore, any indices that Indices is itself a prefix of, are
/// removed from Safe (since they are implicitly safe because of Indices now).
static void markIndicesSafe(const IndicesVector &ToMark,
                            std::set<IndicesVector> &Safe) {
  std::set<IndicesVector>::iterator Low;
  Low = Safe.upper_bound(ToMark);
  // Guard against the case where Safe is empty.
  if (Low != Safe.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This
  // means it points to a prefix of Indices (possibly Indices itself), if
  // such prefix exists.
  if (Low != Safe.end()) {
    if (isPrefix(*Low, ToMark))
      // If there is already a prefix of these indices (or exactly these
      // indices) marked as safe, don't bother adding these indices.
      return;

    // Increment Low, so we can use it as an "insert before" hint.
    ++Low;
  }
  // Insert.
  Low = Safe.insert(Low, ToMark);
  ++Low;
  // If ToMark was a prefix of longer index list(s), remove those.
  std::set<IndicesVector>::iterator End = Safe.end();
  while (Low != End && isPrefix(ToMark, *Low)) {
    std::set<IndicesVector>::iterator Remove = Low;
    ++Low;
    Safe.erase(Remove);
  }
}

/// isSafeToPromoteArgument - As you might guess from the name of this method,
/// it checks to see if it is both safe and useful to promote the argument.
/// This method limits promotion of aggregates to only promote up to three
/// elements of the aggregate in order to avoid exploding the number of
/// arguments passed in.
static bool isSafeToPromoteArgument(Argument *Arg, Type *ByValTy,
                                    AAResults &AAR, unsigned MaxElements) {
  using GEPIndicesSet = std::set<IndicesVector>;

  // Quick exit for unused arguments.
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (i.e., there is a load in the
  // entry block) or the pointer passed in at every call site is guaranteed to
  // be valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway; in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.

  if (ByValTy)
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // Whenever a new underlying type for the operand is found, make sure it's
  // consistent with the GEPs and loads we've already seen and, if necessary,
  // use it to see if all incoming pointers are valid (which implies the
  // 0-index is safe).
  Type *BaseTy = ByValTy;
  auto UpdateBaseTy = [&](Type *NewBaseTy) {
    if (BaseTy)
      return BaseTy == NewBaseTy;

    BaseTy = NewBaseTy;
    if (allCallersPassValidPointerForArgument(Arg, BaseTy)) {
      assert(SafeToUnconditionallyLoad.empty());
      SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
    }

    return true;
  };

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe.
  BasicBlock &EntryBlock = Arg->getParent()->front();
  // Declare this here so we can reuse it.
  IndicesVector Indices;
  for (Instruction &I : EntryBlock)
    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (Use &Idx : GEP->indices())
            if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          if (!UpdateBaseTy(GEP->getSourceElementType()))
            return false;

          // Indices checked out, mark them as safe.
          markIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        markIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);

        if (BaseTy && LI->getType() != BaseTy)
          return false;

        BaseTy = LI->getType();
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that
  // are not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst *, 16> Loads;
  IndicesVector Operands;
  for (Use &U : Arg->uses()) {
    User *UR = U.getUser();
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
      // Don't hack volatile/atomic loads.
      if (!LI->isSimple())
        return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a
      // load.
      Operands.push_back(0);

      if (!UpdateBaseTy(LI->getType()))
        return false;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
      if (GEP->use_empty()) {
        // Dead GEPs cause trouble later. Just remove them if we run into
        // them.
        continue;
      }

      if (!UpdateBaseTy(GEP->getSourceElementType()))
        return false;

      // Ensure that all of the indices are constants.
      for (Use &Idx : GEP->indices())
        if (ConstantInt *C = dyn_cast<ConstantInt>(Idx))
          Operands.push_back(C->getSExtValue());
        else
          return false; // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (User *GEPU : GEP->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
          // Don't hack volatile/atomic loads.
          if (!LI->isSimple())
            return false;
          Loads.push_back(LI);
        } else {
          // Other uses than load?
          return false;
        }
    } else {
      return false; // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!prefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If not, check
    // to make sure that we aren't promoting too many elements. If so, nothing
    // to do.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (MaxElements > 0 && ToPromote.size() == MaxElements) {
        LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
                          << Arg->getName()
                          << "' because it would require adding more "
                          << "than " << MaxElements
                          << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(std::move(Operands));
    }
  }

  if (Loads.empty())
    return true; // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  df_iterator_default_set<BasicBlock *, 16> TranspBlocks;

  for (LoadInst *Load : Loads) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    BasicBlock *BB = Load->getParent();

    MemoryLocation Loc = MemoryLocation::get(Load);
    if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
      return false; // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block.
    for (BasicBlock *P : predecessors(BB)) {
      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
        if (AAR.canBasicBlockModify(*TranspBB, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}

bool ArgumentPromotionPass::isDenselyPacked(Type *type, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!type->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
  if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *seqTy = dyn_cast<VectorType>(type))
    return isDenselyPacked(seqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *seqTy = dyn_cast<ArrayType>(type))
    return isDenselyPacked(seqTy->getElementType(), DL);

  if (!isa<StructType>(type))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(type);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
    Type *ElTy = StructTy->getElementType(i);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(i))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}

/// Checks if the padding bytes of an argument could be accessed.
static bool canPaddingBeAccessed(Argument *arg) {
  assert(arg->hasByValAttr());

  // Track all the pointers to the argument to make sure they are not captured.
  SmallPtrSet<Value *, 16> PtrValues;
  PtrValues.insert(arg);

  // Track all of the stores.
  SmallVector<StoreInst *, 16> Stores;

  // Scan through the uses recursively to make sure the pointer is always used
  // sanely.
  SmallVector<Value *, 16> WorkList(arg->users());
  while (!WorkList.empty()) {
    Value *V = WorkList.pop_back_val();
    if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
      if (PtrValues.insert(V).second)
        llvm::append_range(WorkList, V->users());
    } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
      Stores.push_back(Store);
    } else if (!isa<LoadInst>(V)) {
      return true;
    }
  }

  // Check to make sure the pointers aren't captured.
  for (StoreInst *Store : Stores)
    if (PtrValues.count(Store->getValueOperand()))
      return true;

  return false;
}

bool ArgumentPromotionPass::areFunctionArgsABICompatible(
    const Function &F, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<Argument *> &ArgsToPromote,
    SmallPtrSetImpl<Argument *> &ByValArgsToTransform) {
  for (const Use &U : F.uses()) {
    CallBase *CB = dyn_cast<CallBase>(U.getUser());
    if (!CB)
      return false;
    const Function *Caller = CB->getCaller();
    const Function *Callee = CB->getCalledFunction();
    if (!TTI.areFunctionArgsABICompatible(Caller, Callee, ArgsToPromote) ||
        !TTI.areFunctionArgsABICompatible(Caller, Callee,
                                          ByValArgsToTransform))
      return false;
  }
  return true;
}

/// PromoteArguments - This method checks the specified function to see if
/// there are any promotable arguments and if it is safe to promote the
/// function (for example, all callers are direct). If safe to promote some
/// arguments, it calls the DoPromotion method.
static Function *
promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
                 unsigned MaxElements,
                 Optional<function_ref<void(CallBase &OldCS, CallBase &NewCS)>>
                     ReplaceCallSite,
                 const TargetTransformInfo &TTI) {
  // Don't perform argument promotion for naked functions; otherwise we can end
  // up removing parameters that are seemingly 'not used' as they are referred
  // to in the assembly.
  if (F->hasFnAttribute(Attribute::Naked))
    return nullptr;

  // Make sure that it is local to this module.
  if (!F->hasLocalLinkage())
    return nullptr;

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters.
  // Frontends encode that classification at the call site in the IR, while in
  // the callee the classification is determined dynamically based on the
  // number of registers consumed so far.
  if (F->isVarArg())
    return nullptr;

  // Don't transform functions that receive inallocas, as the transformation
  // may not be safe depending on calling convention.
  if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca))
    return nullptr;

  // First check: see if there are any pointer arguments! If not, quick exit.
  SmallVector<Argument *, 16> PointerArgs;
  for (Argument &I : F->args())
    if (I.getType()->isPointerTy())
      PointerArgs.push_back(&I);
  if (PointerArgs.empty())
    return nullptr;

  // Second check: make sure that all callers are direct callers. We can't
  // transform functions that have indirect callers. Also see if the function
  // is self-recursive and check that target features are compatible.
  bool isSelfRecursive = false;
  for (Use &U : F->uses()) {
    CallBase *CB = dyn_cast<CallBase>(U.getUser());
    // Must be a direct call.
    if (CB == nullptr || !CB->isCallee(&U))
      return nullptr;

    // Can't change signature of musttail callee.
    if (CB->isMustTailCall())
      return nullptr;

    if (CB->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  // Can't change signature of musttail caller.
  // FIXME: Support promoting whole chain of musttail functions.
  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return nullptr;

  const DataLayout &DL = F->getParent()->getDataLayout();

  AAResults &AAR = AARGetter(*F);

  // Check to see which arguments are promotable. If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument *, 8> ArgsToPromote;
  SmallPtrSet<Argument *, 8> ByValArgsToTransform;
  for (Argument *PtrArg : PointerArgs) {
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // Replace sret attribute with noalias. This reduces register pressure by
    // avoiding a register copy.
    if (PtrArg->hasStructRetAttr()) {
      unsigned ArgNo = PtrArg->getArgNo();
      F->removeParamAttr(ArgNo, Attribute::StructRet);
      F->addParamAttr(ArgNo, Attribute::NoAlias);
      for (Use &U : F->uses()) {
        CallBase &CB = cast<CallBase>(*U.getUser());
        CB.removeParamAttr(ArgNo, Attribute::StructRet);
        CB.addParamAttr(ArgNo, Attribute::NoAlias);
      }
    }

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe, if the passed value is densely
    // packed or if we can prove the padding bytes are never accessed.
    //
    // Only handle arguments with specified alignment; if it's unspecified, the
    // actual alignment of the argument is target-specific.
    bool isSafeToPromote = PtrArg->hasByValAttr() && PtrArg->getParamAlign() &&
                           (ArgumentPromotionPass::isDenselyPacked(AgTy, DL) ||
                            !canPaddingBeAccessed(PtrArg));
    if (isSafeToPromote) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
          LLVM_DEBUG(dbgs() << "argpromotion disable promoting argument '"
                            << PtrArg->getName()
                            << "' because it would require adding more"
                            << " than " << MaxElements
                            << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (const auto *EltTy : STy->elements()) {
          if (!EltTy->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform, don't even bother trying to "promote" it.
        // Passing the elements as a scalar will allow sroa to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType =
            llvm::is_contained(STy->elements(), PtrArg->getType());
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value.
    Type *ByValTy =
        PtrArg->hasByValAttr() ? PtrArg->getParamByValType() : nullptr;
    if (isSafeToPromoteArgument(PtrArg, ByValTy, AAR, MaxElements))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  if (!ArgumentPromotionPass::areFunctionArgsABICompatible(
          *F, TTI, ArgsToPromote, ByValArgsToTransform))
    return nullptr;

  return doPromotion(F, ArgsToPromote, ByValArgsToTransform, ReplaceCallSite);
}

PreservedAnalyses ArgumentPromotionPass::run(LazyCallGraph::SCC &C,
                                             CGSCCAnalysisManager &AM,
                                             LazyCallGraph &CG,
                                             CGSCCUpdateResult &UR) {
  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;

    for (LazyCallGraph::Node &N : C) {
      Function &OldF = N.getFunction();

      FunctionAnalysisManager &FAM =
          AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
      // FIXME: This lambda must only be used with this function. We should
      // skip the lambda and just get the AA results directly.
      auto AARGetter = [&](Function &F) -> AAResults & {
        assert(&F == &OldF && "Called with an unexpected function!");
        return FAM.getResult<AAManager>(F);
      };

      const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(OldF);
      Function *NewF =
          promoteArguments(&OldF, AARGetter, MaxElements, None, TTI);
      if (!NewF)
        continue;
      LocalChange = true;

      // Directly substitute the functions in the call graph. Note that this
      // requires the old function to be completely dead and completely
      // replaced by the new function. It does no call graph updates, it merely
      // swaps out the particular function mapped to a particular node in the
      // graph.
      C.getOuterRefSCC().replaceNodeFunction(N, *NewF);
      FAM.clear(OldF, OldF.getName());
      OldF.eraseFromParent();
    }

    Changed |= LocalChange;
  } while (LocalChange);

  if (!Changed)
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}

namespace {

/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
struct ArgPromotion : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  explicit ArgPromotion(unsigned MaxElements = 3)
      : CallGraphSCCPass(ID), MaxElements(MaxElements) {
    initializeArgPromotionPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

private:
  using llvm::Pass::doInitialization;

  bool doInitialization(CallGraph &CG) override;

  /// The maximum number of elements to expand, or 0 for unlimited.
  unsigned MaxElements;
};

} // end anonymous namespace

char ArgPromotion::ID = 0;

INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                      "Promote 'by reference' arguments to scalars", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                    "Promote 'by reference' arguments to scalars", false, false)

Pass *llvm::createArgumentPromotionPass(unsigned MaxElements) {
  return new ArgPromotion(MaxElements);
}

bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  LegacyAARGetter AARGetter(*this);

  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphNode *OldNode : SCC) {
      Function *OldF = OldNode->getFunction();
      if (!OldF)
        continue;

      auto ReplaceCallSite = [&](CallBase &OldCS, CallBase &NewCS) {
        Function *Caller = OldCS.getParent()->getParent();
        CallGraphNode *NewCalleeNode =
            CG.getOrInsertFunction(NewCS.getCalledFunction());
        CallGraphNode *CallerNode = CG[Caller];
        CallerNode->replaceCallEdge(cast<CallBase>(OldCS),
                                    cast<CallBase>(NewCS), NewCalleeNode);
      };

      const TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*OldF);
      if (Function *NewF = promoteArguments(OldF, AARGetter, MaxElements,
                                            {ReplaceCallSite}, TTI)) {
        LocalChange = true;

        // Update the call graph for the newly promoted function.
        CallGraphNode *NewNode = CG.getOrInsertFunction(NewF);
        NewNode->stealCalledFunctionsFrom(OldNode);
        if (OldNode->getNumReferences() == 0)
          delete CG.removeFunctionFromModule(OldNode);
        else
          OldF->setLinkage(Function::ExternalLinkage);

        // And update the SCC we're iterating as well.
        SCC.ReplaceNode(OldNode, NewNode);
      }
    }
    // Remember that we changed something.
    Changed |= LocalChange;
  } while (LocalChange);

  return Changed;
}

bool ArgPromotion::doInitialization(CallGraph &CG) {
  return CallGraphSCCPass::doInitialization(CG);
}