//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <map>
#include <optional>
using namespace llvm;

#define DEBUG_TYPE "clone-function"

/// See comments in Cloning.h.
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                                  const Twine &NameSuffix, Function *F,
                                  ClonedCodeInfo *CodeInfo,
                                  DebugInfoFinder *DIFinder) {
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;
  Module *TheModule = F ? F->getParent() : nullptr;

  // Loop over all instructions, and copy them over.
  for (const Instruction &I : *BB) {
    if (DIFinder && TheModule)
      DIFinder->processInstruction(*TheModule, I);

    Instruction *NewInst = I.clone();
    if (I.hasName())
      NewInst->setName(I.getName() + NameSuffix);

    NewInst->insertBefore(*NewBB, NewBB->end());
    NewInst->cloneDebugInfoFrom(&I);

    VMap[&I] = NewInst; // Add instruction map to value.

    if (isa<CallInst>(I) && !I.isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_memprof);
    }
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca()) {
        hasDynamicAllocas = true;
      }
    }
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
  }
  return NewBB;
}
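
// A minimal usage sketch for CloneBasicBlock (illustrative only; it assumes the
// default trailing arguments declared in Cloning.h and a hypothetical block
// BB). Operands of the copied instructions still point at the original values
// until they are remapped with the accumulated VMap, e.g.:
//
//   ValueToValueMapTy VMap;
//   BasicBlock *Copy = CloneBasicBlock(BB, VMap, ".copy", BB->getParent());
//   for (Instruction &I : *Copy)
//     RemapInstruction(&I, VMap,
//                      RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);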

// Clone OldFunc into NewFunc, transforming the old arguments into references to
// VMap values.
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  NewFunc->setIsNewDbgInfoFormat(OldFunc->IsNewDbgInfoFormat);
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  // Copy all attributes other than those stored in the AttributeList. We need
  // to remap the parameter indices of the AttributeList.
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

  const RemapFlags FuncGlobalRefFlags =
      ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(MapValue(OldFunc->getPersonalityFn(), VMap,
                                       FuncGlobalRefFlags, TypeMapper,
                                       Materializer));

  if (OldFunc->hasPrefixData()) {
    NewFunc->setPrefixData(MapValue(OldFunc->getPrefixData(), VMap,
                                    FuncGlobalRefFlags, TypeMapper,
                                    Materializer));
  }

  if (OldFunc->hasPrologueData()) {
    NewFunc->setPrologueData(MapValue(OldFunc->getPrologueData(), VMap,
                                      FuncGlobalRefFlags, TypeMapper,
                                      Materializer));
  }

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
  std::optional<DebugInfoFinder> DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");

    // Need to find subprograms, types, and compile units.
    DIFinder.emplace();

    SPClonedWithinModule = OldFunc->getSubprogram();
    if (SPClonedWithinModule)
      DIFinder->processSubprogram(SPClonedWithinModule);
  } else {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");

      // Need to find all the compile units.
      DIFinder.emplace();
    }
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate. Note that we save BE this way in order to handle cloning of
  // recursive functions into themselves.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo,
                                      DIFinder ? &*DIFinder : nullptr);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder->subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms.
    SmallPtrSet<const DISubprogram *, 16> MappedToSelfSPs;
    for (DISubprogram *ISP : DIFinder->subprograms()) {
      if (ISP != SPClonedWithinModule) {
        mapToSelfIfNew(ISP);
        MappedToSelfSPs.insert(ISP);
      }
    }

    // If a subprogram isn't going to be cloned, skip its lexical blocks as
    // well.
    for (DIScope *S : DIFinder->scopes()) {
      auto *LScope = dyn_cast<DILocalScope>(S);
      if (LScope && MappedToSelfSPs.count(LScope->getSubprogram()))
        mapToSelfIfNew(S);
    }

    for (DICompileUnit *CU : DIFinder->compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder->types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it, and any
    // attached debug-info records.
    for (Instruction &II : *BB) {
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);
      RemapDPValueRange(II.getModule(), II.getDbgValueRange(), VMap, RemapFlag,
                        TypeMapper, Materializer);
    }

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation. Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder->compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basicblocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we should not add those arguments to the new argument
  // type vector.
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());
  NewF->setIsNewDbgInfoFormat(F->IsNewDbgInfoFormat);

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) {     // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}
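
// Illustrative sketch of the argument-dropping behaviour described above. The
// name F and the assumption that the first parameter has integer type are only
// for the example; pre-seeding VMap with a constant removes that parameter from
// the clone's signature and substitutes the constant throughout the cloned
// body (CodeInfo is left at its default):
//
//   ValueToValueMapTy VMap;
//   Argument *A = &*F->arg_begin();               // assumed integer argument
//   VMap[A] = ConstantInt::get(A->getType(), 42);
//   Function *Specialized = CloneFunction(F, VMap);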

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
struct PruningFunctionCloner {
  Function *NewFunc;
  const Function *OldFunc;
  ValueToValueMapTy &VMap;
  bool ModuleLevelChanges;
  const char *NameSuffix;
  ClonedCodeInfo *CodeInfo;
  bool HostFuncIsStrictFP;

  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block is found to be reachable; clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

/// Check whether the constrained intrinsic \p CIID takes a rounding-mode
/// operand, based on the table in ConstrainedOps.def.
static bool hasRoundingModeOperand(Intrinsic::ID CIID) {
  switch (CIID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#define FUNCTION INSTRUCTION
#include "llvm/IR/ConstrainedOps.def"
  default:
    llvm_unreachable("Unexpected constrained intrinsic id");
  }
}

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to the constrained
      // intrinsic should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of the original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn =
          Intrinsic::getDeclaration(NewFunc->getParent(), CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent rounding mode (absent from some intrinsics) and exception
      // behavior. The inlined function uses default settings.
      if (hasRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block is found to be reachable; clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
  BBEntry = NewBB = BasicBlock::Create(BB->getContext(), NewName, NewFunc);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
  bool hasMemProfMetadata = false;

  // Keep a cursor pointing at the last place we cloned debug-info records from.
  BasicBlock::const_iterator DbgCursor = StartingInst;
  auto CloneDbgRecordsToHere =
      [NewBB, &DbgCursor](Instruction *NewInst, BasicBlock::const_iterator II) {
        if (!NewBB->IsNewDbgInfoFormat)
          return;

        // Clone debug-info records onto this instruction. Iterate through any
        // source-instructions we've cloned and then subsequently optimised
        // away, so that their debug-info doesn't go missing.
        for (; DbgCursor != II; ++DbgCursor)
          NewInst->cloneDebugInfoFrom(&*DbgCursor, std::nullopt, false);
        NewInst->cloneDebugInfoFrom(&*II);
        DbgCursor = std::next(II);
      };

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    Instruction *NewInst = cloneInstruction(II);
    NewInst->insertInto(NewBB, NewBB->end());

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG. Also defer
    // debug intrinsic processing because they may contain use-before-defs.
    if (!isa<PHINode>(NewInst) && !isa<DbgVariableIntrinsic>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // If we can simplify this instruction to some other value, simply add
      // a mapping to that value rather than inserting a new instruction into
      // the basic block.
      if (Value *V =
              simplifyInstruction(NewInst, BB->getModule()->getDataLayout())) {
        // On the off-chance that this simplifies to an instruction in the old
        // function, map it back into the new function.
        if (NewFunc != OldFunc)
          if (Value *MappedV = VMap.lookup(V))
            V = MappedV;

        if (!NewInst->mayHaveSideEffects()) {
          VMap[&*II] = V;
          NewInst->eraseFromParent();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    if (isa<CallInst>(II) && !II->isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_memprof);
    }

    CloneDbgRecordsToHere(NewInst, II);

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator.
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());

    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  } else {
    // If we didn't create a new terminator, clone DPValues from the old
    // terminator onto the new terminator.
    Instruction *NewInst = NewBB->getTerminator();
    assert(NewInst);

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Collect debug intrinsics for remapping later.
  SmallVector<const DbgVariableIntrinsic *, 8> DbgIntrinsics;
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      if (const auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.push_back(DVI);
    }
  }

  // Clone the entry block, and anything recursively reachable from it.
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until rest of function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Move the new block to preserve the order in the original function.
    NewBB->moveBefore(NewFunc->end());

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until the rest of the function is resolved; PHI
  // resolution requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with poison or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = PoisonValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Make a second pass over the PHINodes now that all of them have been
  // remapped into the new function, simplifying the PHINode and performing any
  // recursive simplifications exposed. This will transparently update the
  // WeakTrackingVH in the VMap. Notably, we rely on that so that if we coalesce
  // two PHINodes, the iteration over the old PHIs remains valid, and the
  // mapping will just map us to the new node (which may not even be a PHI
  // node).
  const DataLayout &DL = NewFunc->getParent()->getDataLayout();
  SmallSetVector<const Value *, 8> Worklist;
  for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
    if (isa<PHINode>(VMap[PHIToResolve[Idx]]))
      Worklist.insert(PHIToResolve[Idx]);

  // Note that we must test the size on each iteration; the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    const Value *OrigV = Worklist[Idx];
    auto *I = dyn_cast_or_null<Instruction>(VMap.lookup(OrigV));
    if (!I)
      continue;

    // Skip over non-intrinsic callsites; we don't want to remove any nodes
    // from the CGSCC.
    CallBase *CB = dyn_cast<CallBase>(I);
    if (CB && CB->getCalledFunction() &&
        !CB->getCalledFunction()->isIntrinsic())
      continue;

    // See if this instruction simplifies.
    Value *SimpleV = simplifyInstruction(I, DL);
    if (!SimpleV)
      continue;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking all
    // uses of To on the recursive step in most cases.
    for (const User *U : OrigV->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // If the original instruction had no side effects, remove it.
    if (isInstructionTriviallyDead(I))
      I->eraseFromParent();
    else
      VMap[OrigV] = I;
  }

  // Remap debug intrinsic operands now that all values have been mapped.
  // Doing this now (late) preserves use-before-defs in debug intrinsics. If
  // we didn't do this, ValueAsMetadata(use-before-def) operands would be
  // replaced by empty metadata. This would signal later cleanup passes to
  // remove the debug intrinsics, potentially causing incorrect locations.
  for (const auto *DVI : DbgIntrinsics) {
    if (DbgVariableIntrinsic *NewDVI =
            cast_or_null<DbgVariableIntrinsic>(VMap.lookup(DVI)))
      RemapInstruction(NewDVI, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                       TypeMapper, Materializer);
  }

  // Do the same for DPValues, touching all the instructions in the cloned
  // range of blocks.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end())) {
    for (Instruction &I : BB) {
      RemapDPValueRange(I.getModule(), I.getDbgValueRange(), VMap,
                        ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                        TypeMapper, Materializer);
    }
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them.
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->splice(I->end(), Dest);

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I; iteratively merge all the things this block
    // branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}
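
// An illustrative sketch of how a caller (for example an inliner-style client)
// might drive the pruning cloner. Callee, NewFunc and ActualArgs are
// hypothetical names; each formal argument must be mapped before cloning
// starts so that the constant-folding above can see the actual values:
//
//   ValueToValueMapTy VMap;
//   auto FormalIt = Callee->arg_begin();
//   for (Value *Actual : ActualArgs)
//     VMap[&*FormalIt++] = Actual;
//   SmallVector<ReturnInst *, 8> Returns;
//   CloneAndPruneIntoFromInst(NewFunc, Callee, &Callee->front().front(), VMap,
//                             /*ModuleLevelChanges=*/false, Returns, ".i",
//                             /*CodeInfo=*/nullptr);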

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
                                     ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks) {
    for (auto &Inst : *BB) {
      RemapDPValueRange(Inst.getModule(), Inst.getDbgValueRange(), VMap,
                        RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
    }
  }
}

/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewLoop);
  else
    LI->addTopLevelLoop(NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(NewPH);

  // Update LoopInfo.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewPH, *LI);

  // Update DominatorTree.
  DT->addNewBlock(NewPH, LoopDomBB);

  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewLoop);
    }
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(NewBB, NewPH);

    Blocks.push_back(NewBB);
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB]));

    // Update DominatorTree.
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]),
                                 cast<BasicBlock>(VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->splice(Before->getIterator(), F, NewPH->getIterator());
  F->splice(Before->getIterator(), F, NewLoop->getHeader()->getIterator(),
            F->end());

  return NewLoop;
}
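
// A typical pairing, sketched with hypothetical InsertBefore/LoopDomBB values:
// clone the loop, then remap the cloned instructions so that they reference
// the cloned values instead of the originals.
//
//   SmallVector<BasicBlock *, 8> NewBlocks;
//   ValueToValueMapTy VMap;
//   Loop *NewLoop = cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
//                                          VMap, ".clone", LI, DT, NewBlocks);
//   remapInstructionsInBlocks(NewBlocks, VMap);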

/// Duplicate non-Phi instructions from the beginning of block up to
/// StopAt instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BasicBlock *NewBB = SplitEdge(PredBB, BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(NewTerm);
    New->cloneDebugInfoFrom(&*BI);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        auto I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  return NewBB;
}

void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                              DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              StringRef Ext, LLVMContext &Context) {
  MDBuilder MDB(Context);

  for (auto *ScopeList : NoAliasDeclScopes) {
    for (const auto &MDOperand : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
        AliasScopeNode SNANode(MD);

        std::string Name;
        auto ScopeName = SNANode.getName();
        if (!ScopeName.empty())
          Name = (Twine(ScopeName) + ":" + Ext).str();
        else
          Name = std::string(Ext);

        MDNode *NewScope = MDB.createAnonymousAliasScope(
            const_cast<MDNode *>(SNANode.getDomain()), Name);
        ClonedScopes.insert(std::make_pair(MD, NewScope));
      }
    }
  }
}

void llvm::adaptNoAliasScopes(Instruction *I,
                              const DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              LLVMContext &Context) {
  auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
    bool NeedsReplacement = false;
    SmallVector<Metadata *, 8> NewScopeList;
    for (const auto &MDOp : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
        if (auto *NewMD = ClonedScopes.lookup(MD)) {
          NewScopeList.push_back(NewMD);
          NeedsReplacement = true;
          continue;
        }
        NewScopeList.push_back(MD);
      }
    }
    if (NeedsReplacement)
      return MDNode::get(Context, NewScopeList);
    return nullptr;
  };

  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
    if (auto *NewScopeList = CloneScopeList(Decl->getScopeList()))
      Decl->setScopeList(NewScopeList);

  auto replaceWhenNeeded = [&](unsigned MD_ID) {
    if (const MDNode *CSNoAlias = I->getMetadata(MD_ID))
      if (auto *NewScopeList = CloneScopeList(CSNoAlias))
        I->setMetadata(MD_ID, NewScopeList);
  };
  replaceWhenNeeded(LLVMContext::MD_noalias);
  replaceWhenNeeded(LLVMContext::MD_alias_scope);
}
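
// An illustrative sketch of how these helpers are typically combined when a
// region of code is duplicated (OriginalBlocks, ClonedBlocks and Ctx are
// hypothetical): gather the scope declarations up front, duplicate the blocks,
// then give the copies fresh scopes so the duplicated noalias metadata does not
// incorrectly apply across the two copies.
//
//   SmallVector<MDNode *, 8> NoAliasDeclScopes;
//   identifyNoAliasScopesToClone(OriginalBlocks, NoAliasDeclScopes);
//   // ... duplicate the blocks into ClonedBlocks ...
//   cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, ClonedBlocks, Ctx, "cloned");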

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      ArrayRef<BasicBlock *> NewBlocks,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation
  for (BasicBlock *NewBlock : NewBlocks)
    for (Instruction &I : *NewBlock)
      adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      Instruction *IStart, Instruction *IEnd,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation
  assert(IStart->getParent() == IEnd->getParent() && "different basic block?");
  auto ItStart = IStart->getIterator();
  auto ItEnd = IEnd->getIterator();
  ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range
  for (auto &I : llvm::make_range(ItStart, ItEnd))
    adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (BasicBlock *BB : BBs)
    for (Instruction &I : *BB)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclScopes.push_back(Decl->getScopeList());
}

void llvm::identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (Instruction &I : make_range(Start, End))
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclScopes.push_back(Decl->getScopeList());
}