1 //===- CloneFunction.cpp - Clone a function into another function ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the CloneFunctionInto interface, which is used as the 10 // low-level function cloner. This is used by the CloneFunction and function 11 // inliner to do the dirty work of copying the body of a function around. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/ADT/SetVector.h" 16 #include "llvm/ADT/SmallVector.h" 17 #include "llvm/Analysis/DomTreeUpdater.h" 18 #include "llvm/Analysis/InstructionSimplify.h" 19 #include "llvm/Analysis/LoopInfo.h" 20 #include "llvm/IR/CFG.h" 21 #include "llvm/IR/Constants.h" 22 #include "llvm/IR/DebugInfo.h" 23 #include "llvm/IR/DerivedTypes.h" 24 #include "llvm/IR/Function.h" 25 #include "llvm/IR/Instructions.h" 26 #include "llvm/IR/IntrinsicInst.h" 27 #include "llvm/IR/LLVMContext.h" 28 #include "llvm/IR/MDBuilder.h" 29 #include "llvm/IR/Metadata.h" 30 #include "llvm/IR/Module.h" 31 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 32 #include "llvm/Transforms/Utils/Cloning.h" 33 #include "llvm/Transforms/Utils/Local.h" 34 #include "llvm/Transforms/Utils/ValueMapper.h" 35 #include <map> 36 #include <optional> 37 using namespace llvm; 38 39 #define DEBUG_TYPE "clone-function" 40 41 /// See comments in Cloning.h. 
42 BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap, 43 const Twine &NameSuffix, Function *F, 44 ClonedCodeInfo *CodeInfo, 45 DebugInfoFinder *DIFinder) { 46 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F); 47 if (BB->hasName()) 48 NewBB->setName(BB->getName() + NameSuffix); 49 50 bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false; 51 Module *TheModule = F ? F->getParent() : nullptr; 52 53 // Loop over all instructions, and copy them over. 54 for (const Instruction &I : *BB) { 55 if (DIFinder && TheModule) 56 DIFinder->processInstruction(*TheModule, I); 57 58 Instruction *NewInst = I.clone(); 59 if (I.hasName()) 60 NewInst->setName(I.getName() + NameSuffix); 61 NewInst->insertInto(NewBB, NewBB->end()); 62 VMap[&I] = NewInst; // Add instruction map to value. 63 64 if (isa<CallInst>(I) && !I.isDebugOrPseudoInst()) { 65 hasCalls = true; 66 hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_memprof); 67 } 68 if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { 69 if (!AI->isStaticAlloca()) { 70 hasDynamicAllocas = true; 71 } 72 } 73 } 74 75 if (CodeInfo) { 76 CodeInfo->ContainsCalls |= hasCalls; 77 CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata; 78 CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas; 79 } 80 return NewBB; 81 } 82 83 // Clone OldFunc into NewFunc, transforming the old arguments into references to 84 // VMap values. 
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  // The caller is required to have pre-populated VMap with a mapping for
  // every argument of OldFunc; verify that contract in debug builds.
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  // Copy all attributes other than those stored in the AttributeList. We need
  // to remap the parameter indices of the AttributeList.
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

  const RemapFlags FuncGlobalRefFlags =
      ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(MapValue(OldFunc->getPersonalityFn(), VMap,
                                       FuncGlobalRefFlags, TypeMapper,
                                       Materializer));

  // Prefix and prologue data are constants that may reference values needing
  // remapping, so run them through MapValue as well.
  if (OldFunc->hasPrefixData()) {
    NewFunc->setPrefixData(MapValue(OldFunc->getPrefixData(), VMap,
                                    FuncGlobalRefFlags, TypeMapper,
                                    Materializer));
  }

  if (OldFunc->hasPrologueData()) {
    NewFunc->setPrologueData(MapValue(OldFunc->getPrologueData(), VMap,
                                      FuncGlobalRefFlags, TypeMapper,
                                      Materializer));
  }

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap. Arguments
  // deleted by the caller (not present as Arguments in VMap) drop theirs.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
  std::optional<DebugInfoFinder> DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");

    // Need to find subprograms, types, and compile units.
    DIFinder.emplace();

    SPClonedWithinModule = OldFunc->getSubprogram();
    if (SPClonedWithinModule)
      DIFinder->processSubprogram(SPClonedWithinModule);
  } else {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");

      // Need to find all the compile units.
      DIFinder.emplace();
    }
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate. Note that we save BE this way in order to handle cloning of
  // recursive functions into themselves.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo,
                                      DIFinder ? &*DIFinder : nullptr);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder->subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms. Only the
    // subprogram attached to OldFunc itself may be cloned.
    SmallPtrSet<const DISubprogram *, 16> MappedToSelfSPs;
    for (DISubprogram *ISP : DIFinder->subprograms()) {
      if (ISP != SPClonedWithinModule) {
        mapToSelfIfNew(ISP);
        MappedToSelfSPs.insert(ISP);
      }
    }

    // If a subprogram isn't going to be cloned skip its lexical blocks as well.
    for (DIScope *S : DIFinder->scopes()) {
      auto *LScope = dyn_cast<DILocalScope>(S);
      if (LScope && MappedToSelfSPs.count(LScope->getSubprogram()))
        mapToSelfIfNew(S);
    }

    for (DICompileUnit *CU : DIFinder->compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder->types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it...
    for (Instruction &II : *BB)
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder->compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basicblocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we need to not add the arguments to the arg ty vector
  //
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) { // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
struct PruningFunctionCloner {
  Function *NewFunc;            // Destination function being filled in.
  const Function *OldFunc;      // Source function being cloned from.
  ValueToValueMapTy &VMap;      // Old-value -> new-value map, updated as we go.
  bool ModuleLevelChanges;      // Whether remapping may touch module-level MD.
  const char *NameSuffix;       // Suffix appended to cloned value names.
  ClonedCodeInfo *CodeInfo;     // Optional summary info about the cloned code.
  bool HostFuncIsStrictFP;      // True if NewFunc carries the strictfp attr.

  // Clone a single (non-terminator) instruction; may substitute a constrained
  // FP intrinsic call when cloning into a strictfp host function.
  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block is found to be reachable, clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

// Returns true if the constrained intrinsic \p CIID takes an explicit
// rounding-mode metadata operand (table-driven from ConstrainedOps.def).
static bool hasRoundingModeOperand(Intrinsic::ID CIID) {
  switch (CIID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#define FUNCTION INSTRUCTION
#include "llvm/IR/ConstrainedOps.def"
  default:
    llvm_unreachable("Unexpected constrained intrinsic id");
  }
}

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to constrained intrinsic
      // should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            // Descriptor slot 0 is the return type; slot I corresponds to
            // operand I-1 of the original instruction.
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn =
          Intrinsic::getDeclaration(NewFunc->getParent(), CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        // A call's last operand is the callee; don't forward it as an arg.
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        // constrained.fcmp takes its predicate as a metadata string operand.
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent rounding mode (absents in some intrinsics) and exception
      // behavior. The inlined function uses default settings.
      if (hasRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  // Not a strict-FP substitution: fall back to a plain clone.
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block is found to be reachable, clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
  BBEntry = NewBB = BasicBlock::Create(BB->getContext(), NewName, NewFunc);

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
  bool hasMemProfMetadata = false;

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    Instruction *NewInst = cloneInstruction(II);
    NewInst->insertInto(NewBB, NewBB->end());

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG. Also defer
    // debug intrinsic processing because they may contain use-before-defs.
    if (!isa<PHINode>(NewInst) && !isa<DbgVariableIntrinsic>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // If we can simplify this instruction to some other value, simply add
      // a mapping to that value rather than inserting a new instruction into
      // the basic block.
      if (Value *V =
              simplifyInstruction(NewInst, BB->getModule()->getDataLayout())) {
        // On the off-chance that this simplifies to an instruction in the old
        // function, map it back into the new function.
        if (NewFunc != OldFunc)
          if (Value *MappedV = VMap.lookup(V))
            V = MappedV;

        if (!NewInst->mayHaveSideEffects()) {
          VMap[&*II] = V;
          NewInst->eraseFromParent();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    if (isa<CallInst>(II) && !II->isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_memprof);
    }

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator, constant-folding conditional branches
  // and switches whose condition is already known (the pruning step).
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());
    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
    // Static allocas outside the entry block behave dynamically, so report
    // them as dynamic when this block is not the entry block.
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Collect debug intrinsics for remapping later.
  SmallVector<const DbgVariableIntrinsic *, 8> DbgIntrinsics;
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      if (const auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.push_back(DVI);
    }
  }

  // Clone the entry block, and anything recursively reachable from it.
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until rest of function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Move the new block to preserve the order in the original function.
    NewBB->moveBefore(NewFunc->end());

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until rest of function is resolved, PHI resolution
  // requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with poison or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = PoisonValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Make a second pass over the PHINodes now that all of them have been
  // remapped into the new function, simplifying the PHINode and performing any
  // recursive simplifications exposed. This will transparently update the
  // WeakTrackingVH in the VMap. Notably, we rely on that so that if we coalesce
  // two PHINodes, the iteration over the old PHIs remains valid, and the
  // mapping will just map us to the new node (which may not even be a PHI
  // node).
  const DataLayout &DL = NewFunc->getParent()->getDataLayout();
  SmallSetVector<const Value *, 8> Worklist;
  for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
    if (isa<PHINode>(VMap[PHIToResolve[Idx]]))
      Worklist.insert(PHIToResolve[Idx]);

  // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    const Value *OrigV = Worklist[Idx];
    auto *I = dyn_cast_or_null<Instruction>(VMap.lookup(OrigV));
    if (!I)
      continue;

    // Skip over non-intrinsic callsites, we don't want to remove any nodes from
    // the CGSCC.
    CallBase *CB = dyn_cast<CallBase>(I);
    if (CB && CB->getCalledFunction() &&
        !CB->getCalledFunction()->isIntrinsic())
      continue;

    // See if this instruction simplifies.
    Value *SimpleV = simplifyInstruction(I, DL);
    if (!SimpleV)
      continue;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking all
    // uses of To on the recursive step in most cases.
    for (const User *U : OrigV->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // If the original instruction had no side effects, remove it.
    if (isInstructionTriviallyDead(I))
      I->eraseFromParent();
    else
      VMap[OrigV] = I;
  }

  // Remap debug intrinsic operands now that all values have been mapped.
  // Doing this now (late) preserves use-before-defs in debug intrinsics. If
  // we didn't do this, ValueAsMetadata(use-before-def) operands would be
  // replaced by empty metadata. This would signal later cleanup passes to
  // remove the debug intrinsics, potentially causing incorrect locations.
  for (const auto *DVI : DbgIntrinsics) {
    if (DbgVariableIntrinsic *NewDVI =
            cast_or_null<DbgVariableIntrinsic>(VMap.lookup(DVI)))
      RemapInstruction(NewDVI, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                       TypeMapper, Materializer);
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them..
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->splice(I->end(), Dest);

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I, iteratively merge all things this block branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
                                     ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks)
    for (auto &Inst : *BB)
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
}

/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
953 Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB, 954 Loop *OrigLoop, ValueToValueMapTy &VMap, 955 const Twine &NameSuffix, LoopInfo *LI, 956 DominatorTree *DT, 957 SmallVectorImpl<BasicBlock *> &Blocks) { 958 Function *F = OrigLoop->getHeader()->getParent(); 959 Loop *ParentLoop = OrigLoop->getParentLoop(); 960 DenseMap<Loop *, Loop *> LMap; 961 962 Loop *NewLoop = LI->AllocateLoop(); 963 LMap[OrigLoop] = NewLoop; 964 if (ParentLoop) 965 ParentLoop->addChildLoop(NewLoop); 966 else 967 LI->addTopLevelLoop(NewLoop); 968 969 BasicBlock *OrigPH = OrigLoop->getLoopPreheader(); 970 assert(OrigPH && "No preheader"); 971 BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F); 972 // To rename the loop PHIs. 973 VMap[OrigPH] = NewPH; 974 Blocks.push_back(NewPH); 975 976 // Update LoopInfo. 977 if (ParentLoop) 978 ParentLoop->addBasicBlockToLoop(NewPH, *LI); 979 980 // Update DominatorTree. 981 DT->addNewBlock(NewPH, LoopDomBB); 982 983 for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) { 984 Loop *&NewLoop = LMap[CurLoop]; 985 if (!NewLoop) { 986 NewLoop = LI->AllocateLoop(); 987 988 // Establish the parent/child relationship. 989 Loop *OrigParent = CurLoop->getParentLoop(); 990 assert(OrigParent && "Could not find the original parent loop"); 991 Loop *NewParentLoop = LMap[OrigParent]; 992 assert(NewParentLoop && "Could not find the new parent loop"); 993 994 NewParentLoop->addChildLoop(NewLoop); 995 } 996 } 997 998 for (BasicBlock *BB : OrigLoop->getBlocks()) { 999 Loop *CurLoop = LI->getLoopFor(BB); 1000 Loop *&NewLoop = LMap[CurLoop]; 1001 assert(NewLoop && "Expecting new loop to be allocated"); 1002 1003 BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F); 1004 VMap[BB] = NewBB; 1005 1006 // Update LoopInfo. 1007 NewLoop->addBasicBlockToLoop(NewBB, *LI); 1008 1009 // Add DominatorTree node. After seeing all blocks, update to correct 1010 // IDom. 
1011 DT->addNewBlock(NewBB, NewPH); 1012 1013 Blocks.push_back(NewBB); 1014 } 1015 1016 for (BasicBlock *BB : OrigLoop->getBlocks()) { 1017 // Update loop headers. 1018 Loop *CurLoop = LI->getLoopFor(BB); 1019 if (BB == CurLoop->getHeader()) 1020 LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB])); 1021 1022 // Update DominatorTree. 1023 BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock(); 1024 DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]), 1025 cast<BasicBlock>(VMap[IDomBB])); 1026 } 1027 1028 // Move them physically from the end of the block list. 1029 F->splice(Before->getIterator(), F, NewPH->getIterator()); 1030 F->splice(Before->getIterator(), F, NewLoop->getHeader()->getIterator(), 1031 F->end()); 1032 1033 return NewLoop; 1034 } 1035 1036 /// Duplicate non-Phi instructions from the beginning of block up to 1037 /// StopAt instruction into a split block between BB and its predecessor. 1038 BasicBlock *llvm::DuplicateInstructionsInSplitBetween( 1039 BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt, 1040 ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) { 1041 1042 assert(count(successors(PredBB), BB) == 1 && 1043 "There must be a single edge between PredBB and BB!"); 1044 // We are going to have to map operands from the original BB block to the new 1045 // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to 1046 // account for entry from PredBB. 1047 BasicBlock::iterator BI = BB->begin(); 1048 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) 1049 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); 1050 1051 BasicBlock *NewBB = SplitEdge(PredBB, BB); 1052 NewBB->setName(PredBB->getName() + ".split"); 1053 Instruction *NewTerm = NewBB->getTerminator(); 1054 1055 // FIXME: SplitEdge does not yet take a DTU, so we include the split edge 1056 // in the update set here. 
1057 DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB}, 1058 {DominatorTree::Insert, PredBB, NewBB}, 1059 {DominatorTree::Insert, NewBB, BB}}); 1060 1061 // Clone the non-phi instructions of BB into NewBB, keeping track of the 1062 // mapping and using it to remap operands in the cloned instructions. 1063 // Stop once we see the terminator too. This covers the case where BB's 1064 // terminator gets replaced and StopAt == BB's terminator. 1065 for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) { 1066 Instruction *New = BI->clone(); 1067 New->setName(BI->getName()); 1068 New->insertBefore(NewTerm); 1069 ValueMapping[&*BI] = New; 1070 1071 // Remap operands to patch up intra-block references. 1072 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 1073 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 1074 auto I = ValueMapping.find(Inst); 1075 if (I != ValueMapping.end()) 1076 New->setOperand(i, I->second); 1077 } 1078 } 1079 1080 return NewBB; 1081 } 1082 1083 void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes, 1084 DenseMap<MDNode *, MDNode *> &ClonedScopes, 1085 StringRef Ext, LLVMContext &Context) { 1086 MDBuilder MDB(Context); 1087 1088 for (auto *ScopeList : NoAliasDeclScopes) { 1089 for (const auto &MDOperand : ScopeList->operands()) { 1090 if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) { 1091 AliasScopeNode SNANode(MD); 1092 1093 std::string Name; 1094 auto ScopeName = SNANode.getName(); 1095 if (!ScopeName.empty()) 1096 Name = (Twine(ScopeName) + ":" + Ext).str(); 1097 else 1098 Name = std::string(Ext); 1099 1100 MDNode *NewScope = MDB.createAnonymousAliasScope( 1101 const_cast<MDNode *>(SNANode.getDomain()), Name); 1102 ClonedScopes.insert(std::make_pair(MD, NewScope)); 1103 } 1104 } 1105 } 1106 } 1107 1108 void llvm::adaptNoAliasScopes(Instruction *I, 1109 const DenseMap<MDNode *, MDNode *> &ClonedScopes, 1110 LLVMContext &Context) { 1111 auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode 
* { 1112 bool NeedsReplacement = false; 1113 SmallVector<Metadata *, 8> NewScopeList; 1114 for (const auto &MDOp : ScopeList->operands()) { 1115 if (MDNode *MD = dyn_cast<MDNode>(MDOp)) { 1116 if (auto *NewMD = ClonedScopes.lookup(MD)) { 1117 NewScopeList.push_back(NewMD); 1118 NeedsReplacement = true; 1119 continue; 1120 } 1121 NewScopeList.push_back(MD); 1122 } 1123 } 1124 if (NeedsReplacement) 1125 return MDNode::get(Context, NewScopeList); 1126 return nullptr; 1127 }; 1128 1129 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I)) 1130 if (auto *NewScopeList = CloneScopeList(Decl->getScopeList())) 1131 Decl->setScopeList(NewScopeList); 1132 1133 auto replaceWhenNeeded = [&](unsigned MD_ID) { 1134 if (const MDNode *CSNoAlias = I->getMetadata(MD_ID)) 1135 if (auto *NewScopeList = CloneScopeList(CSNoAlias)) 1136 I->setMetadata(MD_ID, NewScopeList); 1137 }; 1138 replaceWhenNeeded(LLVMContext::MD_noalias); 1139 replaceWhenNeeded(LLVMContext::MD_alias_scope); 1140 } 1141 1142 void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes, 1143 ArrayRef<BasicBlock *> NewBlocks, 1144 LLVMContext &Context, StringRef Ext) { 1145 if (NoAliasDeclScopes.empty()) 1146 return; 1147 1148 DenseMap<MDNode *, MDNode *> ClonedScopes; 1149 LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning " 1150 << NoAliasDeclScopes.size() << " node(s)\n"); 1151 1152 cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context); 1153 // Identify instructions using metadata that needs adaptation 1154 for (BasicBlock *NewBlock : NewBlocks) 1155 for (Instruction &I : *NewBlock) 1156 adaptNoAliasScopes(&I, ClonedScopes, Context); 1157 } 1158 1159 void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes, 1160 Instruction *IStart, Instruction *IEnd, 1161 LLVMContext &Context, StringRef Ext) { 1162 if (NoAliasDeclScopes.empty()) 1163 return; 1164 1165 DenseMap<MDNode *, MDNode *> ClonedScopes; 1166 LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning " 1167 << 
NoAliasDeclScopes.size() << " node(s)\n"); 1168 1169 cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context); 1170 // Identify instructions using metadata that needs adaptation 1171 assert(IStart->getParent() == IEnd->getParent() && "different basic block ?"); 1172 auto ItStart = IStart->getIterator(); 1173 auto ItEnd = IEnd->getIterator(); 1174 ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range 1175 for (auto &I : llvm::make_range(ItStart, ItEnd)) 1176 adaptNoAliasScopes(&I, ClonedScopes, Context); 1177 } 1178 1179 void llvm::identifyNoAliasScopesToClone( 1180 ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) { 1181 for (BasicBlock *BB : BBs) 1182 for (Instruction &I : *BB) 1183 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I)) 1184 NoAliasDeclScopes.push_back(Decl->getScopeList()); 1185 } 1186 1187 void llvm::identifyNoAliasScopesToClone( 1188 BasicBlock::iterator Start, BasicBlock::iterator End, 1189 SmallVectorImpl<MDNode *> &NoAliasDeclScopes) { 1190 for (Instruction &I : make_range(Start, End)) 1191 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I)) 1192 NoAliasDeclScopes.push_back(Decl->getScopeList()); 1193 } 1194