1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Rewrite call/invoke instructions so as to make potential relocations 10 // performed by the garbage collector explicit in the IR. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h" 15 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/MapVector.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SetVector.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/Analysis/DomTreeUpdater.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/TargetTransformInfo.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/BasicBlock.h" 34 #include "llvm/IR/CallingConv.h" 35 #include "llvm/IR/Constant.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/DerivedTypes.h" 39 #include "llvm/IR/Dominators.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/IRBuilder.h" 42 #include "llvm/IR/InstIterator.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/MDBuilder.h" 50 #include "llvm/IR/Metadata.h" 51 #include "llvm/IR/Module.h" 52 #include "llvm/IR/Statepoint.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/IR/ValueHandle.h" 57 #include "llvm/InitializePasses.h" 58 #include "llvm/Pass.h" 59 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/CommandLine.h" 61 #include "llvm/Support/Compiler.h" 62 #include "llvm/Support/Debug.h" 63 #include "llvm/Support/ErrorHandling.h" 64 #include "llvm/Support/raw_ostream.h" 65 #include "llvm/Transforms/Scalar.h" 66 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 67 #include "llvm/Transforms/Utils/Local.h" 68 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 69 #include <algorithm> 70 #include <cassert> 71 #include <cstddef> 72 #include <cstdint> 73 #include <iterator> 74 #include <set> 75 #include <string> 76 #include <utility> 77 #include <vector> 78 79 #define DEBUG_TYPE "rewrite-statepoints-for-gc" 80 81 using namespace llvm; 82 83 // Print the liveset found at the insert location 84 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden, 85 cl::init(false)); 86 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden, 87 cl::init(false)); 88 89 // Print out the base pointers for debugging 90 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden, 91 cl::init(false)); 92 93 // Cost threshold measuring when it is profitable to rematerialize value instead 94 // of relocating it 95 static cl::opt<unsigned> 96 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden, 97 cl::init(6)); 98 99 #ifdef 
EXPENSIVE_CHECKS 100 static bool ClobberNonLive = true; 101 #else 102 static bool ClobberNonLive = false; 103 #endif 104 105 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live", 106 cl::location(ClobberNonLive), 107 cl::Hidden); 108 109 static cl::opt<bool> 110 AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info", 111 cl::Hidden, cl::init(true)); 112 113 /// The IR fed into RewriteStatepointsForGC may have had attributes and 114 /// metadata implying dereferenceability that are no longer valid/correct after 115 /// RewriteStatepointsForGC has run. This is because semantically, after 116 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire 117 /// heap. stripNonValidData (conservatively) restores 118 /// correctness by erasing all attributes in the module that externally imply 119 /// dereferenceability. Similar reasoning also applies to the noalias 120 /// attributes and metadata. gc.statepoint can touch the entire heap including 121 /// noalias objects. 122 /// Apart from attributes and metadata, we also remove instructions that imply 123 /// constant physical memory: llvm.invariant.start. 124 static void stripNonValidData(Module &M); 125 126 static bool shouldRewriteStatepointsIn(Function &F); 127 128 PreservedAnalyses RewriteStatepointsForGC::run(Module &M, 129 ModuleAnalysisManager &AM) { 130 bool Changed = false; 131 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 132 for (Function &F : M) { 133 // Nothing to do for declarations. 134 if (F.isDeclaration() || F.empty()) 135 continue; 136 137 // Policy choice says not to rewrite - the most common reason is that we're 138 // compiling code without a GCStrategy. 139 if (!shouldRewriteStatepointsIn(F)) 140 continue; 141 142 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F); 143 auto &TTI = FAM.getResult<TargetIRAnalysis>(F); 144 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 145 Changed |= runOnFunction(F, DT, TTI, TLI); 146 } 147 if (!Changed) 148 return PreservedAnalyses::all(); 149 150 // stripNonValidData asserts that shouldRewriteStatepointsIn 151 // returns true for at least one function in the module. Since at least 152 // one function changed, we know that the precondition is satisfied. 153 stripNonValidData(M); 154 155 PreservedAnalyses PA; 156 PA.preserve<TargetIRAnalysis>(); 157 PA.preserve<TargetLibraryAnalysis>(); 158 return PA; 159 } 160 161 namespace { 162 163 class RewriteStatepointsForGCLegacyPass : public ModulePass { 164 RewriteStatepointsForGC Impl; 165 166 public: 167 static char ID; // Pass identification, replacement for typeid 168 169 RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() { 170 initializeRewriteStatepointsForGCLegacyPassPass( 171 *PassRegistry::getPassRegistry()); 172 } 173 174 bool runOnModule(Module &M) override { 175 bool Changed = false; 176 for (Function &F : M) { 177 // Nothing to do for declarations. 178 if (F.isDeclaration() || F.empty()) 179 continue; 180 181 // Policy choice says not to rewrite - the most common reason is that 182 // we're compiling code without a GCStrategy. 
183 if (!shouldRewriteStatepointsIn(F)) 184 continue; 185 186 TargetTransformInfo &TTI = 187 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 188 const TargetLibraryInfo &TLI = 189 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 190 auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 191 192 Changed |= Impl.runOnFunction(F, DT, TTI, TLI); 193 } 194 195 if (!Changed) 196 return false; 197 198 // stripNonValidData asserts that shouldRewriteStatepointsIn 199 // returns true for at least one function in the module. Since at least 200 // one function changed, we know that the precondition is satisfied. 201 stripNonValidData(M); 202 return true; 203 } 204 205 void getAnalysisUsage(AnalysisUsage &AU) const override { 206 // We add and rewrite a bunch of instructions, but don't really do much 207 // else. We could in theory preserve a lot more analyses here. 208 AU.addRequired<DominatorTreeWrapperPass>(); 209 AU.addRequired<TargetTransformInfoWrapperPass>(); 210 AU.addRequired<TargetLibraryInfoWrapperPass>(); 211 } 212 }; 213 214 } // end anonymous namespace 215 216 char RewriteStatepointsForGCLegacyPass::ID = 0; 217 218 ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() { 219 return new RewriteStatepointsForGCLegacyPass(); 220 } 221 222 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass, 223 "rewrite-statepoints-for-gc", 224 "Make relocations explicit at statepoints", false, false) 225 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 226 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 227 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass, 228 "rewrite-statepoints-for-gc", 229 "Make relocations explicit at statepoints", false, false) 230 231 namespace { 232 233 struct GCPtrLivenessData { 234 /// Values defined in this block. 235 MapVector<BasicBlock *, SetVector<Value *>> KillSet; 236 237 /// Values used in this block (and thus live); does not included values 238 /// killed within this block. 239 MapVector<BasicBlock *, SetVector<Value *>> LiveSet; 240 241 /// Values live into this basic block (i.e. used by any 242 /// instruction in this basic block or ones reachable from here) 243 MapVector<BasicBlock *, SetVector<Value *>> LiveIn; 244 245 /// Values live out of this basic block (i.e. live into 246 /// any successor block) 247 MapVector<BasicBlock *, SetVector<Value *>> LiveOut; 248 }; 249 250 // The type of the internal cache used inside the findBasePointers family 251 // of functions. From the callers perspective, this is an opaque type and 252 // should not be inspected. 253 // 254 // In the actual implementation this caches two relations: 255 // - The base relation itself (i.e. this pointer is based on that one) 256 // - The base defining value relation (i.e. before base_phi insertion) 257 // Generally, after the execution of a full findBasePointer call, only the 258 // base relation will remain. Internally, we add a mixture of the two 259 // types, then update all the second type to the first type 260 using DefiningValueMapTy = MapVector<Value *, Value *>; 261 using StatepointLiveSetTy = SetVector<Value *>; 262 using RematerializedValueMapTy = 263 MapVector<AssertingVH<Instruction>, AssertingVH<Value>>; 264 265 struct PartiallyConstructedSafepointRecord { 266 /// The set of values known to be live across this safepoint 267 StatepointLiveSetTy LiveSet; 268 269 /// Mapping from live pointers to a base-defining-value 270 MapVector<Value *, Value *> PointerToBase; 271 272 /// The *new* gc.statepoint instruction itself. 
This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps each rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  Optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle.hasValue()) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return None;
  }

  return DeoptBundle.getValue().Inputs;
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Type *Ty) const override {

static bool isGCPointerType(Type *T) {
  if (auto *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
    // GC managed heap. We know that a pointer into this heap needs to be
    // updated and that no other pointer does.
    return PT->getAddressSpace() == 1;
  return false;
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty) {
  if (isGCPointerType(Ty))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType());
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType());
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(), containsGCPtrType);
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates). Used to trip assertions.
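// For illustration (hypothetical types, not drawn from a test case): a plain
// i8 addrspace(1)* pointer or a <2 x i8 addrspace(1)*> vector is a handled GC
// pointer type, whereas a first class aggregate such as {i8 addrspace(1)*, i64}
// contains a GC pointer but is not a type this code expects to see as a live
// value, so the predicate below would flag it.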
356 static bool isUnhandledGCPointerType(Type *Ty) { 357 return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty); 358 } 359 #endif 360 361 // Return the name of the value suffixed with the provided value, or if the 362 // value didn't have a name, the default value specified. 363 static std::string suffixed_name_or(Value *V, StringRef Suffix, 364 StringRef DefaultName) { 365 return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str(); 366 } 367 368 // Conservatively identifies any definitions which might be live at the 369 // given instruction. The analysis is performed immediately before the 370 // given instruction. Values defined by that instruction are not considered 371 // live. Values used by that instruction are considered live. 372 static void analyzeParsePointLiveness( 373 DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call, 374 PartiallyConstructedSafepointRecord &Result) { 375 StatepointLiveSetTy LiveSet; 376 findLiveSetAtInst(Call, OriginalLivenessData, LiveSet); 377 378 if (PrintLiveSet) { 379 dbgs() << "Live Variables:\n"; 380 for (Value *V : LiveSet) 381 dbgs() << " " << V->getName() << " " << *V << "\n"; 382 } 383 if (PrintLiveSetSize) { 384 dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n"; 385 dbgs() << "Number live values: " << LiveSet.size() << "\n"; 386 } 387 Result.LiveSet = LiveSet; 388 } 389 390 // Returns true is V is a knownBaseResult. 391 static bool isKnownBaseResult(Value *V); 392 393 // Returns true if V is a BaseResult that already exists in the IR, i.e. it is 394 // not created by the findBasePointers algorithm. 395 static bool isOriginalBaseResult(Value *V); 396 397 namespace { 398 399 /// A single base defining value - An immediate base defining value for an 400 /// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'. 401 /// For instructions which have multiple pointer [vector] inputs or that 402 /// transition between vector and scalar types, there is no immediate base 403 /// defining value. The 'base defining value' for 'Def' is the transitive 404 /// closure of this relation stopping at the first instruction which has no 405 /// immediate base defining value. The b.d.v. might itself be a base pointer, 406 /// but it can also be an arbitrary derived pointer. 407 struct BaseDefiningValueResult { 408 /// Contains the value which is the base defining value. 409 Value * const BDV; 410 411 /// True if the base defining value is also known to be an actual base 412 /// pointer. 413 const bool IsKnownBase; 414 415 BaseDefiningValueResult(Value *BDV, bool IsKnownBase) 416 : BDV(BDV), IsKnownBase(IsKnownBase) { 417 #ifndef NDEBUG 418 // Check consistency between new and old means of checking whether a BDV is 419 // a base. 420 bool MustBeBase = isKnownBaseResult(BDV); 421 assert(!MustBeBase || MustBeBase == IsKnownBase); 422 #endif 423 } 424 }; 425 426 } // end anonymous namespace 427 428 static BaseDefiningValueResult findBaseDefiningValue(Value *I); 429 430 /// Return a base defining value for the 'Index' element of the given vector 431 /// instruction 'I'. If Index is null, returns a BDV for the entire vector 432 /// 'I'. As an optimization, this method will try to determine when the 433 /// element is known to already be a base pointer. If this can be established, 434 /// the second value in the returned pair will be true. Note that either a 435 /// vector or a pointer typed value can be returned. 
For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the latter, the returned pointer is a BDV (or possibly a base) for the
/// particular element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value. The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  return BaseDefiningValueResult(I, false);
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
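/// For illustration (hypothetical IR, not taken from any test case):
///   %derived = getelementptr i8, i8 addrspace(1)* %obj, i64 16
///   %merged  = select i1 %c, i8 addrspace(1)* %derived, i8 addrspace(1)* %q
/// Asking for the base defining value of %derived walks through the GEP and
/// yields %obj (assumed here to be a function argument and thus a known base),
/// while asking for %merged stops at the select itself, returning it as a
/// non-base BDV which findBasePointer must later resolve.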
static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I);

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live. Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base. By doing this
    // we try to avoid problems reporting various conflicts in the form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    return BaseDefiningValueResult(
        ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr). We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    return findBaseDefiningValue(Def);
  }

  if (isa<LoadInst>(I))
    // The value loaded is a gc base itself
    return BaseDefiningValueResult(I, true);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    // The base of this GEP is the base
    return findBaseDefiningValue(GEP->getPointerOperand());

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported. It could probably be made to work,
      // but why are you doing this? There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    }
  }
  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // TODO: I have absolutely no idea how to implement this part yet. It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate. From the perspective of base pointers, we just treat it
    // like a load.
    return BaseDefiningValueResult(I, true);

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops. Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load. As a result,
  // this defines a base just like a load does.
  if (isa<ExtractValueInst>(I))
    return BaseDefiningValueResult(I, true);

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here. These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return BaseDefiningValueResult(I, false);

  // The last two cases here don't return a base pointer. Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each potentially with its own base). It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return BaseDefiningValueResult(I, false);
}

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
  Value *&Cached = Cache[I];
  if (!Cached) {
    Cached = findBaseDefiningValue(I).BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cached->getName() << "\n");
  }
  assert(Cache[I] != nullptr);
  return Cached;
}

/// Return a base pointer for this value if known. Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseDefiningValueCached(I, Cache);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference. Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}

/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
/// is it known to be a base pointer? Or do we need to continue searching.
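/// For illustration: a function argument, a load, or a call result fed to this
/// predicate is an "original" base result and therefore a known base, while a
/// phi merging two derived pointers is not known to be a base until the
/// algorithm below has inserted a corresponding base phi for it and marked
/// that phi with "is_base_value" metadata.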
652 static bool isKnownBaseResult(Value *V) { 653 if (isOriginalBaseResult(V)) 654 return true; 655 if (isa<Instruction>(V) && 656 cast<Instruction>(V)->getMetadata("is_base_value")) { 657 // This is a previously inserted base phi or select. We know 658 // that this is a base value. 659 return true; 660 } 661 662 // We need to keep searching 663 return false; 664 } 665 666 // Returns true if First and Second values are both scalar or both vector. 667 static bool areBothVectorOrScalar(Value *First, Value *Second) { 668 return isa<VectorType>(First->getType()) == 669 isa<VectorType>(Second->getType()); 670 } 671 672 namespace { 673 674 /// Models the state of a single base defining value in the findBasePointer 675 /// algorithm for determining where a new instruction is needed to propagate 676 /// the base of this BDV. 677 class BDVState { 678 public: 679 enum Status { Unknown, Base, Conflict }; 680 681 BDVState() : BaseValue(nullptr) {} 682 683 explicit BDVState(Status Status, Value *BaseValue = nullptr) 684 : Status(Status), BaseValue(BaseValue) { 685 assert(Status != Base || BaseValue); 686 } 687 688 explicit BDVState(Value *BaseValue) : Status(Base), BaseValue(BaseValue) {} 689 690 Status getStatus() const { return Status; } 691 Value *getBaseValue() const { return BaseValue; } 692 693 bool isBase() const { return getStatus() == Base; } 694 bool isUnknown() const { return getStatus() == Unknown; } 695 bool isConflict() const { return getStatus() == Conflict; } 696 697 bool operator==(const BDVState &Other) const { 698 return BaseValue == Other.BaseValue && Status == Other.Status; 699 } 700 701 bool operator!=(const BDVState &other) const { return !(*this == other); } 702 703 LLVM_DUMP_METHOD 704 void dump() const { 705 print(dbgs()); 706 dbgs() << '\n'; 707 } 708 709 void print(raw_ostream &OS) const { 710 switch (getStatus()) { 711 case Unknown: 712 OS << "U"; 713 break; 714 case Base: 715 OS << "B"; 716 break; 717 case Conflict: 718 OS << "C"; 719 break; 720 } 721 OS << " (" << getBaseValue() << " - " 722 << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << "): "; 723 } 724 725 private: 726 Status Status = Unknown; 727 AssertingVH<Value> BaseValue; // Non-null only if Status == Base. 728 }; 729 730 } // end anonymous namespace 731 732 #ifndef NDEBUG 733 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) { 734 State.print(OS); 735 return OS; 736 } 737 #endif 738 739 static BDVState meetBDVStateImpl(const BDVState &LHS, const BDVState &RHS) { 740 switch (LHS.getStatus()) { 741 case BDVState::Unknown: 742 return RHS; 743 744 case BDVState::Base: 745 assert(LHS.getBaseValue() && "can't be null"); 746 if (RHS.isUnknown()) 747 return LHS; 748 749 if (RHS.isBase()) { 750 if (LHS.getBaseValue() == RHS.getBaseValue()) { 751 assert(LHS == RHS && "equality broken!"); 752 return LHS; 753 } 754 return BDVState(BDVState::Conflict); 755 } 756 assert(RHS.isConflict() && "only three states!"); 757 return BDVState(BDVState::Conflict); 758 759 case BDVState::Conflict: 760 return LHS; 761 } 762 llvm_unreachable("only three states!"); 763 } 764 765 // Values of type BDVState form a lattice, and this function implements the meet 766 // operation. 767 static BDVState meetBDVState(const BDVState &LHS, const BDVState &RHS) { 768 BDVState Result = meetBDVStateImpl(LHS, RHS); 769 assert(Result == meetBDVStateImpl(RHS, LHS) && 770 "Math is wrong: meet does not commute!"); 771 return Result; 772 } 773 774 /// For a given value or instruction, figure out what base ptr its derived from. 
775 /// For gc objects, this is simply itself. On success, returns a value which is 776 /// the base pointer. (This is reliable and can be used for relocation.) On 777 /// failure, returns nullptr. 778 static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { 779 Value *Def = findBaseOrBDV(I, Cache); 780 781 if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I)) 782 return Def; 783 784 // Here's the rough algorithm: 785 // - For every SSA value, construct a mapping to either an actual base 786 // pointer or a PHI which obscures the base pointer. 787 // - Construct a mapping from PHI to unknown TOP state. Use an 788 // optimistic algorithm to propagate base pointer information. Lattice 789 // looks like: 790 // UNKNOWN 791 // b1 b2 b3 b4 792 // CONFLICT 793 // When algorithm terminates, all PHIs will either have a single concrete 794 // base or be in a conflict state. 795 // - For every conflict, insert a dummy PHI node without arguments. Add 796 // these to the base[Instruction] = BasePtr mapping. For every 797 // non-conflict, add the actual base. 798 // - For every conflict, add arguments for the base[a] of each input 799 // arguments. 800 // 801 // Note: A simpler form of this would be to add the conflict form of all 802 // PHIs without running the optimistic algorithm. This would be 803 // analogous to pessimistic data flow and would likely lead to an 804 // overall worse solution. 805 806 #ifndef NDEBUG 807 auto isExpectedBDVType = [](Value *BDV) { 808 return isa<PHINode>(BDV) || isa<SelectInst>(BDV) || 809 isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) || 810 isa<ShuffleVectorInst>(BDV); 811 }; 812 #endif 813 814 // Once populated, will contain a mapping from each potentially non-base BDV 815 // to a lattice value (described above) which corresponds to that BDV. 816 // We use the order of insertion (DFS over the def/use graph) to provide a 817 // stable deterministic ordering for visiting DenseMaps (which are unordered) 818 // below. This is important for deterministic compilation. 819 MapVector<Value *, BDVState> States; 820 821 // Recursively fill in all base defining values reachable from the initial 822 // one for which we don't already know a definite base value for 823 /* scope */ { 824 SmallVector<Value*, 16> Worklist; 825 Worklist.push_back(Def); 826 States.insert({Def, BDVState()}); 827 while (!Worklist.empty()) { 828 Value *Current = Worklist.pop_back_val(); 829 assert(!isOriginalBaseResult(Current) && "why did it get added?"); 830 831 auto visitIncomingValue = [&](Value *InVal) { 832 Value *Base = findBaseOrBDV(InVal, Cache); 833 if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal)) 834 // Known bases won't need new instructions introduced and can be 835 // ignored safely. However, this can only be done when InVal and Base 836 // are both scalar or both vector. Otherwise, we need to find a 837 // correct BDV for InVal, by creating an entry in the lattice 838 // (States). 
839 return; 840 assert(isExpectedBDVType(Base) && "the only non-base values " 841 "we see should be base defining values"); 842 if (States.insert(std::make_pair(Base, BDVState())).second) 843 Worklist.push_back(Base); 844 }; 845 if (PHINode *PN = dyn_cast<PHINode>(Current)) { 846 for (Value *InVal : PN->incoming_values()) 847 visitIncomingValue(InVal); 848 } else if (SelectInst *SI = dyn_cast<SelectInst>(Current)) { 849 visitIncomingValue(SI->getTrueValue()); 850 visitIncomingValue(SI->getFalseValue()); 851 } else if (auto *EE = dyn_cast<ExtractElementInst>(Current)) { 852 visitIncomingValue(EE->getVectorOperand()); 853 } else if (auto *IE = dyn_cast<InsertElementInst>(Current)) { 854 visitIncomingValue(IE->getOperand(0)); // vector operand 855 visitIncomingValue(IE->getOperand(1)); // scalar operand 856 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(Current)) { 857 visitIncomingValue(SV->getOperand(0)); 858 visitIncomingValue(SV->getOperand(1)); 859 } 860 else { 861 llvm_unreachable("Unimplemented instruction case"); 862 } 863 } 864 } 865 866 #ifndef NDEBUG 867 LLVM_DEBUG(dbgs() << "States after initialization:\n"); 868 for (auto Pair : States) { 869 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 870 } 871 #endif 872 873 // Return a phi state for a base defining value. We'll generate a new 874 // base state for known bases and expect to find a cached state otherwise. 875 auto GetStateForBDV = [&](Value *BaseValue, Value *Input) { 876 if (isKnownBaseResult(BaseValue) && areBothVectorOrScalar(BaseValue, Input)) 877 return BDVState(BaseValue); 878 auto I = States.find(BaseValue); 879 assert(I != States.end() && "lookup failed!"); 880 return I->second; 881 }; 882 883 bool Progress = true; 884 while (Progress) { 885 #ifndef NDEBUG 886 const size_t OldSize = States.size(); 887 #endif 888 Progress = false; 889 // We're only changing values in this loop, thus safe to keep iterators. 890 // Since this is computing a fixed point, the order of visit does not 891 // effect the result. TODO: We could use a worklist here and make this run 892 // much faster. 893 for (auto Pair : States) { 894 Value *BDV = Pair.first; 895 // Only values that do not have known bases or those that have differing 896 // type (scalar versus vector) from a possible known base should be in the 897 // lattice. 898 assert((!isKnownBaseResult(BDV) || 899 !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) && 900 "why did it get added?"); 901 902 // Given an input value for the current instruction, return a BDVState 903 // instance which represents the BDV of that value. 904 auto getStateForInput = [&](Value *V) mutable { 905 Value *BDV = findBaseOrBDV(V, Cache); 906 return GetStateForBDV(BDV, V); 907 }; 908 909 BDVState NewState; 910 if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) { 911 NewState = meetBDVState(NewState, getStateForInput(SI->getTrueValue())); 912 NewState = 913 meetBDVState(NewState, getStateForInput(SI->getFalseValue())); 914 } else if (PHINode *PN = dyn_cast<PHINode>(BDV)) { 915 for (Value *Val : PN->incoming_values()) 916 NewState = meetBDVState(NewState, getStateForInput(Val)); 917 } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) { 918 // The 'meet' for an extractelement is slightly trivial, but it's still 919 // useful in that it drives us to conflict if our input is. 
        NewState =
            meetBDVState(NewState, getStateForInput(EE->getVectorOperand()));
      } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
        // Given there's an inherent type mismatch between the operands, this
        // will *always* produce Conflict.
        NewState = meetBDVState(NewState, getStateForInput(IE->getOperand(0)));
        NewState = meetBDVState(NewState, getStateForInput(IE->getOperand(1)));
      } else {
        // The only instance this does not return a Conflict is when both the
        // vector operands are the same vector.
        auto *SV = cast<ShuffleVectorInst>(BDV);
        NewState = meetBDVState(NewState, getStateForInput(SV->getOperand(0)));
        NewState = meetBDVState(NewState, getStateForInput(SV->getOperand(1)));
      }

      BDVState OldState = States[BDV];
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Handle all instructions that have a vector BDV, but the instruction itself
  // is of scalar type.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself. We should
      // exploit this, but can't do it here since it would break the invariant
      // about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(BDVState::Base, BaseInst);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction is
      // of scalar type (these could be phis or selects or any instructions
      // that are of scalar type, but whose base can be a vector type). We
      // conservatively set this as a conflict. Setting the base value for
      // these conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(BDVState::Conflict);
    }
  }

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same. As a result, we should always see an insertelement having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    /// Create and insert a new instruction which will represent the base of
    /// the given instruction 'I'.
    auto MakeBaseInstPlaceholder = [](Instruction *I) -> Instruction* {
      if (isa<PHINode>(I)) {
        BasicBlock *BB = I->getParent();
        int NumPreds = pred_size(BB);
        assert(NumPreds > 0 && "how did we reach here");
        std::string Name = suffixed_name_or(I, ".base", "base_phi");
        return PHINode::Create(I->getType(), NumPreds, Name, I);
      } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
        // The undef will be replaced later
        UndefValue *Undef = UndefValue::get(SI->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_select");
        return SelectInst::Create(SI->getCondition(), Undef, Undef, Name, SI);
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        UndefValue *Undef = UndefValue::get(EE->getVectorOperand()->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_ee");
        return ExtractElementInst::Create(Undef, EE->getIndexOperand(), Name,
                                          EE);
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        UndefValue *VecUndef = UndefValue::get(IE->getOperand(0)->getType());
        UndefValue *ScalarUndef = UndefValue::get(IE->getOperand(1)->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_ie");
        return InsertElementInst::Create(VecUndef, ScalarUndef,
                                         IE->getOperand(2), Name, IE);
      } else {
        auto *SV = cast<ShuffleVectorInst>(I);
        UndefValue *VecUndef = UndefValue::get(SV->getOperand(0)->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_sv");
        return new ShuffleVectorInst(VecUndef, VecUndef, SV->getShuffleMask(),
                                     Name, SV);
      }
    };
    Instruction *BaseInst = MakeBaseInstPlaceholder(I);
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(BDVState::Conflict, BaseInst);
  }

  // Returns an instruction which produces the base pointer for a given
  // instruction. The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above. As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table. In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
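  // For instance (illustrative): if the input is itself a BDV that reached the
  // Conflict state (say, a phi of two derived pointers), the lambda below
  // returns the placeholder base phi recorded for it in States, adding a
  // bitcast at the requested insertion point if the pointer types differ.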
1058 auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) { 1059 Value *BDV = findBaseOrBDV(Input, Cache); 1060 Value *Base = nullptr; 1061 if (isKnownBaseResult(BDV) && areBothVectorOrScalar(BDV, Input)) { 1062 Base = BDV; 1063 } else { 1064 // Either conflict or base. 1065 assert(States.count(BDV)); 1066 Base = States[BDV].getBaseValue(); 1067 } 1068 assert(Base && "Can't be null"); 1069 // The cast is needed since base traversal may strip away bitcasts 1070 if (Base->getType() != Input->getType() && InsertPt) 1071 Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt); 1072 return Base; 1073 }; 1074 1075 // Fixup all the inputs of the new PHIs. Visit order needs to be 1076 // deterministic and predictable because we're naming newly created 1077 // instructions. 1078 for (auto Pair : States) { 1079 Instruction *BDV = cast<Instruction>(Pair.first); 1080 BDVState State = Pair.second; 1081 1082 // Only values that do not have known bases or those that have differing 1083 // type (scalar versus vector) from a possible known base should be in the 1084 // lattice. 1085 assert((!isKnownBaseResult(BDV) || 1086 !areBothVectorOrScalar(BDV, State.getBaseValue())) && 1087 "why did it get added?"); 1088 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 1089 if (!State.isConflict()) 1090 continue; 1091 1092 if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) { 1093 PHINode *PN = cast<PHINode>(BDV); 1094 unsigned NumPHIValues = PN->getNumIncomingValues(); 1095 for (unsigned i = 0; i < NumPHIValues; i++) { 1096 Value *InVal = PN->getIncomingValue(i); 1097 BasicBlock *InBB = PN->getIncomingBlock(i); 1098 1099 // If we've already seen InBB, add the same incoming value 1100 // we added for it earlier. The IR verifier requires phi 1101 // nodes with multiple entries from the same basic block 1102 // to have the same incoming value for each of those 1103 // entries. If we don't do this check here and basephi 1104 // has a different type than base, we'll end up adding two 1105 // bitcasts (and hence two distinct values) as incoming 1106 // values for the same basic block. 1107 1108 int BlockIndex = BasePHI->getBasicBlockIndex(InBB); 1109 if (BlockIndex != -1) { 1110 Value *OldBase = BasePHI->getIncomingValue(BlockIndex); 1111 BasePHI->addIncoming(OldBase, InBB); 1112 1113 #ifndef NDEBUG 1114 Value *Base = getBaseForInput(InVal, nullptr); 1115 // In essence this assert states: the only way two values 1116 // incoming from the same basic block may be different is by 1117 // being different bitcasts of the same value. A cleanup 1118 // that remains TODO is changing findBaseOrBDV to return an 1119 // llvm::Value of the correct type (and still remain pure). 1120 // This will remove the need to add bitcasts. 1121 assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() && 1122 "Sanity -- findBaseOrBDV should be pure!"); 1123 #endif 1124 continue; 1125 } 1126 1127 // Find the instruction which produces the base for each input. We may 1128 // need to insert a bitcast in the incoming block. 1129 // TODO: Need to split critical edges if insertion is needed 1130 Value *Base = getBaseForInput(InVal, InBB->getTerminator()); 1131 BasePHI->addIncoming(Base, InBB); 1132 } 1133 assert(BasePHI->getNumIncomingValues() == NumPHIValues); 1134 } else if (SelectInst *BaseSI = 1135 dyn_cast<SelectInst>(State.getBaseValue())) { 1136 SelectInst *SI = cast<SelectInst>(BDV); 1137 1138 // Find the instruction which produces the base for each input. 
1139 // We may need to insert a bitcast. 1140 BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI)); 1141 BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI)); 1142 } else if (auto *BaseEE = 1143 dyn_cast<ExtractElementInst>(State.getBaseValue())) { 1144 Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand(); 1145 // Find the instruction which produces the base for each input. We may 1146 // need to insert a bitcast. 1147 BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE)); 1148 } else if (auto *BaseIE = dyn_cast<InsertElementInst>(State.getBaseValue())){ 1149 auto *BdvIE = cast<InsertElementInst>(BDV); 1150 auto UpdateOperand = [&](int OperandIdx) { 1151 Value *InVal = BdvIE->getOperand(OperandIdx); 1152 Value *Base = getBaseForInput(InVal, BaseIE); 1153 BaseIE->setOperand(OperandIdx, Base); 1154 }; 1155 UpdateOperand(0); // vector operand 1156 UpdateOperand(1); // scalar operand 1157 } else { 1158 auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue()); 1159 auto *BdvSV = cast<ShuffleVectorInst>(BDV); 1160 auto UpdateOperand = [&](int OperandIdx) { 1161 Value *InVal = BdvSV->getOperand(OperandIdx); 1162 Value *Base = getBaseForInput(InVal, BaseSV); 1163 BaseSV->setOperand(OperandIdx, Base); 1164 }; 1165 UpdateOperand(0); // vector operand 1166 UpdateOperand(1); // vector operand 1167 } 1168 } 1169 1170 // Cache all of our results so we can cheaply reuse them 1171 // NOTE: This is actually two caches: one of the base defining value 1172 // relation and one of the base pointer relation! FIXME 1173 for (auto Pair : States) { 1174 auto *BDV = Pair.first; 1175 Value *Base = Pair.second.getBaseValue(); 1176 assert(BDV && Base); 1177 // Only values that do not have known bases or those that have differing 1178 // type (scalar versus vector) from a possible known base should be in the 1179 // lattice. 1180 assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) && 1181 "why did it get added?"); 1182 1183 LLVM_DEBUG( 1184 dbgs() << "Updating base value cache" 1185 << " for: " << BDV->getName() << " from: " 1186 << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none") 1187 << " to: " << Base->getName() << "\n"); 1188 1189 if (Cache.count(BDV)) { 1190 assert(isKnownBaseResult(Base) && 1191 "must be something we 'know' is a base pointer"); 1192 // Once we transition from the BDV relation being store in the Cache to 1193 // the base relation being stored, it must be stable 1194 assert((!isKnownBaseResult(Cache[BDV]) || Cache[BDV] == Base) && 1195 "base relation should be stable"); 1196 } 1197 Cache[BDV] = Base; 1198 } 1199 assert(Cache.count(Def)); 1200 return Cache[Def]; 1201 } 1202 1203 // For a set of live pointers (base and/or derived), identify the base 1204 // pointer of the object which they are derived from. This routine will 1205 // mutate the IR graph as needed to make the 'base' pointer live at the 1206 // definition site of 'derived'. This ensures that any use of 'derived' can 1207 // also use 'base'. This may involve the insertion of a number of 1208 // additional PHI nodes. 1209 // 1210 // preconditions: live is a set of pointer type Values 1211 // 1212 // side effects: may insert PHI nodes into the existing CFG, will preserve 1213 // CFG, will not remove or mutate any existing nodes 1214 // 1215 // post condition: PointerToBase contains one (derived, base) pair for every 1216 // pointer in live. Note that derived can be equal to base if the original 1217 // pointer was a base pointer. 
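// For example (purely illustrative IR): if live = {%obj, %derived} where
//   %derived = getelementptr i8, i8 addrspace(1)* %obj, i64 16
// then on return PointerToBase holds the pairs (%obj, %obj) and
// (%derived, %obj); no new IR is needed since %obj already dominates %derived.
// A phi merging two different derived pointers, by contrast, would force
// insertion of a parallel base phi.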
static void
findBasePointers(const StatepointLiveSetTy &live,
                 MapVector<Value *, Value *> &PointerToBase,
                 DominatorTree *DT, DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result) {
  MapVector<Value *, Value *> PointerToBase;
  findBasePointers(result.LiveSet, PointerToBase, &DT, DVCache);

  if (PrintBasePointers) {
    errs() << "Base Pairs (w/o Relocation):\n";
    for (auto &Pair : PointerToBase) {
      errs() << " derived ";
      Pair.first->printAsOperand(errs(), false);
      errs() << " base ";
      Pair.second->printAsOperand(errs(), false);
      errs() << "\n";
    }
  }

  result.PointerToBase = PointerToBase;
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again. The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call. One case which can arise is a phi node
// at the start of one of the successor blocks. We also need to be able to
// insert the gc.relocates only on the path which goes through the statepoint.
// We might need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has a unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the
  // first instruction in Ret if needed.
  return Ret;
}

// Create a new attribute set containing only attributes which can be
// transferred from the original call to the safepoint.
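// For instance, a readnone or readonly attribute on the original call must not
// be carried over: after rewriting, the statepoint conceptually clobbers the
// entire heap (it may relocate objects), so memory-effect attributes would be
// unsound. Statepoint directive attributes (e.g. "statepoint-id") are also
// dropped, since by this point they have already been parsed and encoded into
// the statepoint's explicit arguments.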
1300 static AttributeList legalizeCallAttributes(LLVMContext &Ctx, 1301 AttributeList AL) { 1302 if (AL.isEmpty()) 1303 return AL; 1304 1305 // Remove the readonly, readnone, and statepoint function attributes. 1306 AttrBuilder FnAttrs = AL.getFnAttributes(); 1307 FnAttrs.removeAttribute(Attribute::ReadNone); 1308 FnAttrs.removeAttribute(Attribute::ReadOnly); 1309 for (Attribute A : AL.getFnAttributes()) { 1310 if (isStatepointDirectiveAttr(A)) 1311 FnAttrs.remove(A); 1312 } 1313 1314 // Just skip parameter and return attributes for now 1315 return AttributeList::get(Ctx, AttributeList::FunctionIndex, 1316 AttributeSet::get(Ctx, FnAttrs)); 1317 } 1318 1319 /// Helper function to place all gc relocates necessary for the given 1320 /// statepoint. 1321 /// Inputs: 1322 /// liveVariables - list of variables to be relocated. 1323 /// basePtrs - base pointers. 1324 /// statepointToken - statepoint instruction to which relocates should be 1325 /// bound. 1326 /// Builder - Llvm IR builder to be used to construct new calls. 1327 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables, 1328 ArrayRef<Value *> BasePtrs, 1329 Instruction *StatepointToken, 1330 IRBuilder<> &Builder) { 1331 if (LiveVariables.empty()) 1332 return; 1333 1334 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) { 1335 auto ValIt = llvm::find(LiveVec, Val); 1336 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!"); 1337 size_t Index = std::distance(LiveVec.begin(), ValIt); 1338 assert(Index < LiveVec.size() && "Bug in std::find?"); 1339 return Index; 1340 }; 1341 Module *M = StatepointToken->getModule(); 1342 1343 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose 1344 // element type is i8 addrspace(1)*). We originally generated unique 1345 // declarations for each pointer type, but this proved problematic because 1346 // the intrinsic mangling code is incomplete and fragile. Since we're moving 1347 // towards a single unified pointer type anyways, we can just cast everything 1348 // to an i8* of the right address space. A bitcast is added later to convert 1349 // gc_relocate to the actual value's type. 1350 auto getGCRelocateDecl = [&] (Type *Ty) { 1351 assert(isHandledGCPointerType(Ty)); 1352 auto AS = Ty->getScalarType()->getPointerAddressSpace(); 1353 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS); 1354 if (auto *VT = dyn_cast<VectorType>(Ty)) 1355 NewTy = FixedVectorType::get(NewTy, 1356 cast<FixedVectorType>(VT)->getNumElements()); 1357 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, 1358 {NewTy}); 1359 }; 1360 1361 // Lazily populated map from input types to the canonicalized form mentioned 1362 // in the comment above. This should probably be cached somewhere more 1363 // broadly. 
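  // For example (illustrative): live values of type i32 addrspace(1)* and
  // i64 addrspace(1)* would both map to the one gc.relocate declaration that
  // returns i8 addrspace(1)*, with a bitcast back to the original pointer type
  // added later.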
1364 DenseMap<Type *, Function *> TypeToDeclMap; 1365 1366 for (unsigned i = 0; i < LiveVariables.size(); i++) { 1367 // Generate the gc.relocate call and save the result 1368 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i])); 1369 Value *LiveIdx = Builder.getInt32(i); 1370 1371 Type *Ty = LiveVariables[i]->getType(); 1372 if (!TypeToDeclMap.count(Ty)) 1373 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty); 1374 Function *GCRelocateDecl = TypeToDeclMap[Ty]; 1375 1376 // only specify a debug name if we can give a useful one 1377 CallInst *Reloc = Builder.CreateCall( 1378 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, 1379 suffixed_name_or(LiveVariables[i], ".relocated", "")); 1380 // Trick CodeGen into thinking there are lots of free registers at this 1381 // fake call. 1382 Reloc->setCallingConv(CallingConv::Cold); 1383 } 1384 } 1385 1386 namespace { 1387 1388 /// This struct is used to defer RAUWs and `eraseFromParent` s. Using this 1389 /// avoids having to worry about keeping around dangling pointers to Values. 1390 class DeferredReplacement { 1391 AssertingVH<Instruction> Old; 1392 AssertingVH<Instruction> New; 1393 bool IsDeoptimize = false; 1394 1395 DeferredReplacement() = default; 1396 1397 public: 1398 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) { 1399 assert(Old != New && Old && New && 1400 "Cannot RAUW equal values or to / from null!"); 1401 1402 DeferredReplacement D; 1403 D.Old = Old; 1404 D.New = New; 1405 return D; 1406 } 1407 1408 static DeferredReplacement createDelete(Instruction *ToErase) { 1409 DeferredReplacement D; 1410 D.Old = ToErase; 1411 return D; 1412 } 1413 1414 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) { 1415 #ifndef NDEBUG 1416 auto *F = cast<CallInst>(Old)->getCalledFunction(); 1417 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize && 1418 "Only way to construct a deoptimize deferred replacement"); 1419 #endif 1420 DeferredReplacement D; 1421 D.Old = Old; 1422 D.IsDeoptimize = true; 1423 return D; 1424 } 1425 1426 /// Does the task represented by this instance. 1427 void doReplacement() { 1428 Instruction *OldI = Old; 1429 Instruction *NewI = New; 1430 1431 assert(OldI != NewI && "Disallowed at construction?!"); 1432 assert((!IsDeoptimize || !New) && 1433 "Deoptimize intrinsics are not replaced!"); 1434 1435 Old = nullptr; 1436 New = nullptr; 1437 1438 if (NewI) 1439 OldI->replaceAllUsesWith(NewI); 1440 1441 if (IsDeoptimize) { 1442 // Note: we've inserted instructions, so the call to llvm.deoptimize may 1443 // not necessarily be followed by the matching return. 1444 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator()); 1445 new UnreachableInst(RI->getContext(), RI); 1446 RI->eraseFromParent(); 1447 } 1448 1449 OldI->eraseFromParent(); 1450 } 1451 }; 1452 1453 } // end anonymous namespace 1454 1455 static StringRef getDeoptLowering(CallBase *Call) { 1456 const char *DeoptLowering = "deopt-lowering"; 1457 if (Call->hasFnAttr(DeoptLowering)) { 1458 // FIXME: Calls have a *really* confusing interface around attributes 1459 // with values. 
1460 const AttributeList &CSAS = Call->getAttributes(); 1461 if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering)) 1462 return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering) 1463 .getValueAsString(); 1464 Function *F = Call->getCalledFunction(); 1465 assert(F && F->hasFnAttribute(DeoptLowering)); 1466 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1467 } 1468 return "live-through"; 1469 } 1470 1471 static void 1472 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1473 const SmallVectorImpl<Value *> &BasePtrs, 1474 const SmallVectorImpl<Value *> &LiveVariables, 1475 PartiallyConstructedSafepointRecord &Result, 1476 std::vector<DeferredReplacement> &Replacements) { 1477 assert(BasePtrs.size() == LiveVariables.size()); 1478 1479 // Then go ahead and use the builder do actually do the inserts. We insert 1480 // immediately before the previous instruction under the assumption that all 1481 // arguments will be available here. We can't insert afterwards since we may 1482 // be replacing a terminator. 1483 IRBuilder<> Builder(Call); 1484 1485 ArrayRef<Value *> GCArgs(LiveVariables); 1486 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID; 1487 uint32_t NumPatchBytes = 0; 1488 uint32_t Flags = uint32_t(StatepointFlags::None); 1489 1490 SmallVector<Value *, 8> CallArgs(Call->args()); 1491 Optional<ArrayRef<Use>> DeoptArgs; 1492 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt)) 1493 DeoptArgs = Bundle->Inputs; 1494 Optional<ArrayRef<Use>> TransitionArgs; 1495 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) { 1496 TransitionArgs = Bundle->Inputs; 1497 // TODO: This flag no longer serves a purpose and can be removed later 1498 Flags |= uint32_t(StatepointFlags::GCTransition); 1499 } 1500 1501 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls 1502 // with a return value, we lower then as never returning calls to 1503 // __llvm_deoptimize that are followed by unreachable to get better codegen. 1504 bool IsDeoptimize = false; 1505 1506 StatepointDirectives SD = 1507 parseStatepointDirectivesFromAttrs(Call->getAttributes()); 1508 if (SD.NumPatchBytes) 1509 NumPatchBytes = *SD.NumPatchBytes; 1510 if (SD.StatepointID) 1511 StatepointID = *SD.StatepointID; 1512 1513 // Pass through the requested lowering if any. The default is live-through. 1514 StringRef DeoptLowering = getDeoptLowering(Call); 1515 if (DeoptLowering.equals("live-in")) 1516 Flags |= uint32_t(StatepointFlags::DeoptLiveIn); 1517 else { 1518 assert(DeoptLowering.equals("live-through") && "Unsupported value!"); 1519 } 1520 1521 Value *CallTarget = Call->getCalledOperand(); 1522 if (Function *F = dyn_cast<Function>(CallTarget)) { 1523 auto IID = F->getIntrinsicID(); 1524 if (IID == Intrinsic::experimental_deoptimize) { 1525 // Calls to llvm.experimental.deoptimize are lowered to calls to the 1526 // __llvm_deoptimize symbol. We want to resolve this now, since the 1527 // verifier does not allow taking the address of an intrinsic function. 1528 1529 SmallVector<Type *, 8> DomainTy; 1530 for (Value *Arg : CallArgs) 1531 DomainTy.push_back(Arg->getType()); 1532 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1533 /* isVarArg = */ false); 1534 1535 // Note: CallTarget can be a bitcast instruction of a symbol if there are 1536 // calls to @llvm.experimental.deoptimize with different argument types in 1537 // the same module. 
This is fine -- we assume the frontend knew what it 1538 // was doing when generating this kind of IR. 1539 CallTarget = F->getParent() 1540 ->getOrInsertFunction("__llvm_deoptimize", FTy) 1541 .getCallee(); 1542 1543 IsDeoptimize = true; 1544 } else if (IID == Intrinsic::memcpy_element_unordered_atomic || 1545 IID == Intrinsic::memmove_element_unordered_atomic) { 1546 // Unordered atomic memcpy and memmove intrinsics which are not explicitly 1547 // marked as "gc-leaf-function" should be lowered in a GC parseable way. 1548 // Specifically, these calls should be lowered to the 1549 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols. 1550 // Similarly to __llvm_deoptimize we want to resolve this now, since the 1551 // verifier does not allow taking the address of an intrinsic function. 1552 // 1553 // Moreover we need to shuffle the arguments for the call in order to 1554 // accommodate GC. The underlying source and destination objects might be 1555 // relocated during copy operation should the GC occur. To relocate the 1556 // derived source and destination pointers the implementation of the 1557 // intrinsic should know the corresponding base pointers. 1558 // 1559 // To make the base pointers available pass them explicitly as arguments: 1560 // memcpy(dest_derived, source_derived, ...) => 1561 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 1562 auto &Context = Call->getContext(); 1563 auto &DL = Call->getModule()->getDataLayout(); 1564 auto GetBaseAndOffset = [&](Value *Derived) { 1565 assert(Result.PointerToBase.count(Derived)); 1566 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1567 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1568 Value *Base = Result.PointerToBase.find(Derived)->second; 1569 Value *Base_int = Builder.CreatePtrToInt( 1570 Base, Type::getIntNTy(Context, IntPtrSize)); 1571 Value *Derived_int = Builder.CreatePtrToInt( 1572 Derived, Type::getIntNTy(Context, IntPtrSize)); 1573 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1574 }; 1575 1576 auto *Dest = CallArgs[0]; 1577 Value *DestBase, *DestOffset; 1578 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1579 1580 auto *Source = CallArgs[1]; 1581 Value *SourceBase, *SourceOffset; 1582 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1583 1584 auto *LengthInBytes = CallArgs[2]; 1585 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1586 1587 CallArgs.clear(); 1588 CallArgs.push_back(DestBase); 1589 CallArgs.push_back(DestOffset); 1590 CallArgs.push_back(SourceBase); 1591 CallArgs.push_back(SourceOffset); 1592 CallArgs.push_back(LengthInBytes); 1593 1594 SmallVector<Type *, 8> DomainTy; 1595 for (Value *Arg : CallArgs) 1596 DomainTy.push_back(Arg->getType()); 1597 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1598 /* isVarArg = */ false); 1599 1600 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1601 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1602 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1603 switch (ElementSize) { 1604 case 1: 1605 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1606 case 2: 1607 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1608 case 4: 1609 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1610 case 8: 1611 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1612 case 16: 1613 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1614 default: 
1615 llvm_unreachable("unexpected element size!"); 1616 } 1617 } 1618 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1619 switch (ElementSize) { 1620 case 1: 1621 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1622 case 2: 1623 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1624 case 4: 1625 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1626 case 8: 1627 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1628 case 16: 1629 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1630 default: 1631 llvm_unreachable("unexpected element size!"); 1632 } 1633 }; 1634 1635 CallTarget = 1636 F->getParent() 1637 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy) 1638 .getCallee(); 1639 } 1640 } 1641 1642 // Create the statepoint given all the arguments 1643 GCStatepointInst *Token = nullptr; 1644 if (auto *CI = dyn_cast<CallInst>(Call)) { 1645 CallInst *SPCall = Builder.CreateGCStatepointCall( 1646 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1647 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token"); 1648 1649 SPCall->setTailCallKind(CI->getTailCallKind()); 1650 SPCall->setCallingConv(CI->getCallingConv()); 1651 1652 // Currently we will fail on parameter attributes and on certain 1653 // function attributes. In case if we can handle this set of attributes - 1654 // set up function attrs directly on statepoint and return attrs later for 1655 // gc_result intrinsic. 1656 SPCall->setAttributes( 1657 legalizeCallAttributes(CI->getContext(), CI->getAttributes())); 1658 1659 Token = cast<GCStatepointInst>(SPCall); 1660 1661 // Put the following gc_result and gc_relocate calls immediately after the 1662 // the old call (which we're about to delete) 1663 assert(CI->getNextNode() && "Not a terminator, must have next!"); 1664 Builder.SetInsertPoint(CI->getNextNode()); 1665 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc()); 1666 } else { 1667 auto *II = cast<InvokeInst>(Call); 1668 1669 // Insert the new invoke into the old block. We'll remove the old one in a 1670 // moment at which point this will become the new terminator for the 1671 // original block. 1672 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke( 1673 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(), 1674 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs, 1675 "statepoint_token"); 1676 1677 SPInvoke->setCallingConv(II->getCallingConv()); 1678 1679 // Currently we will fail on parameter attributes and on certain 1680 // function attributes. In case if we can handle this set of attributes - 1681 // set up function attrs directly on statepoint and return attrs later for 1682 // gc_result intrinsic. 1683 SPInvoke->setAttributes( 1684 legalizeCallAttributes(II->getContext(), II->getAttributes())); 1685 1686 Token = cast<GCStatepointInst>(SPInvoke); 1687 1688 // Generate gc relocates in exceptional path 1689 BasicBlock *UnwindBlock = II->getUnwindDest(); 1690 assert(!isa<PHINode>(UnwindBlock->begin()) && 1691 UnwindBlock->getUniquePredecessor() && 1692 "can't safely insert in this block!"); 1693 1694 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt()); 1695 Builder.SetCurrentDebugLocation(II->getDebugLoc()); 1696 1697 // Attach exceptional gc relocates to the landingpad. 
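    // In the unwind path the landingpad itself (of token type) serves as the
    // statepoint token for the relocates, e.g. (illustrative IR only):
    //   %lpad = landingpad token cleanup
    //   %p.relocated = call coldcc i8 addrspace(1)*
    //       @llvm.experimental.gc.relocate.p1i8(token %lpad, i32 0, i32 0)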
1698 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst(); 1699 Result.UnwindToken = ExceptionalToken; 1700 1701 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder); 1702 1703 // Generate gc relocates and returns for normal block 1704 BasicBlock *NormalDest = II->getNormalDest(); 1705 assert(!isa<PHINode>(NormalDest->begin()) && 1706 NormalDest->getUniquePredecessor() && 1707 "can't safely insert in this block!"); 1708 1709 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); 1710 1711 // gc relocates will be generated later as if it were regular call 1712 // statepoint 1713 } 1714 assert(Token && "Should be set in one of the above branches!"); 1715 1716 if (IsDeoptimize) { 1717 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we 1718 // transform the tail-call like structure to a call to a void function 1719 // followed by unreachable to get better codegen. 1720 Replacements.push_back( 1721 DeferredReplacement::createDeoptimizeReplacement(Call)); 1722 } else { 1723 Token->setName("statepoint_token"); 1724 if (!Call->getType()->isVoidTy() && !Call->use_empty()) { 1725 StringRef Name = Call->hasName() ? Call->getName() : ""; 1726 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name); 1727 GCResult->setAttributes( 1728 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex, 1729 Call->getAttributes().getRetAttributes())); 1730 1731 // We cannot RAUW or delete CS.getInstruction() because it could be in the 1732 // live set of some other safepoint, in which case that safepoint's 1733 // PartiallyConstructedSafepointRecord will hold a raw pointer to this 1734 // llvm::Instruction. Instead, we defer the replacement and deletion to 1735 // after the live sets have been made explicit in the IR, and we no longer 1736 // have raw pointers to worry about. 1737 Replacements.emplace_back( 1738 DeferredReplacement::createRAUW(Call, GCResult)); 1739 } else { 1740 Replacements.emplace_back(DeferredReplacement::createDelete(Call)); 1741 } 1742 } 1743 1744 Result.StatepointToken = Token; 1745 1746 // Second, create a gc.relocate for every live variable 1747 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder); 1748 } 1749 1750 // Replace an existing gc.statepoint with a new one and a set of gc.relocates 1751 // which make the relocations happening at this safepoint explicit. 1752 // 1753 // WARNING: Does not do any fixup to adjust users of the original live 1754 // values. That's the callers responsibility. 1755 static void 1756 makeStatepointExplicit(DominatorTree &DT, CallBase *Call, 1757 PartiallyConstructedSafepointRecord &Result, 1758 std::vector<DeferredReplacement> &Replacements) { 1759 const auto &LiveSet = Result.LiveSet; 1760 const auto &PointerToBase = Result.PointerToBase; 1761 1762 // Convert to vector for efficient cross referencing. 1763 SmallVector<Value *, 64> BaseVec, LiveVec; 1764 LiveVec.reserve(LiveSet.size()); 1765 BaseVec.reserve(LiveSet.size()); 1766 for (Value *L : LiveSet) { 1767 LiveVec.push_back(L); 1768 assert(PointerToBase.count(L)); 1769 Value *Base = PointerToBase.find(L)->second; 1770 BaseVec.push_back(Base); 1771 } 1772 assert(LiveVec.size() == BaseVec.size()); 1773 1774 // Do the actual rewriting and delete the old statepoint 1775 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements); 1776 } 1777 1778 // Helper function for the relocationViaAlloca. 
1779 // 1780 // It receives iterator to the statepoint gc relocates and emits a store to the 1781 // assigned location (via allocaMap) for the each one of them. It adds the 1782 // visited values into the visitedLiveValues set, which we will later use them 1783 // for sanity checking. 1784 static void 1785 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs, 1786 DenseMap<Value *, AllocaInst *> &AllocaMap, 1787 DenseSet<Value *> &VisitedLiveValues) { 1788 for (User *U : GCRelocs) { 1789 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U); 1790 if (!Relocate) 1791 continue; 1792 1793 Value *OriginalValue = Relocate->getDerivedPtr(); 1794 assert(AllocaMap.count(OriginalValue)); 1795 Value *Alloca = AllocaMap[OriginalValue]; 1796 1797 // Emit store into the related alloca 1798 // All gc_relocates are i8 addrspace(1)* typed, and it must be bitcasted to 1799 // the correct type according to alloca. 1800 assert(Relocate->getNextNode() && 1801 "Should always have one since it's not a terminator"); 1802 IRBuilder<> Builder(Relocate->getNextNode()); 1803 Value *CastedRelocatedValue = 1804 Builder.CreateBitCast(Relocate, 1805 cast<AllocaInst>(Alloca)->getAllocatedType(), 1806 suffixed_name_or(Relocate, ".casted", "")); 1807 1808 new StoreInst(CastedRelocatedValue, Alloca, 1809 cast<Instruction>(CastedRelocatedValue)->getNextNode()); 1810 1811 #ifndef NDEBUG 1812 VisitedLiveValues.insert(OriginalValue); 1813 #endif 1814 } 1815 } 1816 1817 // Helper function for the "relocationViaAlloca". Similar to the 1818 // "insertRelocationStores" but works for rematerialized values. 1819 static void insertRematerializationStores( 1820 const RematerializedValueMapTy &RematerializedValues, 1821 DenseMap<Value *, AllocaInst *> &AllocaMap, 1822 DenseSet<Value *> &VisitedLiveValues) { 1823 for (auto RematerializedValuePair: RematerializedValues) { 1824 Instruction *RematerializedValue = RematerializedValuePair.first; 1825 Value *OriginalValue = RematerializedValuePair.second; 1826 1827 assert(AllocaMap.count(OriginalValue) && 1828 "Can not find alloca for rematerialized value"); 1829 Value *Alloca = AllocaMap[OriginalValue]; 1830 1831 new StoreInst(RematerializedValue, Alloca, 1832 RematerializedValue->getNextNode()); 1833 1834 #ifndef NDEBUG 1835 VisitedLiveValues.insert(OriginalValue); 1836 #endif 1837 } 1838 } 1839 1840 /// Do all the relocation update via allocas and mem2reg 1841 static void relocationViaAlloca( 1842 Function &F, DominatorTree &DT, ArrayRef<Value *> Live, 1843 ArrayRef<PartiallyConstructedSafepointRecord> Records) { 1844 #ifndef NDEBUG 1845 // record initial number of (static) allocas; we'll check we have the same 1846 // number when we get done. 
1847 int InitialAllocaNum = 0; 1848 for (Instruction &I : F.getEntryBlock()) 1849 if (isa<AllocaInst>(I)) 1850 InitialAllocaNum++; 1851 #endif 1852 1853 // TODO-PERF: change data structures, reserve 1854 DenseMap<Value *, AllocaInst *> AllocaMap; 1855 SmallVector<AllocaInst *, 200> PromotableAllocas; 1856 // Used later to chack that we have enough allocas to store all values 1857 std::size_t NumRematerializedValues = 0; 1858 PromotableAllocas.reserve(Live.size()); 1859 1860 // Emit alloca for "LiveValue" and record it in "allocaMap" and 1861 // "PromotableAllocas" 1862 const DataLayout &DL = F.getParent()->getDataLayout(); 1863 auto emitAllocaFor = [&](Value *LiveValue) { 1864 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), 1865 DL.getAllocaAddrSpace(), "", 1866 F.getEntryBlock().getFirstNonPHI()); 1867 AllocaMap[LiveValue] = Alloca; 1868 PromotableAllocas.push_back(Alloca); 1869 }; 1870 1871 // Emit alloca for each live gc pointer 1872 for (Value *V : Live) 1873 emitAllocaFor(V); 1874 1875 // Emit allocas for rematerialized values 1876 for (const auto &Info : Records) 1877 for (auto RematerializedValuePair : Info.RematerializedValues) { 1878 Value *OriginalValue = RematerializedValuePair.second; 1879 if (AllocaMap.count(OriginalValue) != 0) 1880 continue; 1881 1882 emitAllocaFor(OriginalValue); 1883 ++NumRematerializedValues; 1884 } 1885 1886 // The next two loops are part of the same conceptual operation. We need to 1887 // insert a store to the alloca after the original def and at each 1888 // redefinition. We need to insert a load before each use. These are split 1889 // into distinct loops for performance reasons. 1890 1891 // Update gc pointer after each statepoint: either store a relocated value or 1892 // null (if no relocated value was found for this gc pointer and it is not a 1893 // gc_result). This must happen before we update the statepoint with load of 1894 // alloca otherwise we lose the link between statepoint and old def. 1895 for (const auto &Info : Records) { 1896 Value *Statepoint = Info.StatepointToken; 1897 1898 // This will be used for consistency check 1899 DenseSet<Value *> VisitedLiveValues; 1900 1901 // Insert stores for normal statepoint gc relocates 1902 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); 1903 1904 // In case if it was invoke statepoint 1905 // we will insert stores for exceptional path gc relocates. 1906 if (isa<InvokeInst>(Statepoint)) { 1907 insertRelocationStores(Info.UnwindToken->users(), AllocaMap, 1908 VisitedLiveValues); 1909 } 1910 1911 // Do similar thing with rematerialized values 1912 insertRematerializationStores(Info.RematerializedValues, AllocaMap, 1913 VisitedLiveValues); 1914 1915 if (ClobberNonLive) { 1916 // As a debugging aid, pretend that an unrelocated pointer becomes null at 1917 // the gc.statepoint. This will turn some subtle GC problems into 1918 // slightly easier to debug SEGVs. Note that on large IR files with 1919 // lots of gc.statepoints this is extremely costly both memory and time 1920 // wise. 
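      // Concretely (illustrative), every alloca whose value was not visited by
      // a relocate above gets a "store null" right after the statepoint (or at
      // the start of both destinations for an invoke), so a stale use of the
      // unrelocated pointer faults quickly instead of silently reading through
      // a dead pointer.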
1921 SmallVector<AllocaInst *, 64> ToClobber; 1922 for (auto Pair : AllocaMap) { 1923 Value *Def = Pair.first; 1924 AllocaInst *Alloca = Pair.second; 1925 1926 // This value was relocated 1927 if (VisitedLiveValues.count(Def)) { 1928 continue; 1929 } 1930 ToClobber.push_back(Alloca); 1931 } 1932 1933 auto InsertClobbersAt = [&](Instruction *IP) { 1934 for (auto *AI : ToClobber) { 1935 auto PT = cast<PointerType>(AI->getAllocatedType()); 1936 Constant *CPN = ConstantPointerNull::get(PT); 1937 new StoreInst(CPN, AI, IP); 1938 } 1939 }; 1940 1941 // Insert the clobbering stores. These may get intermixed with the 1942 // gc.results and gc.relocates, but that's fine. 1943 if (auto II = dyn_cast<InvokeInst>(Statepoint)) { 1944 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); 1945 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); 1946 } else { 1947 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode()); 1948 } 1949 } 1950 } 1951 1952 // Update use with load allocas and add store for gc_relocated. 1953 for (auto Pair : AllocaMap) { 1954 Value *Def = Pair.first; 1955 AllocaInst *Alloca = Pair.second; 1956 1957 // We pre-record the uses of allocas so that we dont have to worry about 1958 // later update that changes the user information.. 1959 1960 SmallVector<Instruction *, 20> Uses; 1961 // PERF: trade a linear scan for repeated reallocation 1962 Uses.reserve(Def->getNumUses()); 1963 for (User *U : Def->users()) { 1964 if (!isa<ConstantExpr>(U)) { 1965 // If the def has a ConstantExpr use, then the def is either a 1966 // ConstantExpr use itself or null. In either case 1967 // (recursively in the first, directly in the second), the oop 1968 // it is ultimately dependent on is null and this particular 1969 // use does not need to be fixed up. 1970 Uses.push_back(cast<Instruction>(U)); 1971 } 1972 } 1973 1974 llvm::sort(Uses); 1975 auto Last = std::unique(Uses.begin(), Uses.end()); 1976 Uses.erase(Last, Uses.end()); 1977 1978 for (Instruction *Use : Uses) { 1979 if (isa<PHINode>(Use)) { 1980 PHINode *Phi = cast<PHINode>(Use); 1981 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 1982 if (Def == Phi->getIncomingValue(i)) { 1983 LoadInst *Load = 1984 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 1985 Phi->getIncomingBlock(i)->getTerminator()); 1986 Phi->setIncomingValue(i, Load); 1987 } 1988 } 1989 } else { 1990 LoadInst *Load = 1991 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 1992 Use->replaceUsesOfWith(Def, Load); 1993 } 1994 } 1995 1996 // Emit store for the initial gc value. Store must be inserted after load, 1997 // otherwise store will be in alloca's use list and an extra load will be 1998 // inserted before it. 1999 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2000 DL.getABITypeAlign(Def->getType())); 2001 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2002 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2003 // InvokeInst is a terminator so the store need to be inserted into its 2004 // normal destination block. 
2005 BasicBlock *NormalDest = Invoke->getNormalDest(); 2006 Store->insertBefore(NormalDest->getFirstNonPHI()); 2007 } else { 2008 assert(!Inst->isTerminator() && 2009 "The only terminator that can produce a value is " 2010 "InvokeInst which is handled above."); 2011 Store->insertAfter(Inst); 2012 } 2013 } else { 2014 assert(isa<Argument>(Def)); 2015 Store->insertAfter(cast<Instruction>(Alloca)); 2016 } 2017 } 2018 2019 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && 2020 "we must have the same allocas with lives"); 2021 if (!PromotableAllocas.empty()) { 2022 // Apply mem2reg to promote alloca to SSA 2023 PromoteMemToReg(PromotableAllocas, DT); 2024 } 2025 2026 #ifndef NDEBUG 2027 for (auto &I : F.getEntryBlock()) 2028 if (isa<AllocaInst>(I)) 2029 InitialAllocaNum--; 2030 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); 2031 #endif 2032 } 2033 2034 /// Implement a unique function which doesn't require we sort the input 2035 /// vector. Doing so has the effect of changing the output of a couple of 2036 /// tests in ways which make them less useful in testing fused safepoints. 2037 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { 2038 SmallSet<T, 8> Seen; 2039 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }); 2040 } 2041 2042 /// Insert holders so that each Value is obviously live through the entire 2043 /// lifetime of the call. 2044 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values, 2045 SmallVectorImpl<CallInst *> &Holders) { 2046 if (Values.empty()) 2047 // No values to hold live, might as well not insert the empty holder 2048 return; 2049 2050 Module *M = Call->getModule(); 2051 // Use a dummy vararg function to actually hold the values live 2052 FunctionCallee Func = M->getOrInsertFunction( 2053 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)); 2054 if (isa<CallInst>(Call)) { 2055 // For call safepoints insert dummy calls right after safepoint 2056 Holders.push_back( 2057 CallInst::Create(Func, Values, "", &*++Call->getIterator())); 2058 return; 2059 } 2060 // For invoke safepooints insert dummy calls both in normal and 2061 // exceptional destination blocks 2062 auto *II = cast<InvokeInst>(Call); 2063 Holders.push_back(CallInst::Create( 2064 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt())); 2065 Holders.push_back(CallInst::Create( 2066 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt())); 2067 } 2068 2069 static void findLiveReferences( 2070 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 2071 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 2072 GCPtrLivenessData OriginalLivenessData; 2073 computeLiveInValues(DT, F, OriginalLivenessData); 2074 for (size_t i = 0; i < records.size(); i++) { 2075 struct PartiallyConstructedSafepointRecord &info = records[i]; 2076 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info); 2077 } 2078 } 2079 2080 // Helper function for the "rematerializeLiveValues". It walks use chain 2081 // starting from the "CurrentValue" until it reaches the root of the chain, i.e. 2082 // the base or a value it cannot process. Only "simple" values are processed 2083 // (currently it is GEP's and casts). The returned root is examined by the 2084 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array 2085 // with all visited values. 
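// For example (illustrative), given
//   %cast = bitcast i32 addrspace(1)* %base to i8 addrspace(1)*
//   %derived = getelementptr i8, i8 addrspace(1)* %cast, i64 16
// a walk starting at %derived fills ChainToBase with {%derived, %cast} and
// returns %base as the root.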
2086 static Value* findRematerializableChainToBasePointer( 2087 SmallVectorImpl<Instruction*> &ChainToBase, 2088 Value *CurrentValue) { 2089 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { 2090 ChainToBase.push_back(GEP); 2091 return findRematerializableChainToBasePointer(ChainToBase, 2092 GEP->getPointerOperand()); 2093 } 2094 2095 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { 2096 if (!CI->isNoopCast(CI->getModule()->getDataLayout())) 2097 return CI; 2098 2099 ChainToBase.push_back(CI); 2100 return findRematerializableChainToBasePointer(ChainToBase, 2101 CI->getOperand(0)); 2102 } 2103 2104 // We have reached the root of the chain, which is either equal to the base or 2105 // is the first unsupported value along the use chain. 2106 return CurrentValue; 2107 } 2108 2109 // Helper function for the "rematerializeLiveValues". Compute cost of the use 2110 // chain we are going to rematerialize. 2111 static InstructionCost 2112 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2113 TargetTransformInfo &TTI) { 2114 InstructionCost Cost = 0; 2115 2116 for (Instruction *Instr : Chain) { 2117 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2118 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2119 "non noop cast is found during rematerialization"); 2120 2121 Type *SrcTy = CI->getOperand(0)->getType(); 2122 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2123 TTI::getCastContextHint(CI), 2124 TargetTransformInfo::TCK_SizeAndLatency, CI); 2125 2126 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2127 // Cost of the address calculation 2128 Type *ValTy = GEP->getSourceElementType(); 2129 Cost += TTI.getAddressComputationCost(ValTy); 2130 2131 // And cost of the GEP itself 2132 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2133 // allowed for the external usage) 2134 if (!GEP->hasAllConstantIndices()) 2135 Cost += 2; 2136 2137 } else { 2138 llvm_unreachable("unsupported instruction type during rematerialization"); 2139 } 2140 } 2141 2142 return Cost; 2143 } 2144 2145 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2146 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2147 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2148 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2149 return false; 2150 // Map of incoming values and their corresponding basic blocks of 2151 // OrigRootPhi. 2152 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2153 for (unsigned i = 0; i < PhiNum; i++) 2154 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2155 OrigRootPhi.getIncomingBlock(i); 2156 2157 // Both current and base PHIs should have same incoming values and 2158 // the same basic blocks corresponding to the incoming values. 2159 for (unsigned i = 0; i < PhiNum; i++) { 2160 auto CIVI = 2161 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2162 if (CIVI == CurrentIncomingValues.end()) 2163 return false; 2164 BasicBlock *CurrentIncomingBB = CIVI->second; 2165 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2166 return false; 2167 } 2168 return true; 2169 } 2170 2171 // From the statepoint live set pick values that are cheaper to recompute then 2172 // to relocate. Remove this values from the live set, rematerialize them after 2173 // statepoint and record them in "Info" structure. Note that similar to 2174 // relocated values we don't do any user adjustments here. 
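// For example (illustrative), if a value that is merely a bitcast and a gep
// away from a live base pointer is itself live across a statepoint, it can be
// cheaper to relocate only the base and re-emit the cast and gep after the
// statepoint than to relocate the derived pointer as well.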
static void rematerializeLiveValues(CallBase *Call,
                                    PartiallyConstructedSafepointRecord &Info,
                                    TargetTransformInfo &TTI) {
  const unsigned int ChainLengthThreshold = 10;

  // Record values we are going to delete from this statepoint live set.
  // We cannot do this in the following loop due to iterator invalidation.
  SmallVector<Value *, 32> LiveValuesToBeDeleted;

  for (Value *LiveValue : Info.LiveSet) {
    // For each live pointer find its defining chain.
    SmallVector<Instruction *, 3> ChainToBase;
    assert(Info.PointerToBase.count(LiveValue));
    Value *RootOfChain =
        findRematerializableChainToBasePointer(ChainToBase, LiveValue);

    // Nothing to do, or chain is too long.
    if (ChainToBase.size() == 0 || ChainToBase.size() > ChainLengthThreshold)
      continue;

    // Handle the scenario where the RootOfChain is not equal to the
    // Base Value, but they are essentially the same phi values.
    if (RootOfChain != Info.PointerToBase[LiveValue]) {
      PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
      PHINode *AlternateRootPhi =
          dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
      if (!OrigRootPhi || !AlternateRootPhi)
        continue;
      // PHI nodes that have the same incoming values and belong to the same
      // basic block are essentially the same SSA value. When the original phi
      // has incoming values with different base pointers, the original phi is
      // marked as conflict, and an additional `AlternateRootPhi` with the same
      // incoming values gets generated by the findBasePointer function. We
      // need to identify that the newly generated AlternateRootPhi (the .base
      // version of the phi) and RootOfChain (the original phi node itself) are
      // the same, so that we can rematerialize the gep and casts. This is a
      // workaround for the deficiency in the findBasePointer algorithm.
      if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
        continue;
      // Now that the phi nodes are proved to be the same, assert that
      // findBasePointer's newly generated AlternateRootPhi is present in the
      // liveset of the call.
      assert(Info.LiveSet.count(AlternateRootPhi));
    }
    // Compute the cost of this chain.
    InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
    // TODO: We can also account for cases when we will be able to remove some
    //       of the rematerialized values by later optimization passes, i.e. if
    //       we rematerialized several intersecting chains, or if the original
    //       values don't have any uses besides this statepoint.

    // For invokes we need to rematerialize each chain twice - for normal and
    // for unwind basic blocks. Model this by multiplying the cost by two.
    if (isa<InvokeInst>(Call)) {
      Cost *= 2;
    }
    // If it's too expensive - skip it.
    if (Cost >= RematerializationThreshold)
      continue;

    // Remove the value from the live set.
    LiveValuesToBeDeleted.push_back(LiveValue);

    // Clone instructions and record them inside the "Info" structure.

    // Walk backwards to visit the top-most instructions first.
    std::reverse(ChainToBase.begin(), ChainToBase.end());

    // Utility function which clones all instructions from "ChainToBase"
    // and inserts them before "InsertBefore". Returns the rematerialized value
    // which should be used after the statepoint.
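    // E.g. (illustrative names) for a reversed chain {%cast, %gep} the clones
    // are emitted as %cast.remat and %gep.remat; the first clone is rewritten
    // to use the live base value and each subsequent clone is rewritten to use
    // the previous clone.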
2247 auto rematerializeChain = [&ChainToBase]( 2248 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) { 2249 Instruction *LastClonedValue = nullptr; 2250 Instruction *LastValue = nullptr; 2251 for (Instruction *Instr: ChainToBase) { 2252 // Only GEP's and casts are supported as we need to be careful to not 2253 // introduce any new uses of pointers not in the liveset. 2254 // Note that it's fine to introduce new uses of pointers which were 2255 // otherwise not used after this statepoint. 2256 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr)); 2257 2258 Instruction *ClonedValue = Instr->clone(); 2259 ClonedValue->insertBefore(InsertBefore); 2260 ClonedValue->setName(Instr->getName() + ".remat"); 2261 2262 // If it is not first instruction in the chain then it uses previously 2263 // cloned value. We should update it to use cloned value. 2264 if (LastClonedValue) { 2265 assert(LastValue); 2266 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue); 2267 #ifndef NDEBUG 2268 for (auto OpValue : ClonedValue->operand_values()) { 2269 // Assert that cloned instruction does not use any instructions from 2270 // this chain other than LastClonedValue 2271 assert(!is_contained(ChainToBase, OpValue) && 2272 "incorrect use in rematerialization chain"); 2273 // Assert that the cloned instruction does not use the RootOfChain 2274 // or the AlternateLiveBase. 2275 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase); 2276 } 2277 #endif 2278 } else { 2279 // For the first instruction, replace the use of unrelocated base i.e. 2280 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the 2281 // live set. They have been proved to be the same PHI nodes. Note 2282 // that the *only* use of the RootOfChain in the ChainToBase list is 2283 // the first Value in the list. 2284 if (RootOfChain != AlternateLiveBase) 2285 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase); 2286 } 2287 2288 LastClonedValue = ClonedValue; 2289 LastValue = Instr; 2290 } 2291 assert(LastClonedValue); 2292 return LastClonedValue; 2293 }; 2294 2295 // Different cases for calls and invokes. For invokes we need to clone 2296 // instructions both on normal and unwind path. 
2297 if (isa<CallInst>(Call)) { 2298 Instruction *InsertBefore = Call->getNextNode(); 2299 assert(InsertBefore); 2300 Instruction *RematerializedValue = rematerializeChain( 2301 InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2302 Info.RematerializedValues[RematerializedValue] = LiveValue; 2303 } else { 2304 auto *Invoke = cast<InvokeInst>(Call); 2305 2306 Instruction *NormalInsertBefore = 2307 &*Invoke->getNormalDest()->getFirstInsertionPt(); 2308 Instruction *UnwindInsertBefore = 2309 &*Invoke->getUnwindDest()->getFirstInsertionPt(); 2310 2311 Instruction *NormalRematerializedValue = rematerializeChain( 2312 NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2313 Instruction *UnwindRematerializedValue = rematerializeChain( 2314 UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2315 2316 Info.RematerializedValues[NormalRematerializedValue] = LiveValue; 2317 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; 2318 } 2319 } 2320 2321 // Remove rematerializaed values from the live set 2322 for (auto LiveValue: LiveValuesToBeDeleted) { 2323 Info.LiveSet.remove(LiveValue); 2324 } 2325 } 2326 2327 static bool insertParsePoints(Function &F, DominatorTree &DT, 2328 TargetTransformInfo &TTI, 2329 SmallVectorImpl<CallBase *> &ToUpdate) { 2330 #ifndef NDEBUG 2331 // sanity check the input 2332 std::set<CallBase *> Uniqued; 2333 Uniqued.insert(ToUpdate.begin(), ToUpdate.end()); 2334 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!"); 2335 2336 for (CallBase *Call : ToUpdate) 2337 assert(Call->getFunction() == &F); 2338 #endif 2339 2340 // When inserting gc.relocates for invokes, we need to be able to insert at 2341 // the top of the successor blocks. See the comment on 2342 // normalForInvokeSafepoint on exactly what is needed. Note that this step 2343 // may restructure the CFG. 2344 for (CallBase *Call : ToUpdate) { 2345 auto *II = dyn_cast<InvokeInst>(Call); 2346 if (!II) 2347 continue; 2348 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT); 2349 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT); 2350 } 2351 2352 // A list of dummy calls added to the IR to keep various values obviously 2353 // live in the IR. We'll remove all of these when done. 2354 SmallVector<CallInst *, 64> Holders; 2355 2356 // Insert a dummy call with all of the deopt operands we'll need for the 2357 // actual safepoint insertion as arguments. This ensures reference operands 2358 // in the deopt argument list are considered live through the safepoint (and 2359 // thus makes sure they get relocated.) 2360 for (CallBase *Call : ToUpdate) { 2361 SmallVector<Value *, 64> DeoptValues; 2362 2363 for (Value *Arg : GetDeoptBundleOperands(Call)) { 2364 assert(!isUnhandledGCPointerType(Arg->getType()) && 2365 "support for FCA unimplemented"); 2366 if (isHandledGCPointerType(Arg->getType())) 2367 DeoptValues.push_back(Arg); 2368 } 2369 2370 insertUseHolderAfter(Call, DeoptValues, Holders); 2371 } 2372 2373 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size()); 2374 2375 // A) Identify all gc pointers which are statically live at the given call 2376 // site. 2377 findLiveReferences(F, DT, ToUpdate, Records); 2378 2379 // B) Find the base pointers for each live pointer 2380 /* scope for caching */ { 2381 // Cache the 'defining value' relation used in the computation and 2382 // insertion of base phis and selects. This ensures that we don't insert 2383 // large numbers of duplicate base_phis. 
2384 DefiningValueMapTy DVCache; 2385 2386 for (size_t i = 0; i < Records.size(); i++) { 2387 PartiallyConstructedSafepointRecord &info = Records[i]; 2388 findBasePointers(DT, DVCache, ToUpdate[i], info); 2389 } 2390 } // end of cache scope 2391 2392 // The base phi insertion logic (for any safepoint) may have inserted new 2393 // instructions which are now live at some safepoint. The simplest such 2394 // example is: 2395 // loop: 2396 // phi a <-- will be a new base_phi here 2397 // safepoint 1 <-- that needs to be live here 2398 // gep a + 1 2399 // safepoint 2 2400 // br loop 2401 // We insert some dummy calls after each safepoint to definitely hold live 2402 // the base pointers which were identified for that safepoint. We'll then 2403 // ask liveness for _every_ base inserted to see what is now live. Then we 2404 // remove the dummy calls. 2405 Holders.reserve(Holders.size() + Records.size()); 2406 for (size_t i = 0; i < Records.size(); i++) { 2407 PartiallyConstructedSafepointRecord &Info = Records[i]; 2408 2409 SmallVector<Value *, 128> Bases; 2410 for (auto Pair : Info.PointerToBase) 2411 Bases.push_back(Pair.second); 2412 2413 insertUseHolderAfter(ToUpdate[i], Bases, Holders); 2414 } 2415 2416 // By selecting base pointers, we've effectively inserted new uses. Thus, we 2417 // need to rerun liveness. We may *also* have inserted new defs, but that's 2418 // not the key issue. 2419 recomputeLiveInValues(F, DT, ToUpdate, Records); 2420 2421 if (PrintBasePointers) { 2422 for (auto &Info : Records) { 2423 errs() << "Base Pairs: (w/Relocation)\n"; 2424 for (auto Pair : Info.PointerToBase) { 2425 errs() << " derived "; 2426 Pair.first->printAsOperand(errs(), false); 2427 errs() << " base "; 2428 Pair.second->printAsOperand(errs(), false); 2429 errs() << "\n"; 2430 } 2431 } 2432 } 2433 2434 // It is possible that non-constant live variables have a constant base. For 2435 // example, a GEP with a variable offset from a global. In this case we can 2436 // remove it from the liveset. We already don't add constants to the liveset 2437 // because we assume they won't move at runtime and the GC doesn't need to be 2438 // informed about them. The same reasoning applies if the base is constant. 2439 // Note that the relocation placement code relies on this filtering for 2440 // correctness as it expects the base to be in the liveset, which isn't true 2441 // if the base is constant. 2442 for (auto &Info : Records) 2443 for (auto &BasePair : Info.PointerToBase) 2444 if (isa<Constant>(BasePair.second)) 2445 Info.LiveSet.remove(BasePair.first); 2446 2447 for (CallInst *CI : Holders) 2448 CI->eraseFromParent(); 2449 2450 Holders.clear(); 2451 2452 // In order to reduce live set of statepoint we might choose to rematerialize 2453 // some values instead of relocating them. This is purely an optimization and 2454 // does not influence correctness. 2455 for (size_t i = 0; i < Records.size(); i++) 2456 rematerializeLiveValues(ToUpdate[i], Records[i], TTI); 2457 2458 // We need this to safely RAUW and delete call or invoke return values that 2459 // may themselves be live over a statepoint. For details, please see usage in 2460 // makeStatepointExplicitImpl. 2461 std::vector<DeferredReplacement> Replacements; 2462 2463 // Now run through and replace the existing statepoints with new ones with 2464 // the live variables listed. We do not yet update uses of the values being 2465 // relocated. We have references to live variables that need to 2466 // survive to the last iteration of this loop. 
  // (By construction, the previous statepoint cannot be a live variable, thus
  // we can remove the old statepoint calls as we go.)
  for (size_t i = 0; i < Records.size(); i++)
    makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);

  ToUpdate.clear(); // prevent accidental use of invalid calls.

  for (auto &PR : Replacements)
    PR.doReplacement();

  Replacements.clear();

  for (auto &Info : Records) {
    // These live sets may contain stale Value pointers, since we replaced
    // calls with operand bundles with calls wrapped in gc.statepoint, and
    // some of those calls may have been def'ing live gc pointers. Clear
    // these out to avoid accidentally using them.
    //
    // TODO: We should create a separate data structure that does not contain
    // these live sets, and migrate to using that data structure from this
    // point onward.
    Info.LiveSet.clear();
    Info.PointerToBase.clear();
  }

  // Do all the fixups of the original live variables to their relocated selves
  SmallVector<Value *, 128> Live;
  for (size_t i = 0; i < Records.size(); i++) {
    PartiallyConstructedSafepointRecord &Info = Records[i];

    // We can't simply save the live set from the original insertion. One of
    // the live values might be the result of a call which needs a safepoint.
    // That Value* no longer exists and we need to use the new gc_result.
    // Thankfully, the live set is embedded in the statepoint (and updated), so
    // we just grab that.
    llvm::append_range(Live, Info.StatepointToken->gc_args());
#ifndef NDEBUG
    // Do some basic sanity checks on our liveness results before performing
    // relocation. Relocation can and will turn mistakes in liveness results
    // into nonsensical code which is much harder to debug.
    // TODO: It would be nice to test consistency as well.
    assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
           "statepoint must be reachable or liveness is meaningless");
    for (Value *V : Info.StatepointToken->gc_args()) {
      if (!isa<Instruction>(V))
        // Non-instruction values trivially dominate all possible uses.
        continue;
      auto *LiveInst = cast<Instruction>(V);
      assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
             "unreachable values should never be live");
      assert(DT.dominates(LiveInst, Info.StatepointToken) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
#endif
  }
  unique_unsorted(Live);

#ifndef NDEBUG
  // sanity check
  for (auto *Ptr : Live)
    assert(isHandledGCPointerType(Ptr->getType()) &&
           "must be a gc pointer type");
#endif

  relocationViaAlloca(F, DT, Live, Records);
  return !Records.empty();
}

// Handles both return values and arguments for Functions and calls.
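// For example (illustrative), after this pass a declaration such as
//   declare dereferenceable(16) i8 addrspace(1)* @f(i8 addrspace(1)* noalias)
// loses both the dereferenceable(16) and the noalias attribute.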
2536 template <typename AttrHolder> 2537 static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH, 2538 unsigned Index) { 2539 AttrBuilder R; 2540 if (AH.getDereferenceableBytes(Index)) 2541 R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable, 2542 AH.getDereferenceableBytes(Index))); 2543 if (AH.getDereferenceableOrNullBytes(Index)) 2544 R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull, 2545 AH.getDereferenceableOrNullBytes(Index))); 2546 if (AH.getAttributes().hasAttribute(Index, Attribute::NoAlias)) 2547 R.addAttribute(Attribute::NoAlias); 2548 2549 if (!R.empty()) 2550 AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R)); 2551 } 2552 2553 static void stripNonValidAttributesFromPrototype(Function &F) { 2554 LLVMContext &Ctx = F.getContext(); 2555 2556 for (Argument &A : F.args()) 2557 if (isa<PointerType>(A.getType())) 2558 RemoveNonValidAttrAtIndex(Ctx, F, 2559 A.getArgNo() + AttributeList::FirstArgIndex); 2560 2561 if (isa<PointerType>(F.getReturnType())) 2562 RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex); 2563 } 2564 2565 /// Certain metadata on instructions are invalid after running RS4GC. 2566 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2567 /// optimize functions. We drop such metadata on the instruction. 2568 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2569 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2570 return; 2571 // These are the attributes that are still valid on loads and stores after 2572 // RS4GC. 2573 // The metadata implying dereferenceability and noalias are (conservatively) 2574 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2575 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2576 // touch the entire heap including noalias objects. Note: The reasoning is 2577 // same as stripping the dereferenceability and noalias attributes that are 2578 // analogous to the metadata counterparts. 2579 // We also drop the invariant.load metadata on the load because that metadata 2580 // implies the address operand to the load points to memory that is never 2581 // changed once it became dereferenceable. This is no longer true after RS4GC. 2582 // Similar reasoning applies to invariant.group metadata, which applies to 2583 // loads within a group. 2584 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2585 LLVMContext::MD_range, 2586 LLVMContext::MD_alias_scope, 2587 LLVMContext::MD_nontemporal, 2588 LLVMContext::MD_nonnull, 2589 LLVMContext::MD_align, 2590 LLVMContext::MD_type}; 2591 2592 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 2593 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2594 } 2595 2596 static void stripNonValidDataFromBody(Function &F) { 2597 if (F.empty()) 2598 return; 2599 2600 LLVMContext &Ctx = F.getContext(); 2601 MDBuilder Builder(Ctx); 2602 2603 // Set of invariantstart instructions that we need to remove. 2604 // Use this to avoid invalidating the instruction iterator. 2605 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2606 2607 for (Instruction &I : instructions(F)) { 2608 // invariant.start on memory location implies that the referenced memory 2609 // location is constant and unchanging. 
This is no longer true after 2610 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2611 // which frees the entire heap and the presence of invariant.start allows 2612 // the optimizer to sink the load of a memory location past a statepoint, 2613 // which is incorrect. 2614 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2615 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2616 InvariantStartInstructions.push_back(II); 2617 continue; 2618 } 2619 2620 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2621 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2622 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2623 } 2624 2625 stripInvalidMetadataFromInstruction(I); 2626 2627 if (auto *Call = dyn_cast<CallBase>(&I)) { 2628 for (int i = 0, e = Call->arg_size(); i != e; i++) 2629 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2630 RemoveNonValidAttrAtIndex(Ctx, *Call, 2631 i + AttributeList::FirstArgIndex); 2632 if (isa<PointerType>(Call->getType())) 2633 RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex); 2634 } 2635 } 2636 2637 // Delete the invariant.start instructions and RAUW undef. 2638 for (auto *II : InvariantStartInstructions) { 2639 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2640 II->eraseFromParent(); 2641 } 2642 } 2643 2644 /// Returns true if this function should be rewritten by this pass. The main 2645 /// point of this function is as an extension point for custom logic. 2646 static bool shouldRewriteStatepointsIn(Function &F) { 2647 // TODO: This should check the GCStrategy 2648 if (F.hasGC()) { 2649 const auto &FunctionGCName = F.getGC(); 2650 const StringRef StatepointExampleName("statepoint-example"); 2651 const StringRef CoreCLRName("coreclr"); 2652 return (StatepointExampleName == FunctionGCName) || 2653 (CoreCLRName == FunctionGCName); 2654 } else 2655 return false; 2656 } 2657 2658 static void stripNonValidData(Module &M) { 2659 #ifndef NDEBUG 2660 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2661 #endif 2662 2663 for (Function &F : M) 2664 stripNonValidAttributesFromPrototype(F); 2665 2666 for (Function &F : M) 2667 stripNonValidDataFromBody(F); 2668 } 2669 2670 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2671 TargetTransformInfo &TTI, 2672 const TargetLibraryInfo &TLI) { 2673 assert(!F.isDeclaration() && !F.empty() && 2674 "need function body to rewrite statepoints in"); 2675 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2676 2677 auto NeedsRewrite = [&TLI](Instruction &I) { 2678 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2679 if (isa<GCStatepointInst>(Call)) 2680 return false; 2681 if (callsGCLeafFunction(Call, TLI)) 2682 return false; 2683 2684 // Normally it's up to the frontend to make sure that non-leaf calls also 2685 // have proper deopt state if it is required. We make an exception for 2686 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2687 // these are non-leaf by default. They might be generated by the optimizer 2688 // which doesn't know how to produce a proper deopt state. So if we see a 2689 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2690 // copy and don't produce a statepoint. 
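      // E.g. (illustrative) when rs4gc-allow-statepoint-with-no-deopt-info is
      // disabled, an optimizer-introduced
      //   call void @llvm.memcpy.element.unordered.atomic.p1i8.p1i8.i64(...)
      // carrying no "deopt" operand bundle is treated as a leaf copy here.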
2691 if (!AllowStatepointWithNoDeoptInfo && 2692 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2693 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2694 "Don't expect any other calls here!"); 2695 return false; 2696 } 2697 return true; 2698 } 2699 return false; 2700 }; 2701 2702 // Delete any unreachable statepoints so that we don't have unrewritten 2703 // statepoints surviving this pass. This makes testing easier and the 2704 // resulting IR less confusing to human readers. 2705 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); 2706 bool MadeChange = removeUnreachableBlocks(F, &DTU); 2707 // Flush the Dominator Tree. 2708 DTU.getDomTree(); 2709 2710 // Gather all the statepoints which need rewritten. Be careful to only 2711 // consider those in reachable code since we need to ask dominance queries 2712 // when rewriting. We'll delete the unreachable ones in a moment. 2713 SmallVector<CallBase *, 64> ParsePointNeeded; 2714 for (Instruction &I : instructions(F)) { 2715 // TODO: only the ones with the flag set! 2716 if (NeedsRewrite(I)) { 2717 // NOTE removeUnreachableBlocks() is stronger than 2718 // DominatorTree::isReachableFromEntry(). In other words 2719 // removeUnreachableBlocks can remove some blocks for which 2720 // isReachableFromEntry() returns true. 2721 assert(DT.isReachableFromEntry(I.getParent()) && 2722 "no unreachable blocks expected"); 2723 ParsePointNeeded.push_back(cast<CallBase>(&I)); 2724 } 2725 } 2726 2727 // Return early if no work to do. 2728 if (ParsePointNeeded.empty()) 2729 return MadeChange; 2730 2731 // As a prepass, go ahead and aggressively destroy single entry phi nodes. 2732 // These are created by LCSSA. They have the effect of increasing the size 2733 // of liveness sets for no good reason. It may be harder to do this post 2734 // insertion since relocations and base phis can confuse things. 2735 for (BasicBlock &BB : F) 2736 if (BB.getUniquePredecessor()) 2737 MadeChange |= FoldSingleEntryPHINodes(&BB); 2738 2739 // Before we start introducing relocations, we want to tweak the IR a bit to 2740 // avoid unfortunate code generation effects. The main example is that we 2741 // want to try to make sure the comparison feeding a branch is after any 2742 // safepoints. Otherwise, we end up with a comparison of pre-relocation 2743 // values feeding a branch after relocation. This is semantically correct, 2744 // but results in extra register pressure since both the pre-relocation and 2745 // post-relocation copies must be available in registers. For code without 2746 // relocations this is handled elsewhere, but teaching the scheduler to 2747 // reverse the transform we're about to do would be slightly complex. 2748 // Note: This may extend the live range of the inputs to the icmp and thus 2749 // increase the liveset of any statepoint we move over. This is profitable 2750 // as long as all statepoints are in rare blocks. If we had in-register 2751 // lowering for live values this would be a much safer transform. 2752 auto getConditionInst = [](Instruction *TI) -> Instruction * { 2753 if (auto *BI = dyn_cast<BranchInst>(TI)) 2754 if (BI->isConditional()) 2755 return dyn_cast<Instruction>(BI->getCondition()); 2756 // TODO: Extend this to handle switches 2757 return nullptr; 2758 }; 2759 for (BasicBlock &BB : F) { 2760 Instruction *TI = BB.getTerminator(); 2761 if (auto *Cond = getConditionInst(TI)) 2762 // TODO: Handle more than just ICmps here. 
We should be able to move 2763 // most instructions without side effects or memory access. 2764 if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) { 2765 MadeChange = true; 2766 Cond->moveBefore(TI); 2767 } 2768 } 2769 2770 // Nasty workaround - The base computation code in the main algorithm doesn't 2771 // consider the fact that a GEP can be used to convert a scalar to a vector. 2772 // The right fix for this is to integrate GEPs into the base rewriting 2773 // algorithm properly, this is just a short term workaround to prevent 2774 // crashes by canonicalizing such GEPs into fully vector GEPs. 2775 for (Instruction &I : instructions(F)) { 2776 if (!isa<GetElementPtrInst>(I)) 2777 continue; 2778 2779 unsigned VF = 0; 2780 for (unsigned i = 0; i < I.getNumOperands(); i++) 2781 if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) { 2782 assert(VF == 0 || 2783 VF == cast<FixedVectorType>(OpndVTy)->getNumElements()); 2784 VF = cast<FixedVectorType>(OpndVTy)->getNumElements(); 2785 } 2786 2787 // It's the vector to scalar traversal through the pointer operand which 2788 // confuses base pointer rewriting, so limit ourselves to that case. 2789 if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) { 2790 IRBuilder<> B(&I); 2791 auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0)); 2792 I.setOperand(0, Splat); 2793 MadeChange = true; 2794 } 2795 } 2796 2797 MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded); 2798 return MadeChange; 2799 } 2800 2801 // liveness computation via standard dataflow 2802 // ------------------------------------------------------------------- 2803 2804 // TODO: Consider using bitvectors for liveness, the set of potentially 2805 // interesting values should be small and easy to pre-compute. 2806 2807 /// Compute the live-in set for the location rbegin starting from 2808 /// the live-out set of the basic block 2809 static void computeLiveInValues(BasicBlock::reverse_iterator Begin, 2810 BasicBlock::reverse_iterator End, 2811 SetVector<Value *> &LiveTmp) { 2812 for (auto &I : make_range(Begin, End)) { 2813 // KILL/Def - Remove this definition from LiveIn 2814 LiveTmp.remove(&I); 2815 2816 // Don't consider *uses* in PHI nodes, we handle their contribution to 2817 // predecessor blocks when we seed the LiveOut sets 2818 if (isa<PHINode>(I)) 2819 continue; 2820 2821 // USE - Add to the LiveIn set for this instruction 2822 for (Value *V : I.operands()) { 2823 assert(!isUnhandledGCPointerType(V->getType()) && 2824 "support for FCA unimplemented"); 2825 if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) { 2826 // The choice to exclude all things constant here is slightly subtle. 2827 // There are two independent reasons: 2828 // - We assume that things which are constant (from LLVM's definition) 2829 // do not move at runtime. For example, the address of a global 2830 // variable is fixed, even though it's contents may not be. 2831 // - Second, we can't disallow arbitrary inttoptr constants even 2832 // if the language frontend does. Optimization passes are free to 2833 // locally exploit facts without respect to global reachability. This 2834 // can create sections of code which are dynamically unreachable and 2835 // contain just about anything. 

/// Compute the live-in set for the location Begin, starting from
/// the live-out set of the basic block.
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets.
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime.  For example, the address of a global
        //   variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does.  Optimization passes are free to
        //   locally exploit facts without respect to global reachability.
        //   This can create sections of code which are dynamically
        //   unreachable and contain just about anything.
        //   (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'.  This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set.  LLVM's definition
      // of instruction dominance states that V does not dominate itself.  As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties.  This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif
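
/// Compute the LiveSet, KillSet, LiveIn, and LiveOut sets for every block in
/// F by seeding each block locally and then iterating the dataflow equations
/// above over a worklist until no LiveIn set changes.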
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate the seeded liveness until it stabilizes.
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn.  Thus, neither LiveOut nor the
      // LiveIn of this block has changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties.  This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required.
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly.  Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment is specifically what we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before.  We
  // need to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    if (Info.PointerToBase.insert({V, V}).second) {
      assert(isKnownBaseResult(V) &&
             "Can't find base for unexpected live value!");
      continue;
    }

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}