//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
STATISTIC(NumInternalFunc, "Number of internal functions");
STATISTIC(NumColdCC, "Number of functions marked coldcc");

static cl::opt<bool>
    EnableColdCCStressTest("enable-coldcc-stress-test",
                           cl::desc("Enable stress test of coldcc by adding "
                                    "calling conv to all internal functions."),
                           cl::init(false), cl::Hidden);

static cl::opt<int> ColdCCRelFreq(
    "coldcc-rel-freq", cl::Hidden, cl::init(2),
    cl::desc(
        "Maximum block frequency, expressed as a percentage of caller's "
        "entry frequency, for a call site to be considered cold for enabling "
        "coldcc"));

/// Is this global variable possibly used by a leak checker as a root?  If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(GV->getValueType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID:
      return true;
    case Type::FixedVectorTyID:
    case Type::ScalableVectorTyID:
      if (cast<VectorType>(Ty)->getElementType()->isPointerTy())
        return true;
      break;
    case Type::ArrayTyID:
      Types.push_back(cast<ArrayType>(Ty)->getElementType());
      break;
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (Type *InnerTy : STy->elements()) {
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<StructType>(InnerTy) || isa<ArrayType>(InnerTy) ||
            isa<VectorType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
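/// For example (illustrative, not taken from the pass's tests): in
///   %p = call ptr @malloc(i64 16)
///   %q = getelementptr i8, ptr %p, i64 8
///   store ptr %q, ptr @g
/// the chain %q -> %p can be removed together with the store, since each
/// intermediate value has a single use and no side effects.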
static bool IsSafeComputationToRemove(
    Value *V, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, GetTLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (true);
}

/// This GV is a pointer root.  Loop over all users of the global and clean up
/// any that obviously don't assign the global a dynamically-allocated value.
static bool
CleanupPointerRootUsers(GlobalVariable *GV,
                        function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to explicitly
  // allow the memory pointed to by globals at exit.  This is popular because
  // it also solves another problem where the main thread of a C++ program may
  // shut down before other threads that are still expecting to use those
  // globals.  To handle that case, we expect the program may create a
  // singleton and never destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (User *U : llvm::make_early_inc_range(GV->users())) {
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MTI->getSource())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, GetTLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, GetTLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, GetTLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (true);
      I->eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}

/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a
/// change.
static bool CleanupConstantGlobalUsers(GlobalVariable *GV,
                                       const DataLayout &DL) {
  Constant *Init = GV->getInitializer();
  SmallVector<User *, 8> WorkList(GV->users());
  SmallPtrSet<User *, 8> Visited;
  bool Changed = false;

  SmallVector<WeakTrackingVH> MaybeDeadInsts;
  auto EraseFromParent = [&](Instruction *I) {
    for (Value *Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        MaybeDeadInsts.push_back(OpI);
    I->eraseFromParent();
    Changed = true;
  };
  while (!WorkList.empty()) {
    User *U = WorkList.pop_back_val();
    if (!Visited.insert(U).second)
      continue;

    if (auto *BO = dyn_cast<BitCastOperator>(U))
      append_range(WorkList, BO->users());
    else if (auto *ASC = dyn_cast<AddrSpaceCastOperator>(U))
      append_range(WorkList, ASC->users());
    else if (auto *GEP = dyn_cast<GEPOperator>(U))
      append_range(WorkList, GEP->users());
    else if (auto *LI = dyn_cast<LoadInst>(U)) {
      // A load from a uniform value is always the same, regardless of any
      // applied offset.
      Type *Ty = LI->getType();
      if (Constant *Res = ConstantFoldLoadFromUniformValue(Init, Ty)) {
        LI->replaceAllUsesWith(Res);
        EraseFromParent(LI);
        continue;
      }

      Value *PtrOp = LI->getPointerOperand();
      APInt Offset(DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
      PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
          DL, Offset, /* AllowNonInbounds */ true);
      if (PtrOp == GV) {
        if (auto *Value = ConstantFoldLoadFromConst(Init, Ty, Offset, DL)) {
          LI->replaceAllUsesWith(Value);
          EraseFromParent(LI);
        }
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      EraseFromParent(SI);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (getUnderlyingObject(MI->getRawDest()) == GV)
        EraseFromParent(MI);
    }
  }

  Changed |=
      RecursivelyDeleteTriviallyDeadInstructionsPermissive(MaybeDeadInsts);
  GV->removeDeadConstantUsers();
  return Changed;
}

/// Look at all uses of the global and determine which (offset, type) pairs it
/// can be split into.
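/// For example (illustrative): for
///   @g = internal global { i32, double } { i32 1, double 2.0 }
/// accessed only through "load i32, ptr @g" and loads/stores of the double at
/// byte offset 8, this collects {0 -> i32, 8 -> double}.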
static bool collectSRATypes(DenseMap<uint64_t, Type *> &Types, GlobalValue *GV,
                            const DataLayout &DL) {
  SmallVector<Use *, 16> Worklist;
  SmallPtrSet<Use *, 16> Visited;
  auto AppendUses = [&](Value *V) {
    for (Use &U : V->uses())
      if (Visited.insert(&U).second)
        Worklist.push_back(&U);
  };
  AppendUses(GV);
  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    User *V = U->getUser();

    auto *GEP = dyn_cast<GEPOperator>(V);
    if (isa<BitCastOperator>(V) || isa<AddrSpaceCastOperator>(V) ||
        (GEP && GEP->hasAllConstantIndices())) {
      AppendUses(V);
      continue;
    }

    if (Value *Ptr = getLoadStorePointerOperand(V)) {
      // This is storing the global address into somewhere, not storing into
      // the global.
      if (isa<StoreInst>(V) && U->getOperandNo() == 0)
        return false;

      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
                                                   /* AllowNonInbounds */ true);
      if (Ptr != GV || Offset.getActiveBits() >= 64)
        return false;

      // TODO: We currently require that all accesses at a given offset use
      // the same type.  This could be relaxed.
      Type *Ty = getLoadStoreType(V);
      auto It = Types.try_emplace(Offset.getZExtValue(), Ty).first;
      if (Ty != It->second)
        return false;

      // Scalable types not currently supported.
      if (isa<ScalableVectorType>(Ty))
        return false;

      continue;
    }

    // Ignore dead constant users.
    if (auto *C = dyn_cast<Constant>(V)) {
      if (!isSafeToDestroyConstant(C))
        return false;
      continue;
    }

    // Unknown user.
    return false;
  }

  return true;
}

/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
                                 uint64_t FragmentOffsetInBits,
                                 uint64_t FragmentSizeInBits,
                                 uint64_t VarSize) {
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);
  for (auto *GVE : GVs) {
    DIVariable *Var = GVE->getVariable();
    DIExpression *Expr = GVE->getExpression();
    int64_t CurVarOffsetInBytes = 0;
    uint64_t CurVarOffsetInBits = 0;

    // Calculate the offset (in bytes); continue if it is unknown.
    if (!Expr->extractIfOffset(CurVarOffsetInBytes))
      continue;

    // Ignore negative offset.
    if (CurVarOffsetInBytes < 0)
      continue;

    // Convert offset to bits.
    CurVarOffsetInBits = CHAR_BIT * (uint64_t)CurVarOffsetInBytes;

    // Current var starts after the fragment, ignore.
    if (CurVarOffsetInBits >= (FragmentOffsetInBits + FragmentSizeInBits))
      continue;

    uint64_t CurVarSize = Var->getType()->getSizeInBits();
    // Current variable ends before start of fragment, ignore.
    if (CurVarSize != 0 &&
        (CurVarOffsetInBits + CurVarSize) <= FragmentOffsetInBits)
      continue;

    // Current variable fits in the fragment.
    if (CurVarOffsetInBits == FragmentOffsetInBits &&
        CurVarSize == FragmentSizeInBits)
      Expr = DIExpression::get(Expr->getContext(), {});
    // If the FragmentSize is smaller than the variable,
    // emit a fragment expression.
    else if (FragmentSizeInBits < VarSize) {
      if (auto E = DIExpression::createFragmentExpression(
              Expr, FragmentOffsetInBits, FragmentSizeInBits))
        Expr = *E;
      else
        return;
    }
    auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
    NGV->addDebugInfo(NGVE);
  }
}

/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have determined that this
/// transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
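/// For example (illustrative): splitting
///   @g = internal global { i32, double } { i32 1, double 2.0 }
/// yields @g.0 = internal global i32 1 and @g.1 = internal global double 2.0,
/// with each load/store rewritten to address the new global at its offset.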
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  assert(GV->hasLocalLinkage());

  // Collect types to split into.
  DenseMap<uint64_t, Type *> Types;
  if (!collectSRATypes(Types, GV, DL) || Types.empty())
    return nullptr;

  // Make sure we don't SRA back to the same type.
  if (Types.size() == 1 && Types.begin()->second == GV->getValueType())
    return nullptr;

  // Don't perform SRA if we would have to split into many globals.
  if (Types.size() > 16)
    return nullptr;

  // Sort by offset.
  SmallVector<std::pair<uint64_t, Type *>, 16> TypesVector;
  append_range(TypesVector, Types);
  sort(TypesVector, llvm::less_first());

  // Check that the types are non-overlapping.
  uint64_t Offset = 0;
  for (const auto &Pair : TypesVector) {
    // Overlaps with previous type.
    if (Pair.first < Offset)
      return nullptr;

    Offset = Pair.first + DL.getTypeAllocSize(Pair.second);
  }

  // Some accesses go beyond the end of the global, don't bother.
  if (Offset > DL.getTypeAllocSize(GV->getValueType()))
    return nullptr;

  // Collect initializers for new globals.
  Constant *OrigInit = GV->getInitializer();
  DenseMap<uint64_t, Constant *> Initializers;
  for (const auto &Pair : Types) {
    Constant *NewInit = ConstantFoldLoadFromConst(OrigInit, Pair.second,
                                                  APInt(64, Pair.first), DL);
    if (!NewInit) {
      LLVM_DEBUG(dbgs() << "Global SRA: Failed to evaluate initializer of "
                        << *GV << " with type " << *Pair.second << " at offset "
                        << Pair.first << "\n");
      return nullptr;
    }
    Initializers.insert({Pair.first, NewInit});
  }

  LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  // Get the alignment of the global, either explicit or target-specific.
  Align StartAlignment =
      DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
  uint64_t VarSize = DL.getTypeSizeInBits(GV->getValueType());

  // Create replacement globals.
  DenseMap<uint64_t, GlobalVariable *> NewGlobals;
  unsigned NameSuffix = 0;
  for (auto &Pair : TypesVector) {
    uint64_t Offset = Pair.first;
    Type *Ty = Pair.second;
    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), Ty, false, GlobalVariable::InternalLinkage,
        Initializers[Offset], GV->getName() + "." + Twine(NameSuffix++), GV,
        GV->getThreadLocalMode(), GV->getAddressSpace());
    NGV->copyAttributesFrom(GV);
    NewGlobals.insert({Offset, NGV});

    // Calculate the known alignment of the field.  If the original aggregate
    // had 256 byte alignment for example, something might depend on that:
    // propagate info to each field.
    Align NewAlign = commonAlignment(StartAlignment, Offset);
    if (NewAlign > DL.getABITypeAlign(Ty))
      NGV->setAlignment(NewAlign);

    // Copy over the debug info for the variable.
    transferSRADebugInfo(GV, NGV, Offset * 8, DL.getTypeAllocSizeInBits(Ty),
                         VarSize);
  }

  // Replace uses of the original global with uses of the new global.
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<WeakTrackingVH, 16> DeadInsts;
  auto AppendUsers = [&](Value *V) {
    for (User *U : V->users())
      if (Visited.insert(U).second)
        Worklist.push_back(U);
  };
  AppendUsers(GV);
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (isa<BitCastOperator>(V) || isa<AddrSpaceCastOperator>(V) ||
        isa<GEPOperator>(V)) {
      AppendUsers(V);
      if (isa<Instruction>(V))
        DeadInsts.push_back(V);
      continue;
    }

    if (Value *Ptr = getLoadStorePointerOperand(V)) {
      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
                                                   /* AllowNonInbounds */ true);
      assert(Ptr == GV && "Load/store must be from/to global");
      GlobalVariable *NGV = NewGlobals[Offset.getZExtValue()];
      assert(NGV && "Must have replacement global for this offset");

      // Update the pointer operand and recalculate alignment.
      Align PrefAlign = DL.getPrefTypeAlign(getLoadStoreType(V));
      Align NewAlign =
          getOrEnforceKnownAlignment(NGV, PrefAlign, DL, cast<Instruction>(V));

      if (auto *LI = dyn_cast<LoadInst>(V)) {
        LI->setOperand(0, NGV);
        LI->setAlignment(NewAlign);
      } else {
        auto *SI = cast<StoreInst>(V);
        SI->setOperand(1, NGV);
        SI->setAlignment(NewAlign);
      }
      continue;
    }

    assert(isa<Constant>(V) && isSafeToDestroyConstant(cast<Constant>(V)) &&
           "Other users can only be dead constants");
  }

  // Delete old instructions and global.
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
  GV->removeDeadConstantUsers();
  GV->eraseFromParent();
  ++NumSRA;

  assert(NewGlobals.size() > 0);
  return NewGlobals.begin()->second;
}

/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
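/// For example (illustrative):
///   %x = load i32, ptr %v    ; traps if %v is null -> does not block us
///   store ptr %v, ptr %p     ; escapes the value   -> blocks the transform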
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    if (const Instruction *I = dyn_cast<Instruction>(U)) {
      // If null pointer is considered valid, then all uses are non-trapping.
      // Non address-space 0 globals have already been pruned by the caller.
      if (NullPointerIsDefined(I->getFunction()))
        return false;
    }
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledOperand() != V) {
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledOperand() != V) {
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it; it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               !ICmpInst::isSigned(cast<ICmpInst>(U)->getPredicate()) &&
               isa<LoadInst>(U->getOperand(0)) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      assert(isa<GlobalValue>(cast<LoadInst>(U->getOperand(0))
                                  ->getPointerOperand()
                                  ->stripPointerCasts()) &&
             "Should be GlobalVariable");
      // This and only this kind of non-signed ICmpInst is to be replaced with
      // a comparison of the global init bool created later in
      // OptimizeGlobalAddressOfAllocation for the global variable.
    } else {
      return false;
    }
  }
  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
static bool allUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(GV);
  while (!Worklist.empty()) {
    const Value *P = Worklist.pop_back_val();
    for (const auto *U : P->users()) {
      if (auto *LI = dyn_cast<LoadInst>(U)) {
        SmallPtrSet<const PHINode *, 8> PHIs;
        if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
          return false;
      } else if (auto *SI = dyn_cast<StoreInst>(U)) {
        // Ignore stores to the global.
        if (SI->getPointerOperand() != P)
          return false;
      } else if (auto *CE = dyn_cast<ConstantExpr>(U)) {
        if (CE->stripPointerCasts() != GV)
          return false;
        // Check the ConstantExpr further.
        Worklist.push_back(CE);
      } else {
        // We don't know or understand this user, bail out.
        return false;
      }
    }
  }

  return true;
}

/// Get all the load/store uses for global variable \p GV.
static void allUsesOfLoadAndStores(GlobalVariable *GV,
                                   SmallVector<Value *, 4> &Uses) {
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(GV);
  while (!Worklist.empty()) {
    auto *P = Worklist.pop_back_val();
    for (auto *U : P->users()) {
      if (auto *CE = dyn_cast<ConstantExpr>(U)) {
        Worklist.push_back(CE);
        continue;
      }

      assert((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
             "Expect only load or store instructions");
      Uses.push_back(U);
    }
  }
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    // Uses are non-trapping if null pointer is considered valid.
    // Non address-space 0 globals are already pruned by the caller.
    if (NullPointerIsDefined(I->getFunction()))
      return false;
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallBase *CB = cast<CallBase>(I);
      if (CB->getCalledOperand() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CB->setCalledOperand(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->getArgOperand(i) == V) {
            PassedAsArg = true;
            CB->setArgOperand(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                     ConstantExpr::getCast(CI->getOpcode(),
                                           NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
                                                 NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// value, so we can optimize away the load entirely.
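/// For example (illustrative), with @g initialized to null and
///   store ptr @obj, ptr @g
/// as the only store, a use such as
///   %p = load ptr, ptr @g
///   call void %p()
/// would trap if %p were null, so it can only execute after the store and the
/// load can be folded to @obj.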
static bool OptimizeAwayTrappingUsesOfLoads(
    GlobalVariable *GV, Constant *LV, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (User *GlobalUser : llvm::make_early_inc_range(GV->users())) {
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, GetTLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, DL);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and any
/// loads of GV as uses of the new global.
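/// For example (illustrative):
///   @g = internal global ptr null
///   %m = call ptr @malloc(i64 8)
///   store ptr %m, ptr @g
/// becomes
///   @g.body = internal global [8 x i8] undef
///   @g.init = internal global i1 false  ; added only if @g is compared
///                                       ; against null somewhere
/// with loads of @g rewritten to use @g.body directly.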
static GlobalVariable *
OptimizeGlobalAddressOfAllocation(GlobalVariable *GV, CallInst *CI,
                                  uint64_t AllocSize, Constant *InitVal,
                                  const DataLayout &DL,
                                  TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
                    << '\n');

  // Create global of type [AllocSize x i8].
  Type *GlobalType = ArrayType::get(Type::getInt8Ty(GV->getContext()),
                                    AllocSize);

  // Create the new global variable.  The contents of the allocated memory are
  // undefined initially, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // Initialize the global at the point of the original call.  Note that this
  // is a different point from the initialization referred to below for the
  // nullability handling.  Subtlety: We have not proven the original global
  // was only initialized once.  As such, we can not fold this into the
  // initializer of the new global as we may need to re-init the storage
  // multiple times.
  if (!isa<UndefValue>(InitVal)) {
    IRBuilder<> Builder(CI->getNextNode());
    // TODO: Use alignment above if align!=1
    Builder.CreateMemSet(NewGV, InitVal, AllocSize, std::nullopt);
  }

  // Update users of the allocation to use the new global instead.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  SmallSetVector<Constant *, 1> RepValues;
  RepValues.insert(NewGV);

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
      new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                         GlobalValue::InternalLinkage,
                         ConstantInt::getFalse(GV->getContext()),
                         GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all instruction uses of GV, processing them in turn.
  SmallVector<Value *, 4> Guses;
  allUsesOfLoadAndStores(GV, Guses);
  for (auto *U : Guses) {
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // The global is initialized when the store to it occurs.  If the stored
      // value is the null value, the global bool is set to false, otherwise
      // true.
      new StoreInst(ConstantInt::getBool(
                        GV->getContext(),
                        !isa<ConstantPointerNull>(SI->getValueOperand())),
                    InitBool, false, Align(1), SI->getOrdering(),
                    SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(U);
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        auto *CE = ConstantExpr::getBitCast(NewGV, LI->getType());
        RepValues.insert(CE);
        LoadUse.set(CE);
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
                               InitBool->getName() + ".val", false, Align(1),
                               LI->getOrdering(), LI->getSyncScopeID(), LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT: // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_UGE: // X >= null -> always true
        LV = ConstantInt::getTrue(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the allocation.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To enable further optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  for (auto *CE : RepValues)
    ConstantPropUsersOf(CE, DL, TLI);

  return NewGV;
}

/// Scan the use-list of the allocation (CI), checking to make sure that there
/// are no complex uses of it.  We permit simple things like dereferencing the
/// pointer, but not storing through the address, unless it is to the
/// specified global.
static bool
valueIsOnlyUsedLocallyOrStoredToOneGlobal(const CallInst *CI,
                                          const GlobalVariable *GV) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(CI);

  while (!Worklist.empty()) {
    const Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    for (const Use &VUse : V->uses()) {
      const User *U = VUse.getUser();
      if (isa<LoadInst>(U) || isa<CmpInst>(U))
        continue; // Fine, ignore.

      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getValueOperand() == V &&
            SI->getPointerOperand()->stripPointerCasts() != GV)
          return false; // Storing the pointer not into GV... bad.
        continue; // Otherwise, storing through it, or storing into GV... fine.
      }

      if (auto *BCI = dyn_cast<BitCastInst>(U)) {
        Worklist.push_back(BCI);
        continue;
      }

      if (auto *GEPI = dyn_cast<GetElementPtrInst>(U)) {
        Worklist.push_back(GEPI);
        continue;
      }

      return false;
    }
  }

  return true;
}

/// If we have a global that is only initialized with a fixed size allocation
/// try to transform the program to use global memory instead of heap
/// allocated memory.  This eliminates dynamic allocation, avoids an indirection
/// accessing the data, and exposes the resultant global to further GlobalOpt.
static bool tryToOptimizeStoreOfAllocationToGlobal(GlobalVariable *GV,
                                                   CallInst *CI,
                                                   const DataLayout &DL,
                                                   TargetLibraryInfo *TLI) {
  if (!isRemovableAlloc(CI, TLI))
    // Must be able to remove the call when we get done.
    return false;

  Type *Int8Ty = Type::getInt8Ty(CI->getFunction()->getContext());
  Constant *InitVal = getInitialValueOfAllocation(CI, TLI, Int8Ty);
  if (!InitVal)
    // Must be able to emit a memset for initialization.
    return false;

  uint64_t AllocSize;
  if (!getObjectSize(CI, AllocSize, DL, TLI, ObjectSizeOpts()))
    return false;

  // Restrict this transformation to only working on small allocations
  // (2048 bytes currently), as we don't want to introduce a 16M global or
  // something.
  if (AllocSize >= 2048)
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!allUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded, gep'd, and icmp'd.
  // These are all things we could transform to using the global for.
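  // E.g. (illustrative) "store ptr %m, ptr @other" disqualifies the malloc,
  // while "%c = icmp eq ptr %m, null" does not.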
  if (!valueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV))
    return false;

  OptimizeGlobalAddressOfAllocation(GV, CI, AllocSize, InitVal, DL, TLI);
  return true;
}

// Try to optimize globals based on the knowledge that only one value (besides
// its initializer) is ever stored to the global.
static bool
optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                         const DataLayout &DL,
                         function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue() &&
      StoredOnceVal->getType()->isPointerTy() &&
      !NullPointerIsDefined(
          nullptr /* F */,
          GV->getInitializer()->getType()->getPointerAddressSpace())) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, GetTLI))
        return true;
    } else if (isAllocationFn(StoredOnceVal, GetTLI)) {
      if (auto *CI = dyn_cast<CallInst>(StoredOnceVal)) {
        auto *TLI = &GetTLI(*CI->getFunction());
        if (tryToOptimizeStoreOfAllocationToGlobal(GV, CI, DL, TLI))
          return true;
      }
    }
  }

  return false;
}

/// At this point, we have learned that the only two values ever stored into GV
/// are its initializer and OtherVal.  See if we can shrink the global into a
/// boolean and select between the two values whenever it is used.  This exposes
/// the values to other scalar optimizations.
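/// For example (illustrative): if only 0 (the initializer) and 5 are ever
/// stored to
///   @flag = internal global i32 0
/// the global becomes
///   @flag.b = internal global i1 false
/// with stores of 5 rewritten to "store i1 true" and loads rewritten to
///   %b = load i1, ptr @flag.b
///   %v = select i1 %b, i32 5, i32 0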
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
  Type *GVElType = GV->getValueType();

  // If GVElType is already i1, it is already shrunk.  If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a select
  // between them is very expensive and unlikely to lead to later
  // simplification.  In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::getInt1Ty(GV->getContext()) ||
      GVElType->isFloatingPointTy() ||
      GVElType->isPointerTy() || GVElType->isVectorTy())
    return false;

  // Walk the use list of the global seeing if all the uses are load or store.
  // If there is anything else, bail out.
  for (User *U : GV->users()) {
    if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
      return false;
    if (getLoadStoreType(U) != GVElType)
      return false;
  }

  LLVM_DEBUG(dbgs() << "   *** SHRINKING TO BOOL: " << *GV << "\n");

  // Create the new global, initializing it to false.
  GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
                                             false,
                                             GlobalValue::InternalLinkage,
                                             ConstantInt::getFalse(GV->getContext()),
                                             GV->getName()+".b",
                                             GV->getThreadLocalMode(),
                                             GV->getType()->getAddressSpace());
  NewGV->copyAttributesFrom(GV);
  GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
         "No reason to shrink to bool!");

  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
  bool IsOneZero = false;
  bool EmitOneOrZero = true;
  auto *CI = dyn_cast<ConstantInt>(OtherVal);
  if (CI && CI->getValue().getActiveBits() <= 64) {
    IsOneZero = InitVal->isNullValue() && CI->isOne();

    auto *CIInit = dyn_cast<ConstantInt>(GV->getInitializer());
    if (CIInit && CIInit->getValue().getActiveBits() <= 64) {
      uint64_t ValInit = CIInit->getZExtValue();
      uint64_t ValOther = CI->getZExtValue();
      uint64_t ValMinus = ValOther - ValInit;

      for (auto *GVe : GVs) {
        DIGlobalVariable *DGV = GVe->getVariable();
        DIExpression *E = GVe->getExpression();
        const DataLayout &DL = GV->getParent()->getDataLayout();
        unsigned SizeInOctets =
            DL.getTypeAllocSizeInBits(NewGV->getValueType()) / 8;

        // It is expected that the address of the global optimized variable is
        // on top of the stack.  After optimization, the value of that variable
        // will be either 0 for the initial value or 1 for the other value.
        // The following expression should return a constant integer value
        // depending on the value at the global object address:
        // val * (ValOther - ValInit) + ValInit:
        // DW_OP_deref DW_OP_constu <ValMinus>
        // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
        SmallVector<uint64_t, 12> Ops = {
            dwarf::DW_OP_deref_size, SizeInOctets,
            dwarf::DW_OP_constu, ValMinus,
            dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
            dwarf::DW_OP_plus};
        bool WithStackValue = true;
        E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
        DIGlobalVariableExpression *DGVE =
            DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
        NewGV->addDebugInfo(DGVE);
      }
      EmitOneOrZero = false;
    }
  }

  if (EmitOneOrZero) {
    // FIXME: This will only emit the address of the variable, which will only
    // ever hold 0 or 1, for the debugger.
    for (auto *GV : GVs)
      NewGV->addDebugInfo(GV);
  }

  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->user_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal) {
        StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
                                    StoringOther);
      } else {
        // Otherwise, we are storing a previously loaded copy.  To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction.  If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
                                  LI->getName() + ".b", false, Align(1),
                                  LI->getOrdering(), LI->getSyncScopeID(), LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      StoreInst *NSI =
          new StoreInst(StoreVal, NewGV, false, Align(1), SI->getOrdering(),
                        SI->getSyncScopeID(), SI);
      NSI->setDebugLoc(SI->getDebugLoc());
    } else {
      // Change the load into a load of bool then a select.
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV->getValueType(), NewGV,
                                   LI->getName() + ".b", false, Align(1),
                                   LI->getOrdering(), LI->getSyncScopeID(), LI);
      Instruction *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      // Since LI is split into two instructions, NLI and NSI both inherit the
      // same DebugLoc
      NLI->setDebugLoc(LI->getDebugLoc());
      NSI->setDebugLoc(LI->getDebugLoc());
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  // Retain the name of the old global variable.  People who are debugging
  // their programs may expect these variables to be named the same.
  NewGV->takeName(GV);
  GV->eraseFromParent();
  return true;
}

static bool
deleteIfDead(GlobalValue &GV,
             SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats,
             function_ref<void(Function &)> DeleteFnCallback = nullptr) {
  GV.removeDeadConstantUsers();

  if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
    return false;

  if (const Comdat *C = GV.getComdat())
    if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
      return false;

  bool Dead;
  if (auto *F = dyn_cast<Function>(&GV))
    Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
  else
    Dead = GV.use_empty();
  if (!Dead)
    return false;

  LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
  if (auto *F = dyn_cast<Function>(&GV)) {
    if (DeleteFnCallback)
      DeleteFnCallback(*F);
  }
  GV.eraseFromParent();
  ++NumDeleted;
  return true;
}

static bool isPointerValueDeadOnEntryToFunction(
    const Function *F, GlobalValue *GV,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  // Find all uses of GV.  We expect them all to be in F, and if we can't
  // identify a use, we bail out.
  //
  // On each of these uses, identify if the memory that GV points to is
  // used/required/live at the start of the function.  If it is not, for
  // example if the first thing the function does is store to the GV, the GV
  // can possibly be demoted.
  //
  // We don't do an exhaustive search for memory operations - simply look
  // through bitcasts as they're quite common and benign.
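  //
  // For example (illustrative): if F begins with "store i32 0, ptr @g" and
  // every load of @g is dominated by some store to @g, the initial contents
  // of @g are never observed, so @g is dead on entry to F.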
  const DataLayout &DL = GV->getParent()->getDataLayout();
  SmallVector<LoadInst *, 4> Loads;
  SmallVector<StoreInst *, 4> Stores;
  for (auto *U : GV->users()) {
    if (Operator::getOpcode(U) == Instruction::BitCast) {
      for (auto *UU : U->users()) {
        if (auto *LI = dyn_cast<LoadInst>(UU))
          Loads.push_back(LI);
        else if (auto *SI = dyn_cast<StoreInst>(UU))
          Stores.push_back(SI);
        else
          return false;
      }
      continue;
    }

    Instruction *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    assert(I->getParent()->getParent() == F);

    if (auto *LI = dyn_cast<LoadInst>(I))
      Loads.push_back(LI);
    else if (auto *SI = dyn_cast<StoreInst>(I))
      Stores.push_back(SI);
    else
      return false;
  }

  // We have identified all uses of GV into loads and stores.  Now check if all
  // of them are known not to depend on the value of the global at the function
  // entry point.  We do this by ensuring that every load is dominated by at
  // least one store.
  auto &DT = LookupDomTree(*const_cast<Function *>(F));

  // The below check is quadratic.  Check we're not going to do too many tests.
  // FIXME: Even though this will always have worst-case quadratic time, we
  // could put effort into minimizing the average time by putting stores that
  // have been shown to dominate at least one load at the beginning of the
  // Stores array, making subsequent dominance checks more likely to succeed
  // early.
  //
  // The threshold here is fairly large because global->local demotion is a
  // very powerful optimization should it fire.
  const unsigned Threshold = 100;
  if (Loads.size() * Stores.size() > Threshold)
    return false;

  for (auto *L : Loads) {
    auto *LTy = L->getType();
    if (none_of(Stores, [&](const StoreInst *S) {
          auto *STy = S->getValueOperand()->getType();
          // The load is only dominated by the store if DomTree says so
          // and the number of bits loaded in L is less than or equal to
          // the number of bits stored in S.
          return DT.dominates(S, L) &&
                 DL.getTypeStoreSize(LTy).getFixedValue() <=
                     DL.getTypeStoreSize(STy).getFixedValue();
        }))
      return false;
  }
  // All loads have known dependences inside F, so the global can be localized.
  return true;
}

/// C may have non-instruction users.  Can all of those users be turned into
/// instructions?
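/// For example (illustrative), a constant-expression user such as
///   store i32 0, ptr getelementptr inbounds ([4 x i32], ptr @g, i64 0, i64 1)
/// qualifies: the GEP constant expression can be rewritten as an ordinary GEP
/// instruction, which in turn lets @g be replaced by an alloca.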
static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
  // We don't do this exhaustively.  The most common pattern that we really
  // need to care about is a constant GEP or constant bitcast - so just looking
  // through one single ConstantExpr.
  //
  // The set of constants that this function returns true for must be able to
  // be handled by makeAllConstantUsesInstructions.
  for (auto *U : C->users()) {
    if (isa<Instruction>(U))
      continue;
    if (!isa<ConstantExpr>(U))
      // Non instruction, non-constantexpr user; cannot convert this.
      return false;
    for (auto *UU : U->users())
      if (!isa<Instruction>(UU))
        // A constantexpr used by another constant.  We don't try to recurse
        // any further but just bail out at this point.
        return false;
  }

  return true;
}

/// C may have non-instruction users, and
/// allNonInstructionUsersCanBeMadeInstructions has returned true.  Convert the
/// non-instruction users to instructions.
static void makeAllConstantUsesInstructions(Constant *C) {
  SmallVector<ConstantExpr*,4> Users;
  for (auto *U : C->users()) {
    if (isa<ConstantExpr>(U))
      Users.push_back(cast<ConstantExpr>(U));
    else
      // We should never get here; allNonInstructionUsersCanBeMadeInstructions
      // should not have returned true for C.
      assert(
          isa<Instruction>(U) &&
          "Can't transform non-constantexpr non-instruction to instruction!");
  }

  SmallVector<Value*,4> UUsers;
  for (auto *U : Users) {
    UUsers.clear();
    append_range(UUsers, U->users());
    for (auto *UU : UUsers) {
      Instruction *UI = cast<Instruction>(UU);
      Instruction *NewU = U->getAsInstruction(UI);
      UI->replaceUsesOfWith(U, NewU);
    }
    // We've replaced all the uses, so destroy the constant.  (destroyConstant
    // will update value handles and metadata.)
    U->destroyConstant();
  }
}

// For a global variable with one store, if the store dominates any loads,
// those loads will always load the stored value (as opposed to the
// initializer), even in the presence of recursion.
static bool forwardStoredOnceStore(
    GlobalVariable *GV, const StoreInst *StoredOnceStore,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  const Value *StoredOnceValue = StoredOnceStore->getValueOperand();
  // We can do this optimization for non-constants in nosync + norecurse
  // functions, but globals used in exactly one norecurse function are already
  // promoted to an alloca.
  if (!isa<Constant>(StoredOnceValue))
    return false;
  const Function *F = StoredOnceStore->getFunction();
  SmallVector<LoadInst *> Loads;
  for (User *U : GV->users()) {
    if (auto *LI = dyn_cast<LoadInst>(U)) {
      if (LI->getFunction() == F &&
          LI->getType() == StoredOnceValue->getType() && LI->isSimple())
        Loads.push_back(LI);
    }
  }
  // Only compute DT if we have any loads to examine.
  bool MadeChange = false;
  if (!Loads.empty()) {
    auto &DT = LookupDomTree(*const_cast<Function *>(F));
    for (auto *LI : Loads) {
      if (DT.dominates(StoredOnceStore, LI)) {
        LI->replaceAllUsesWith(const_cast<Value *>(StoredOnceValue));
        LI->eraseFromParent();
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}

/// Analyze the specified global variable and optimize
/// it if possible.  If we make a change, return true.
static bool
processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
                      function_ref<TargetTransformInfo &(Function &)> GetTTI,
                      function_ref<TargetLibraryInfo &(Function &)> GetTLI,
                      function_ref<DominatorTree &(Function &)> LookupDomTree) {
  auto &DL = GV->getParent()->getDataLayout();
  // If this is a first class global and has only one accessing function and
  // this function is non-recursive, we replace the global with a local alloca
  // in this function.
  //
  // NOTE: It doesn't make sense to promote non-single-value types since we
  // are just replacing static memory with stack memory.
  //
  // If the global is in a different address space, don't bring it to stack.
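  //
  // For example (illustrative), an i32 global accessed only from a single
  // norecurse function becomes an alloca in that function's entry block,
  // seeded with a store of the global's initializer.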
  if (!GS.HasMultipleAccessingFunctions &&
      GS.AccessingFunction &&
      GV->getValueType()->isSingleValueType() &&
      GV->getType()->getAddressSpace() == 0 &&
      !GV->isExternallyInitialized() &&
      allNonInstructionUsersCanBeMadeInstructions(GV) &&
      GS.AccessingFunction->doesNotRecurse() &&
      isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
                                          LookupDomTree)) {
    const DataLayout &DL = GV->getParent()->getDataLayout();

    LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
    Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
                                                        ->getEntryBlock().begin());
    Type *ElemTy = GV->getValueType();
    // FIXME: Pass Global's alignment when globals have alignment
    AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
                                        GV->getName(), &FirstI);
    if (!isa<UndefValue>(GV->getInitializer()))
      new StoreInst(GV->getInitializer(), Alloca, &FirstI);

    makeAllConstantUsesInstructions(GV);

    GV->replaceAllUsesWith(Alloca);
    GV->eraseFromParent();
    ++NumLocalized;
    return true;
  }

  bool Changed = false;

  // If the global is never loaded (but may be stored to), it is dead.
  // Delete it now.
  if (!GS.IsLoaded) {
    LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");

    if (isLeakCheckerRoot(GV)) {
      // Delete any constant stores to the global.
      Changed = CleanupPointerRootUsers(GV, GetTLI);
    } else {
      // Delete any stores we can find to the global.  We may not be able to
      // make it completely dead though.
      Changed = CleanupConstantGlobalUsers(GV, DL);
    }

    // If the global is dead now, delete it.
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
      Changed = true;
    }
    return Changed;
  }

  if (GS.StoredType <= GlobalStatus::InitializerStored) {
    LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");

    // Don't actually mark a global constant if it's atomic because atomic
    // loads are implemented by a trivial cmpxchg in some edge-cases and that
    // usually requires write access to the variable even if it's not actually
    // changed.
    if (GS.Ordering == AtomicOrdering::NotAtomic) {
      assert(!GV->isConstant() && "Expected a non-constant global");
      GV->setConstant(true);
      Changed = true;
    }

    // Clean up any obviously simplifiable users now.
    Changed |= CleanupConstantGlobalUsers(GV, DL);

    // If the global is dead now, just nuke it.
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "   *** Marking constant allowed us to simplify "
                        << "all users and delete global!\n");
      GV->eraseFromParent();
      ++NumDeleted;
      return true;
    }

    // Fall through to the next check; see if we can optimize further.
1581 ++NumMarked; 1582 } 1583 if (!GV->getInitializer()->getType()->isSingleValueType()) { 1584 const DataLayout &DL = GV->getParent()->getDataLayout(); 1585 if (SRAGlobal(GV, DL)) 1586 return true; 1587 } 1588 Value *StoredOnceValue = GS.getStoredOnceValue(); 1589 if (GS.StoredType == GlobalStatus::StoredOnce && StoredOnceValue) { 1590 Function &StoreFn = 1591 const_cast<Function &>(*GS.StoredOnceStore->getFunction()); 1592 bool CanHaveNonUndefGlobalInitializer = 1593 GetTTI(StoreFn).canHaveNonUndefGlobalInitializerInAddressSpace( 1594 GV->getType()->getAddressSpace()); 1595 // If the initial value for the global was an undef value, and if only 1596 // one other value was stored into it, we can just change the 1597 // initializer to be the stored value, then delete all stores to the 1598 // global. This allows us to mark it constant. 1599 // This is restricted to address spaces that allow globals to have 1600 // initializers. NVPTX, for example, does not support initializers for 1601 // shared memory (AS 3). 1602 auto *SOVConstant = dyn_cast<Constant>(StoredOnceValue); 1603 if (SOVConstant && isa<UndefValue>(GV->getInitializer()) && 1604 DL.getTypeAllocSize(SOVConstant->getType()) == 1605 DL.getTypeAllocSize(GV->getValueType()) && 1606 CanHaveNonUndefGlobalInitializer) { 1607 if (SOVConstant->getType() == GV->getValueType()) { 1608 // Change the initializer in place. 1609 GV->setInitializer(SOVConstant); 1610 } else { 1611 // Create a new global with adjusted type. 1612 auto *NGV = new GlobalVariable( 1613 *GV->getParent(), SOVConstant->getType(), GV->isConstant(), 1614 GV->getLinkage(), SOVConstant, "", GV, GV->getThreadLocalMode(), 1615 GV->getAddressSpace()); 1616 NGV->takeName(GV); 1617 NGV->copyAttributesFrom(GV); 1618 GV->replaceAllUsesWith(ConstantExpr::getBitCast(NGV, GV->getType())); 1619 GV->eraseFromParent(); 1620 GV = NGV; 1621 } 1622 1623 // Clean up any obviously simplifiable users now. 1624 CleanupConstantGlobalUsers(GV, DL); 1625 1626 if (GV->use_empty()) { 1627 LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to " 1628 << "simplify all users and delete global!\n"); 1629 GV->eraseFromParent(); 1630 ++NumDeleted; 1631 } 1632 ++NumSubstitute; 1633 return true; 1634 } 1635 1636 // Try to optimize globals based on the knowledge that only one value 1637 // (besides its initializer) is ever stored to the global. 1638 if (optimizeOnceStoredGlobal(GV, StoredOnceValue, DL, GetTLI)) 1639 return true; 1640 1641 // Try to forward the store to any loads. If we have more than one store, we 1642 // may have a store of the initializer between StoredOnceStore and a load. 1643 if (GS.NumStores == 1) 1644 if (forwardStoredOnceStore(GV, GS.StoredOnceStore, LookupDomTree)) 1645 return true; 1646 1647 // Otherwise, if the global was not a boolean, we can shrink it to be a 1648 // boolean. Skip this optimization for AS that doesn't allow an initializer. 1649 if (SOVConstant && GS.Ordering == AtomicOrdering::NotAtomic && 1650 (!isa<UndefValue>(GV->getInitializer()) || 1651 CanHaveNonUndefGlobalInitializer)) { 1652 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 1653 ++NumShrunkToBool; 1654 return true; 1655 } 1656 } 1657 } 1658 1659 return Changed; 1660 } 1661 1662 /// Analyze the specified global variable and optimize it if possible. If we 1663 /// make a change, return true. 
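/// For illustration (hypothetical IR): an internal global whose address is
/// never compared can be given unnamed_addr here, e.g.
///   @s = internal unnamed_addr constant [4 x i8] c"abc\00"
/// which records that the address itself is not significant, so identical
/// constants may be merged.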
1664 static bool 1665 processGlobal(GlobalValue &GV, 1666 function_ref<TargetTransformInfo &(Function &)> GetTTI, 1667 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 1668 function_ref<DominatorTree &(Function &)> LookupDomTree) { 1669 if (GV.getName().startswith("llvm.")) 1670 return false; 1671 1672 GlobalStatus GS; 1673 1674 if (GlobalStatus::analyzeGlobal(&GV, GS)) 1675 return false; 1676 1677 bool Changed = false; 1678 if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) { 1679 auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global 1680 : GlobalValue::UnnamedAddr::Local; 1681 if (NewUnnamedAddr != GV.getUnnamedAddr()) { 1682 GV.setUnnamedAddr(NewUnnamedAddr); 1683 NumUnnamed++; 1684 Changed = true; 1685 } 1686 } 1687 1688 // Do more involved optimizations if the global is internal. 1689 if (!GV.hasLocalLinkage()) 1690 return Changed; 1691 1692 auto *GVar = dyn_cast<GlobalVariable>(&GV); 1693 if (!GVar) 1694 return Changed; 1695 1696 if (GVar->isConstant() || !GVar->hasInitializer()) 1697 return Changed; 1698 1699 return processInternalGlobal(GVar, GS, GetTTI, GetTLI, LookupDomTree) || 1700 Changed; 1701 } 1702 1703 /// Walk all of the direct calls of the specified function, changing them to 1704 /// FastCC. 1705 static void ChangeCalleesToFastCall(Function *F) { 1706 for (User *U : F->users()) { 1707 if (isa<BlockAddress>(U)) 1708 continue; 1709 cast<CallBase>(U)->setCallingConv(CallingConv::Fast); 1710 } 1711 } 1712 1713 static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs, 1714 Attribute::AttrKind A) { 1715 unsigned AttrIndex; 1716 if (Attrs.hasAttrSomewhere(A, &AttrIndex)) 1717 return Attrs.removeAttributeAtIndex(C, AttrIndex, A); 1718 return Attrs; 1719 } 1720 1721 static void RemoveAttribute(Function *F, Attribute::AttrKind A) { 1722 F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A)); 1723 for (User *U : F->users()) { 1724 if (isa<BlockAddress>(U)) 1725 continue; 1726 CallBase *CB = cast<CallBase>(U); 1727 CB->setAttributes(StripAttr(F->getContext(), CB->getAttributes(), A)); 1728 } 1729 } 1730 1731 /// Return true if this is a calling convention that we'd like to change. The 1732 /// idea here is that we don't want to mess with the convention if the user 1733 /// explicitly requested something with performance implications like coldcc, 1734 /// GHC, or anyregcc. 1735 static bool hasChangeableCC(Function *F) { 1736 CallingConv::ID CC = F->getCallingConv(); 1737 1738 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc? 1739 if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall) 1740 return false; 1741 1742 // FIXME: Change CC for the whole chain of musttail calls when possible. 1743 // 1744 // Can't change CC of the function that either has musttail calls, or is a 1745 // musttail callee itself 1746 for (User *U : F->users()) { 1747 if (isa<BlockAddress>(U)) 1748 continue; 1749 CallInst* CI = dyn_cast<CallInst>(U); 1750 if (!CI) 1751 continue; 1752 1753 if (CI->isMustTailCall()) 1754 return false; 1755 } 1756 1757 for (BasicBlock &BB : *F) 1758 if (BB.getTerminatingMustTailCall()) 1759 return false; 1760 1761 return true; 1762 } 1763 1764 /// Return true if the block containing the call site has a BlockFrequency of 1765 /// less than ColdCCRelFreq% of the entry block. 
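/// For example, with the default -coldcc-rel-freq=2 this computes
/// CallSiteFreq < CallerEntryFreq * 2/100, i.e. the call site must execute
/// less than 2% as often as the caller's entry block to be considered cold.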
1766 static bool isColdCallSite(CallBase &CB, BlockFrequencyInfo &CallerBFI) { 1767 const BranchProbability ColdProb(ColdCCRelFreq, 100); 1768 auto *CallSiteBB = CB.getParent(); 1769 auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB); 1770 auto CallerEntryFreq = 1771 CallerBFI.getBlockFreq(&(CB.getCaller()->getEntryBlock())); 1772 return CallSiteFreq < CallerEntryFreq * ColdProb; 1773 } 1774 1775 // This function checks if the input function F is cold at all call sites. It 1776 // also looks at each call site's containing function, returning false if the 1777 // caller function contains other non-cold calls. The input vector AllCallsCold 1778 // contains a list of functions that only have call sites in cold blocks. 1779 static bool 1780 isValidCandidateForColdCC(Function &F, 1781 function_ref<BlockFrequencyInfo &(Function &)> GetBFI, 1782 const std::vector<Function *> &AllCallsCold) { 1783 1784 if (F.user_empty()) 1785 return false; 1786 1787 for (User *U : F.users()) { 1788 if (isa<BlockAddress>(U)) 1789 continue; 1790 1791 CallBase &CB = cast<CallBase>(*U); 1792 Function *CallerFunc = CB.getParent()->getParent(); 1793 BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc); 1794 if (!isColdCallSite(CB, CallerBFI)) 1795 return false; 1796 if (!llvm::is_contained(AllCallsCold, CallerFunc)) 1797 return false; 1798 } 1799 return true; 1800 } 1801 1802 static void changeCallSitesToColdCC(Function *F) { 1803 for (User *U : F->users()) { 1804 if (isa<BlockAddress>(U)) 1805 continue; 1806 cast<CallBase>(U)->setCallingConv(CallingConv::Cold); 1807 } 1808 } 1809 1810 // This function iterates over all the call instructions in the input Function 1811 // and checks that all call sites are in cold blocks and are allowed to use the 1812 // coldcc calling convention. 1813 static bool 1814 hasOnlyColdCalls(Function &F, 1815 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) { 1816 for (BasicBlock &BB : F) { 1817 for (Instruction &I : BB) { 1818 if (CallInst *CI = dyn_cast<CallInst>(&I)) { 1819 // Skip over inline asm instructions since they aren't function calls. 1820 if (CI->isInlineAsm()) 1821 continue; 1822 Function *CalledFn = CI->getCalledFunction(); 1823 if (!CalledFn) 1824 return false; 1825 // Skip over intrinsics since they won't remain as function calls. 1826 // Important to do this check before the linkage check below so we 1827 // won't bail out on debug intrinsics, possibly making the generated 1828 // code dependent on the presence of debug info. 1829 if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic) 1830 continue; 1831 if (!CalledFn->hasLocalLinkage()) 1832 return false; 1833 // Check if it's valid to use the coldcc calling convention.
1834 if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() || 1835 CalledFn->hasAddressTaken()) 1836 return false; 1837 BlockFrequencyInfo &CallerBFI = GetBFI(F); 1838 if (!isColdCallSite(*CI, CallerBFI)) 1839 return false; 1840 } 1841 } 1842 } 1843 return true; 1844 } 1845 1846 static bool hasMustTailCallers(Function *F) { 1847 for (User *U : F->users()) { 1848 CallBase *CB = dyn_cast<CallBase>(U); 1849 if (!CB) { 1850 assert(isa<BlockAddress>(U) && 1851 "Expected either CallBase or BlockAddress"); 1852 continue; 1853 } 1854 if (CB->isMustTailCall()) 1855 return true; 1856 } 1857 return false; 1858 } 1859 1860 static bool hasInvokeCallers(Function *F) { 1861 for (User *U : F->users()) 1862 if (isa<InvokeInst>(U)) 1863 return true; 1864 return false; 1865 } 1866 1867 static void RemovePreallocated(Function *F) { 1868 RemoveAttribute(F, Attribute::Preallocated); 1869 1870 auto *M = F->getParent(); 1871 1872 IRBuilder<> Builder(M->getContext()); 1873 1874 // Cannot modify users() while iterating over it, so make a copy. 1875 SmallVector<User *, 4> PreallocatedCalls(F->users()); 1876 for (User *U : PreallocatedCalls) { 1877 CallBase *CB = dyn_cast<CallBase>(U); 1878 if (!CB) 1879 continue; 1880 1881 assert( 1882 !CB->isMustTailCall() && 1883 "Shouldn't call RemovePreallocated() on a musttail preallocated call"); 1884 // Create a copy of the call without the "preallocated" operand bundle. 1885 SmallVector<OperandBundleDef, 1> OpBundles; 1886 CB->getOperandBundlesAsDefs(OpBundles); 1887 CallBase *PreallocatedSetup = nullptr; 1888 for (auto *It = OpBundles.begin(); It != OpBundles.end(); ++It) { 1889 if (It->getTag() == "preallocated") { 1890 PreallocatedSetup = cast<CallBase>(*It->input_begin()); 1891 OpBundles.erase(It); 1892 break; 1893 } 1894 } 1895 assert(PreallocatedSetup && "Did not find preallocated bundle"); 1896 uint64_t ArgCount = 1897 cast<ConstantInt>(PreallocatedSetup->getArgOperand(0))->getZExtValue(); 1898 1899 assert((isa<CallInst>(CB) || isa<InvokeInst>(CB)) && 1900 "Unknown indirect call type"); 1901 CallBase *NewCB = CallBase::Create(CB, OpBundles, CB); 1902 CB->replaceAllUsesWith(NewCB); 1903 NewCB->takeName(CB); 1904 CB->eraseFromParent(); 1905 1906 Builder.SetInsertPoint(PreallocatedSetup); 1907 auto *StackSave = 1908 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stacksave)); 1909 1910 Builder.SetInsertPoint(NewCB->getNextNonDebugInstruction()); 1911 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackrestore), 1912 StackSave); 1913 1914 // Replace @llvm.call.preallocated.arg() with alloca. 1915 // Cannot modify users() while iterating over it, so make a copy. 1916 // @llvm.call.preallocated.arg() can be called with the same index multiple 1917 // times. So for each @llvm.call.preallocated.arg(), we see if we have 1918 // already created a Value* for the index, and if not, create an alloca and 1919 // bitcast right after the @llvm.call.preallocated.setup() so that it 1920 // dominates all uses.
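    //
    // Rough sketch of the rewrite (hypothetical IR; attributes and argument
    // details elided):
    //   %t = call token @llvm.call.preallocated.setup(i32 1)
    //   %a = call i8* @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
    //   call void @f(i8* %a) ["preallocated"(token %t)]
    // becomes, approximately:
    //   %ss = call i8* @llvm.stacksave()   ; at the old setup point
    //   %paarg = alloca i32                ; replaces %a via an i8* bitcast
    //   call void @f(i8* %paarg.cast)      ; operand bundle removed
    //   call void @llvm.stackrestore(i8* %ss)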
1921 SmallVector<Value *, 2> ArgAllocas(ArgCount); 1922 SmallVector<User *, 2> PreallocatedArgs(PreallocatedSetup->users()); 1923 for (auto *User : PreallocatedArgs) { 1924 auto *UseCall = cast<CallBase>(User); 1925 assert(UseCall->getCalledFunction()->getIntrinsicID() == 1926 Intrinsic::call_preallocated_arg && 1927 "preallocated token use was not a llvm.call.preallocated.arg"); 1928 uint64_t AllocArgIndex = 1929 cast<ConstantInt>(UseCall->getArgOperand(1))->getZExtValue(); 1930 Value *AllocaReplacement = ArgAllocas[AllocArgIndex]; 1931 if (!AllocaReplacement) { 1932 auto AddressSpace = UseCall->getType()->getPointerAddressSpace(); 1933 auto *ArgType = 1934 UseCall->getFnAttr(Attribute::Preallocated).getValueAsType(); 1935 auto *InsertBefore = PreallocatedSetup->getNextNonDebugInstruction(); 1936 Builder.SetInsertPoint(InsertBefore); 1937 auto *Alloca = 1938 Builder.CreateAlloca(ArgType, AddressSpace, nullptr, "paarg"); 1939 auto *BitCast = Builder.CreateBitCast( 1940 Alloca, Type::getInt8PtrTy(M->getContext()), UseCall->getName()); 1941 ArgAllocas[AllocArgIndex] = BitCast; 1942 AllocaReplacement = BitCast; 1943 } 1944 1945 UseCall->replaceAllUsesWith(AllocaReplacement); 1946 UseCall->eraseFromParent(); 1947 } 1948 // Remove @llvm.call.preallocated.setup(). 1949 cast<Instruction>(PreallocatedSetup)->eraseFromParent(); 1950 } 1951 } 1952 1953 static bool 1954 OptimizeFunctions(Module &M, 1955 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 1956 function_ref<TargetTransformInfo &(Function &)> GetTTI, 1957 function_ref<BlockFrequencyInfo &(Function &)> GetBFI, 1958 function_ref<DominatorTree &(Function &)> LookupDomTree, 1959 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats, 1960 function_ref<void(Function &F)> ChangedCFGCallback, 1961 function_ref<void(Function &F)> DeleteFnCallback) { 1962 1963 bool Changed = false; 1964 1965 std::vector<Function *> AllCallsCold; 1966 for (Function &F : llvm::make_early_inc_range(M)) 1967 if (hasOnlyColdCalls(F, GetBFI)) 1968 AllCallsCold.push_back(&F); 1969 1970 // Optimize functions. 1971 for (Function &F : llvm::make_early_inc_range(M)) { 1972 // Don't perform global opt pass on naked functions; we don't want fast 1973 // calling conventions for naked functions. 1974 if (F.hasFnAttribute(Attribute::Naked)) 1975 continue; 1976 1977 // Functions without names cannot be referenced outside this module. 1978 if (!F.hasName() && !F.isDeclaration() && !F.hasLocalLinkage()) 1979 F.setLinkage(GlobalValue::InternalLinkage); 1980 1981 if (deleteIfDead(F, NotDiscardableComdats, DeleteFnCallback)) { 1982 Changed = true; 1983 continue; 1984 } 1985 1986 // LLVM's definition of dominance allows instructions that are cyclic 1987 // in unreachable blocks, e.g.: 1988 // %pat = select i1 %condition, @global, i16* %pat 1989 // because any instruction dominates an instruction in a block that's 1990 // not reachable from entry. 1991 // So, remove unreachable blocks from the function, because a) there's 1992 // no point in analyzing them and b) GlobalOpt should otherwise grow 1993 // some more complicated logic to break these cycles. 1994 // Notify the analysis manager that we've modified the function's CFG. 
1995 if (!F.isDeclaration()) { 1996 if (removeUnreachableBlocks(F)) { 1997 Changed = true; 1998 ChangedCFGCallback(F); 1999 } 2000 } 2001 2002 Changed |= processGlobal(F, GetTTI, GetTLI, LookupDomTree); 2003 2004 if (!F.hasLocalLinkage()) 2005 continue; 2006 2007 // If we have an inalloca parameter that we can safely remove the 2008 // inalloca attribute from, do so. This unlocks optimizations that 2009 // wouldn't be safe in the presence of inalloca. 2010 // FIXME: We should also hoist allocas affected by this to the entry 2011 // block if possible. 2012 if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca) && 2013 !F.hasAddressTaken() && !hasMustTailCallers(&F) && !F.isVarArg()) { 2014 RemoveAttribute(&F, Attribute::InAlloca); 2015 Changed = true; 2016 } 2017 2018 // FIXME: handle invokes 2019 // FIXME: handle musttail 2020 if (F.getAttributes().hasAttrSomewhere(Attribute::Preallocated)) { 2021 if (!F.hasAddressTaken() && !hasMustTailCallers(&F) && 2022 !hasInvokeCallers(&F)) { 2023 RemovePreallocated(&F); 2024 Changed = true; 2025 } 2026 continue; 2027 } 2028 2029 if (hasChangeableCC(&F) && !F.isVarArg() && !F.hasAddressTaken()) { 2030 NumInternalFunc++; 2031 TargetTransformInfo &TTI = GetTTI(F); 2032 // Change the calling convention to coldcc if either stress testing is 2033 // enabled or the target would like to use coldcc on functions which are 2034 // cold at all call sites and the callers contain no other non-coldcc 2035 // calls. 2036 if (EnableColdCCStressTest || 2037 (TTI.useColdCCForColdCall(F) && 2038 isValidCandidateForColdCC(F, GetBFI, AllCallsCold))) { 2039 F.setCallingConv(CallingConv::Cold); 2040 changeCallSitesToColdCC(&F); 2041 Changed = true; 2042 NumColdCC++; 2043 } 2044 } 2045 2046 if (hasChangeableCC(&F) && !F.isVarArg() && !F.hasAddressTaken()) { 2047 // If this function has a calling convention worth changing, is not a 2048 // varargs function, and is only called directly, promote it to use the 2049 // Fast calling convention. 2050 F.setCallingConv(CallingConv::Fast); 2051 ChangeCalleesToFastCall(&F); 2052 ++NumFastCallFns; 2053 Changed = true; 2054 } 2055 2056 if (F.getAttributes().hasAttrSomewhere(Attribute::Nest) && 2057 !F.hasAddressTaken()) { 2058 // The function is not used by a trampoline intrinsic, so it is safe 2059 // to remove the 'nest' attribute. 2060 RemoveAttribute(&F, Attribute::Nest); 2061 ++NumNestRemoved; 2062 Changed = true; 2063 } 2064 } 2065 return Changed; 2066 } 2067 2068 static bool 2069 OptimizeGlobalVars(Module &M, 2070 function_ref<TargetTransformInfo &(Function &)> GetTTI, 2071 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 2072 function_ref<DominatorTree &(Function &)> LookupDomTree, 2073 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2074 bool Changed = false; 2075 2076 for (GlobalVariable &GV : llvm::make_early_inc_range(M.globals())) { 2077 // Global variables without names cannot be referenced outside this module. 2078 if (!GV.hasName() && !GV.isDeclaration() && !GV.hasLocalLinkage()) 2079 GV.setLinkage(GlobalValue::InternalLinkage); 2080 // Simplify the initializer. 2081 if (GV.hasInitializer()) 2082 if (auto *C = dyn_cast<Constant>(GV.getInitializer())) { 2083 auto &DL = M.getDataLayout(); 2084 // TLI is not used in the case of a Constant, so use the default nullptr 2085 // for that optional parameter, since we don't have a Function to 2086 // provide GetTLI anyway.
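        // Sketch (hypothetical IR): an initializer written as a constant
        // expression, e.g. i8 trunc (i64 300 to i8), can fold here to the
        // plain constant i8 44.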
2087 Constant *New = ConstantFoldConstant(C, DL, /*TLI*/ nullptr); 2088 if (New != C) 2089 GV.setInitializer(New); 2090 } 2091 2092 if (deleteIfDead(GV, NotDiscardableComdats)) { 2093 Changed = true; 2094 continue; 2095 } 2096 2097 Changed |= processGlobal(GV, GetTTI, GetTLI, LookupDomTree); 2098 } 2099 return Changed; 2100 } 2101 2102 /// Evaluate static constructors in the function, if we can. Return true if we 2103 /// can, false otherwise. 2104 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL, 2105 TargetLibraryInfo *TLI) { 2106 // Skip external functions. 2107 if (F->isDeclaration()) 2108 return false; 2109 // Call the function. 2110 Evaluator Eval(DL, TLI); 2111 Constant *RetValDummy; 2112 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, 2113 SmallVector<Constant*, 0>()); 2114 2115 if (EvalSuccess) { 2116 ++NumCtorsEvaluated; 2117 2118 // We succeeded at evaluation: commit the result. 2119 auto NewInitializers = Eval.getMutatedInitializers(); 2120 LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2121 << F->getName() << "' to " << NewInitializers.size() 2122 << " stores.\n"); 2123 for (const auto &Pair : NewInitializers) 2124 Pair.first->setInitializer(Pair.second); 2125 for (GlobalVariable *GV : Eval.getInvariants()) 2126 GV->setConstant(true); 2127 } 2128 2129 return EvalSuccess; 2130 } 2131 2132 static int compareNames(Constant *const *A, Constant *const *B) { 2133 Value *AStripped = (*A)->stripPointerCasts(); 2134 Value *BStripped = (*B)->stripPointerCasts(); 2135 return AStripped->getName().compare(BStripped->getName()); 2136 } 2137 2138 static void setUsedInitializer(GlobalVariable &V, 2139 const SmallPtrSetImpl<GlobalValue *> &Init) { 2140 if (Init.empty()) { 2141 V.eraseFromParent(); 2142 return; 2143 } 2144 2145 // Type of pointer to the array of pointers. 2146 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); 2147 2148 SmallVector<Constant *, 8> UsedArray; 2149 for (GlobalValue *GV : Init) { 2150 Constant *Cast 2151 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy); 2152 UsedArray.push_back(Cast); 2153 } 2154 // Sort to get deterministic order. 2155 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); 2156 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); 2157 2158 Module *M = V.getParent(); 2159 V.removeFromParent(); 2160 GlobalVariable *NV = 2161 new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage, 2162 ConstantArray::get(ATy, UsedArray), ""); 2163 NV->takeName(&V); 2164 NV->setSection("llvm.metadata"); 2165 delete &V; 2166 } 2167 2168 namespace { 2169 2170 /// An easy to access representation of llvm.used and llvm.compiler.used. 
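/// For reference, these arrays look like (hypothetical IR):
///   @llvm.used = appending global [1 x i8*]
///     [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"
/// This class caches their members in sets for cheap membership queries and
/// writes any changes back via syncVariablesAndSets().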
2171 class LLVMUsed { 2172 SmallPtrSet<GlobalValue *, 4> Used; 2173 SmallPtrSet<GlobalValue *, 4> CompilerUsed; 2174 GlobalVariable *UsedV; 2175 GlobalVariable *CompilerUsedV; 2176 2177 public: 2178 LLVMUsed(Module &M) { 2179 SmallVector<GlobalValue *, 4> Vec; 2180 UsedV = collectUsedGlobalVariables(M, Vec, false); 2181 Used = {Vec.begin(), Vec.end()}; 2182 Vec.clear(); 2183 CompilerUsedV = collectUsedGlobalVariables(M, Vec, true); 2184 CompilerUsed = {Vec.begin(), Vec.end()}; 2185 } 2186 2187 using iterator = SmallPtrSet<GlobalValue *, 4>::iterator; 2188 using used_iterator_range = iterator_range<iterator>; 2189 2190 iterator usedBegin() { return Used.begin(); } 2191 iterator usedEnd() { return Used.end(); } 2192 2193 used_iterator_range used() { 2194 return used_iterator_range(usedBegin(), usedEnd()); 2195 } 2196 2197 iterator compilerUsedBegin() { return CompilerUsed.begin(); } 2198 iterator compilerUsedEnd() { return CompilerUsed.end(); } 2199 2200 used_iterator_range compilerUsed() { 2201 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd()); 2202 } 2203 2204 bool usedCount(GlobalValue *GV) const { return Used.count(GV); } 2205 2206 bool compilerUsedCount(GlobalValue *GV) const { 2207 return CompilerUsed.count(GV); 2208 } 2209 2210 bool usedErase(GlobalValue *GV) { return Used.erase(GV); } 2211 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } 2212 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; } 2213 2214 bool compilerUsedInsert(GlobalValue *GV) { 2215 return CompilerUsed.insert(GV).second; 2216 } 2217 2218 void syncVariablesAndSets() { 2219 if (UsedV) 2220 setUsedInitializer(*UsedV, Used); 2221 if (CompilerUsedV) 2222 setUsedInitializer(*CompilerUsedV, CompilerUsed); 2223 } 2224 }; 2225 2226 } // end anonymous namespace 2227 2228 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { 2229 if (GA.use_empty()) // No use at all. 2230 return false; 2231 2232 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && 2233 "We should have removed the duplicated " 2234 "element from llvm.compiler.used"); 2235 if (!GA.hasOneUse()) 2236 // Strictly more than one use. So at least one is not in llvm.used and 2237 // llvm.compiler.used. 2238 return true; 2239 2240 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. 2241 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); 2242 } 2243 2244 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, 2245 const LLVMUsed &U) { 2246 unsigned N = 2; 2247 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && 2248 "We should have removed the duplicated " 2249 "element from llvm.compiler.used"); 2250 if (U.usedCount(&V) || U.compilerUsedCount(&V)) 2251 ++N; 2252 return V.hasNUsesOrMore(N); 2253 } 2254 2255 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { 2256 if (!GA.hasLocalLinkage()) 2257 return true; 2258 2259 return U.usedCount(&GA) || U.compilerUsedCount(&GA); 2260 } 2261 2262 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U, 2263 bool &RenameTarget) { 2264 RenameTarget = false; 2265 bool Ret = false; 2266 if (hasUseOtherThanLLVMUsed(GA, U)) 2267 Ret = true; 2268 2269 // If the alias is externally visible, we may still be able to simplify it. 2270 if (!mayHaveOtherReferences(GA, U)) 2271 return Ret; 2272 2273 // If the aliasee has internal linkage, give it the name and linkage 2274 // of the alias, and delete the alias. This turns: 2275 // define internal ... @f(...) 2276 // @a = alias ... 
@f 2277 // into: 2278 // define ... @a(...) 2279 Constant *Aliasee = GA.getAliasee(); 2280 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2281 if (!Target->hasLocalLinkage()) 2282 return Ret; 2283 2284 // Do not perform the transform if multiple aliases potentially target the 2285 // aliasee. This check also ensures that it is safe to replace the section 2286 // and other attributes of the aliasee with those of the alias. 2287 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) 2288 return Ret; 2289 2290 RenameTarget = true; 2291 return true; 2292 } 2293 2294 static bool 2295 OptimizeGlobalAliases(Module &M, 2296 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2297 bool Changed = false; 2298 LLVMUsed Used(M); 2299 2300 for (GlobalValue *GV : Used.used()) 2301 Used.compilerUsedErase(GV); 2302 2303 // Return whether GV is explicitly or implicitly dso_local and not replaceable 2304 // by another definition in the current linkage unit. 2305 auto IsModuleLocal = [](GlobalValue &GV) { 2306 return !GlobalValue::isInterposableLinkage(GV.getLinkage()) && 2307 (GV.isDSOLocal() || GV.isImplicitDSOLocal()); 2308 }; 2309 2310 for (GlobalAlias &J : llvm::make_early_inc_range(M.aliases())) { 2311 // Aliases without names cannot be referenced outside this module. 2312 if (!J.hasName() && !J.isDeclaration() && !J.hasLocalLinkage()) 2313 J.setLinkage(GlobalValue::InternalLinkage); 2314 2315 if (deleteIfDead(J, NotDiscardableComdats)) { 2316 Changed = true; 2317 continue; 2318 } 2319 2320 // If the alias can change at link time, nothing can be done - bail out. 2321 if (!IsModuleLocal(J)) 2322 continue; 2323 2324 Constant *Aliasee = J.getAliasee(); 2325 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts()); 2326 // We can't trivially replace the alias with the aliasee if the aliasee is 2327 // non-trivial in some way. We also can't replace the alias with the aliasee 2328 // if the aliasee may be preemptible at runtime. On ELF, a non-preemptible 2329 // alias can be used to access the definition as if preemption did not 2330 // happen. 2331 // TODO: Try to handle non-zero GEPs of local aliasees. 2332 if (!Target || !IsModuleLocal(*Target)) 2333 continue; 2334 2335 Target->removeDeadConstantUsers(); 2336 2337 // Make all users of the alias use the aliasee instead. 2338 bool RenameTarget; 2339 if (!hasUsesToReplace(J, Used, RenameTarget)) 2340 continue; 2341 2342 J.replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J.getType())); 2343 ++NumAliasesResolved; 2344 Changed = true; 2345 2346 if (RenameTarget) { 2347 // Give the aliasee the name, linkage, and other attributes of the alias. 2348 Target->takeName(&J); 2349 Target->setLinkage(J.getLinkage()); 2350 Target->setDSOLocal(J.isDSOLocal()); 2351 Target->setVisibility(J.getVisibility()); 2352 Target->setDLLStorageClass(J.getDLLStorageClass()); 2353 2354 if (Used.usedErase(&J)) 2355 Used.usedInsert(Target); 2356 2357 if (Used.compilerUsedErase(&J)) 2358 Used.compilerUsedInsert(Target); 2359 } else if (mayHaveOtherReferences(J, Used)) 2360 continue; 2361 2362 // Delete the alias. 2363 M.getAliasList().erase(&J); 2364 ++NumAliasesRemoved; 2365 Changed = true; 2366 } 2367 2368 Used.syncVariablesAndSets(); 2369 2370 return Changed; 2371 } 2372 2373 static Function * 2374 FindCXAAtExit(Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) { 2375 // Hack to get a default TLI before we have an actual Function.
2376 auto FuncIter = M.begin(); 2377 if (FuncIter == M.end()) 2378 return nullptr; 2379 auto *TLI = &GetTLI(*FuncIter); 2380 2381 LibFunc F = LibFunc_cxa_atexit; 2382 if (!TLI->has(F)) 2383 return nullptr; 2384 2385 Function *Fn = M.getFunction(TLI->getName(F)); 2386 if (!Fn) 2387 return nullptr; 2388 2389 // Now get the actual TLI for Fn. 2390 TLI = &GetTLI(*Fn); 2391 2392 // Make sure that the function has the correct prototype. 2393 if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit) 2394 return nullptr; 2395 2396 return Fn; 2397 } 2398 2399 /// Returns whether the given function is an empty C++ destructor and can 2400 /// therefore be eliminated. 2401 /// Note that we assume that other optimization passes have already simplified 2402 /// the code so we simply check for 'ret'. 2403 static bool cxxDtorIsEmpty(const Function &Fn) { 2404 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 2405 // nounwind, but that doesn't seem worth doing. 2406 if (Fn.isDeclaration()) 2407 return false; 2408 2409 for (const auto &I : Fn.getEntryBlock()) { 2410 if (I.isDebugOrPseudoInst()) 2411 continue; 2412 if (isa<ReturnInst>(I)) 2413 return true; 2414 break; 2415 } 2416 return false; 2417 } 2418 2419 static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { 2420 /// Itanium C++ ABI p3.3.5: 2421 /// 2422 /// After constructing a global (or local static) object, that will require 2423 /// destruction on exit, a termination function is registered as follows: 2424 /// 2425 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); 2426 /// 2427 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the 2428 /// call f(p) when DSO d is unloaded, before all such termination calls 2429 /// registered before this one. It returns zero if registration is 2430 /// successful, nonzero on failure. 2431 2432 // This pass will look for calls to __cxa_atexit where the function is trivial 2433 // and remove them. 2434 bool Changed = false; 2435 2436 for (User *U : llvm::make_early_inc_range(CXAAtExitFn->users())) { 2437 // We're only interested in calls. Theoretically, we could handle invoke 2438 // instructions as well, but neither llvm-gcc nor clang generate invokes 2439 // to __cxa_atexit. 2440 CallInst *CI = dyn_cast<CallInst>(U); 2441 if (!CI) 2442 continue; 2443 2444 Function *DtorFn = 2445 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); 2446 if (!DtorFn || !cxxDtorIsEmpty(*DtorFn)) 2447 continue; 2448 2449 // Just remove the call. 
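      // __cxa_atexit returns zero on success (see the ABI text above), so
      // replacing the call's uses with a null value models a successful
      // registration of the now-removed destructor.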
2450 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); 2451 CI->eraseFromParent(); 2452 2453 ++NumCXXDtorsRemoved; 2454 2455 Changed |= true; 2456 } 2457 2458 return Changed; 2459 } 2460 2461 static bool 2462 optimizeGlobalsInModule(Module &M, const DataLayout &DL, 2463 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 2464 function_ref<TargetTransformInfo &(Function &)> GetTTI, 2465 function_ref<BlockFrequencyInfo &(Function &)> GetBFI, 2466 function_ref<DominatorTree &(Function &)> LookupDomTree, 2467 function_ref<void(Function &F)> ChangedCFGCallback, 2468 function_ref<void(Function &F)> DeleteFnCallback) { 2469 SmallPtrSet<const Comdat *, 8> NotDiscardableComdats; 2470 bool Changed = false; 2471 bool LocalChange = true; 2472 std::optional<uint32_t> FirstNotFullyEvaluatedPriority; 2473 2474 while (LocalChange) { 2475 LocalChange = false; 2476 2477 NotDiscardableComdats.clear(); 2478 for (const GlobalVariable &GV : M.globals()) 2479 if (const Comdat *C = GV.getComdat()) 2480 if (!GV.isDiscardableIfUnused() || !GV.use_empty()) 2481 NotDiscardableComdats.insert(C); 2482 for (Function &F : M) 2483 if (const Comdat *C = F.getComdat()) 2484 if (!F.isDefTriviallyDead()) 2485 NotDiscardableComdats.insert(C); 2486 for (GlobalAlias &GA : M.aliases()) 2487 if (const Comdat *C = GA.getComdat()) 2488 if (!GA.isDiscardableIfUnused() || !GA.use_empty()) 2489 NotDiscardableComdats.insert(C); 2490 2491 // Delete functions that are trivially dead, ccc -> fastcc 2492 LocalChange |= OptimizeFunctions(M, GetTLI, GetTTI, GetBFI, LookupDomTree, 2493 NotDiscardableComdats, ChangedCFGCallback, 2494 DeleteFnCallback); 2495 2496 // Optimize global_ctors list. 2497 LocalChange |= 2498 optimizeGlobalCtorsList(M, [&](uint32_t Priority, Function *F) { 2499 if (FirstNotFullyEvaluatedPriority && 2500 *FirstNotFullyEvaluatedPriority != Priority) 2501 return false; 2502 bool Evaluated = EvaluateStaticConstructor(F, DL, &GetTLI(*F)); 2503 if (!Evaluated) 2504 FirstNotFullyEvaluatedPriority = Priority; 2505 return Evaluated; 2506 }); 2507 2508 // Optimize non-address-taken globals. 2509 LocalChange |= OptimizeGlobalVars(M, GetTTI, GetTLI, LookupDomTree, 2510 NotDiscardableComdats); 2511 2512 // Resolve aliases, when possible. 2513 LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats); 2514 2515 // Try to remove trivial global destructors if they are not removed 2516 // already. 2517 Function *CXAAtExitFn = FindCXAAtExit(M, GetTLI); 2518 if (CXAAtExitFn) 2519 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); 2520 2521 Changed |= LocalChange; 2522 } 2523 2524 // TODO: Move all global ctors functions to the end of the module for code 2525 // layout. 
2526 2527 return Changed; 2528 } 2529 2530 PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) { 2531 auto &DL = M.getDataLayout(); 2532 auto &FAM = 2533 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 2534 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{ 2535 return FAM.getResult<DominatorTreeAnalysis>(F); 2536 }; 2537 auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & { 2538 return FAM.getResult<TargetLibraryAnalysis>(F); 2539 }; 2540 auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & { 2541 return FAM.getResult<TargetIRAnalysis>(F); 2542 }; 2543 2544 auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & { 2545 return FAM.getResult<BlockFrequencyAnalysis>(F); 2546 }; 2547 auto ChangedCFGCallback = [&FAM](Function &F) { 2548 FAM.invalidate(F, PreservedAnalyses::none()); 2549 }; 2550 auto DeleteFnCallback = [&FAM](Function &F) { FAM.clear(F, F.getName()); }; 2551 2552 if (!optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI, LookupDomTree, 2553 ChangedCFGCallback, DeleteFnCallback)) 2554 return PreservedAnalyses::all(); 2555 2556 PreservedAnalyses PA = PreservedAnalyses::none(); 2557 // We made sure to clear analyses for deleted functions. 2558 PA.preserve<FunctionAnalysisManagerModuleProxy>(); 2559 // The only place we modify the CFG is when calling 2560 // removeUnreachableBlocks(), but there we make sure to invalidate analyses 2561 // for modified functions. 2562 PA.preserveSet<CFGAnalyses>(); 2563 return PA; 2564 } 2565 2566 namespace { 2567 2568 struct GlobalOptLegacyPass : public ModulePass { 2569 static char ID; // Pass identification, replacement for typeid 2570 2571 GlobalOptLegacyPass() : ModulePass(ID) { 2572 initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry()); 2573 } 2574 2575 bool runOnModule(Module &M) override { 2576 if (skipModule(M)) 2577 return false; 2578 2579 auto &DL = M.getDataLayout(); 2580 auto LookupDomTree = [this](Function &F) -> DominatorTree & { 2581 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 2582 }; 2583 auto GetTLI = [this](Function &F) -> TargetLibraryInfo & { 2584 return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 2585 }; 2586 auto GetTTI = [this](Function &F) -> TargetTransformInfo & { 2587 return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2588 }; 2589 2590 auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & { 2591 return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI(); 2592 }; 2593 2594 auto ChangedCFGCallback = [&LookupDomTree](Function &F) { 2595 auto &DT = LookupDomTree(F); 2596 DT.recalculate(F); 2597 }; 2598 2599 return optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI, LookupDomTree, 2600 ChangedCFGCallback, nullptr); 2601 } 2602 2603 void getAnalysisUsage(AnalysisUsage &AU) const override { 2604 AU.addRequired<TargetLibraryInfoWrapperPass>(); 2605 AU.addRequired<TargetTransformInfoWrapperPass>(); 2606 AU.addRequired<DominatorTreeWrapperPass>(); 2607 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2608 } 2609 }; 2610 2611 } // end anonymous namespace 2612 2613 char GlobalOptLegacyPass::ID = 0; 2614 2615 INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt", 2616 "Global Variable Optimizer", false, false) 2617 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 2618 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 2619 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 2620 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2621 
INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt", 2622 "Global Variable Optimizer", false, false) 2623 2624 ModulePass *llvm::createGlobalOptimizerPass() { 2625 return new GlobalOptLegacyPass(); 2626 } 2627
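// Usage note (a minimal sketch, assuming a built `opt` binary): this pass can
// be exercised with the new pass manager via `opt -passes=globalopt in.ll -S`,
// or through the legacy pass created by createGlobalOptimizerPass() above.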