//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
STATISTIC(NumInternalFunc, "Number of internal functions");
STATISTIC(NumColdCC, "Number of functions marked coldcc");

static cl::opt<bool>
    EnableColdCCStressTest("enable-coldcc-stress-test",
                           cl::desc("Enable stress test of coldcc by adding "
                                    "calling conv to all internal functions."),
                           cl::init(false), cl::Hidden);

static cl::opt<int> ColdCCRelFreq(
    "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc(
        "Maximum block frequency, expressed as a percentage of caller's "
        "entry frequency, for a call site to be considered cold for enabling "
        "coldcc"));

/// Is this global variable possibly used by a leak checker as a root?  If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(GV->getValueType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
      default: break;
      case Type::PointerTyID: return true;
      case Type::ArrayTyID:
      case Type::VectorTyID: {
        SequentialType *STy = cast<SequentialType>(Ty);
        Types.push_back(STy->getElementType());
        break;
      }
      case Type::StructTyID: {
        StructType *STy = cast<StructType>(Ty);
        if (STy->isOpaque()) return true;
        for (StructType::element_iterator I = STy->element_begin(),
             E = STy->element_end(); I != E; ++I) {
          Type *InnerTy = *I;
          if (isa<PointerType>(InnerTy)) return true;
          if (isa<CompositeType>(InnerTy))
            Types.push_back(InnerTy);
        }
        break;
      }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (true);
}
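// As a rough illustration of the predicate above (hypothetical IR, not from
// an actual test case): when @root is stored to but never read, a chain like
//
//   %buf = call i8* @malloc(i64 16)
//   %p   = getelementptr i8, i8* %buf, i64 8
//   store i8* %p, i8** @root
//
// is safe to remove wholesale: the malloc is an allocation function, the GEP
// has all-constant indices and a single use, and nothing in the chain has
// other side effects.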
/// This GV is a pointer root.  Loop over all users of the global and clean up
/// any that obviously don't store a dynamically allocated value into it; those
/// are the only stores a leak checker might care about.
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist
  // the memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut
  // down before other threads that are still expecting to use those globals.
  // To handle that case, we expect the program may create a singleton and
  // never destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MTI->getSource())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (true);
      I->eraseFromParent();
    }
  }

  return Changed;
}
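// A hypothetical before/after for the cleanup above, given a leak-checker
// root @root:
//
//   store i8* getelementptr (...constant...), i8** @root  ; deleted outright
//   %m = call i8* @malloc(i64 8)
//   store i8* %m, i8** @root                              ; deleted together
//                                                         ; with the malloc
//
// The first store writes a constant, which can never be the sole pointer to
// dynamically allocated memory; the second is deleted only because the whole
// computation chain feeding it (the malloc) is removed along with it.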
/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a
/// change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding a pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}
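// A small hypothetical example of the cleanup above: once @g is known to be
// constant with initializer { i32 1, i32 2 },
//
//   %v = load i32, i32* getelementptr ({ i32, i32 }, { i32, i32 }* @g,
//                                      i32 0, i32 1)
//
// folds to the constant 2, and any remaining store to @g (necessarily either
// unreachable or re-storing the initializer) is simply deleted.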
static bool isSafeSROAElementUse(Value *V);

/// Return true if the specified GEP is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAGEP(User *U) {
  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue())
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI; // Skip over the pointer index.

  // For all other levels we require that the indices are constant and in
  // range.  In particular, consider: A[0][i].  We cannot know that the user
  // isn't doing invalid things like allowing i to index an out-of-range
  // subscript that accesses A[1].  This can also happen between different
  // members of a struct in llvm IR.
  for (; GEPI != E; ++GEPI) {
    if (GEPI.isStruct())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
    if (!IdxVal || (GEPI.isBoundedSequential() &&
                    IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
      return false;
  }

  return llvm::all_of(U->users(),
                      [](User *UU) { return isSafeSROAElementUse(UU); });
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.  Check it and its users are safe to SRA.
  return isa<GetElementPtrInst>(I) && isSafeSROAGEP(I);
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users()) {
    // The user of the global must be a GEP Inst or a ConstantExpr GEP.
    if (!isa<GetElementPtrInst>(U) &&
        (!isa<ConstantExpr>(U) ||
         cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
      return false;

    // Check the GEP and its users are safe to SRA.
    if (!isSafeSROAGEP(U))
      return false;
  }

  return true;
}

/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
                                 uint64_t FragmentOffsetInBits,
                                 uint64_t FragmentSizeInBits,
                                 unsigned NumElements) {
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);
  for (auto *GVE : GVs) {
    DIVariable *Var = GVE->getVariable();
    DIExpression *Expr = GVE->getExpression();
    if (NumElements > 1) {
      if (auto E = DIExpression::createFragmentExpression(
              Expr, FragmentOffsetInBits, FragmentSizeInBits))
        Expr = *E;
      else
        return;
    }
    auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
    NGV->addDebugInfo(NGVE);
  }
}
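// To sketch the transformation that follows (hypothetical IR, for
// illustration only): an internal global
//
//   @g = internal global { i32, [4 x i8] } { i32 1, [4 x i8] c"abcd" }
//
// whose only uses are 'gep @g, 0, C, ...' expressions is split into one
// internal global per field, @g.0 and @g.1, and each GEP is rewritten to
// address the corresponding new global directly.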
/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have determined that this
/// transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable *> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    unsigned NumElements = STy->getNumElements();
    NewGlobals.reserve(NumElements);
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);

      // Copy over the debug info for the variable.
      uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
      uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(i);
      transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, NumElements);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = STy->getNumElements();
    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return nullptr; // It's not worth it.
    NewGlobals.reserve(NumElements);
    auto ElTy = STy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(ElTy);
    unsigned EltAlign = DL.getABITypeAlignment(ElTy);
    uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
      transferSRADebugInfo(GV, NGV, FragmentSizeInBits * i, FragmentSizeInBits,
                           NumElements);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, the first index, which has to be zero or else the
    // program is quite broken (undefined).  Get operand 2, which is the
    // structure or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];
    Type *NewTy = NewGlobals[Val]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}
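// To make the next predicate concrete (hypothetical IR): given
//
//   %fp = load void ()*, void ()** @g
//   call void %fp()
//
// the call traps at run time if %fp is null, so a null value can never have
// been stored to @g on any executed path.  A use such as
// 'store void ()* %fp, void ()** @other' would not trap, and makes the
// predicate fail.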
/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    if (const Instruction *I = dyn_cast<Instruction>(U)) {
      // If null pointer is considered valid, then all uses are non-trapping.
      // Non address-space 0 globals have already been pruned by the caller.
      if (NullPointerIsDefined(I->getFunction()))
        return false;
    }
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    // Uses are non-trapping if null pointer is considered valid.
    // Non address-space 0 globals are already pruned by the caller.
    if (NullPointerIsDefined(I->getFunction()))
      return false;
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be
        // careful that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate
          // UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
                                                 NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// value; optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect loads and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}
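// Roughly, for a global only ever assigned one function (hypothetical IR):
//
//   store void ()* @impl, void ()** @handler  ; the single non-null store
//   %f = load void ()*, void ()** @handler
//   call void %f()                            ; would trap if %f were null
//
// the call is rewritten to 'call void @impl()' and the load disappears, which
// is where the "devirtualized" statistic above comes from.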
/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and
/// rewrite any loads of GV as uses of the new global.
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
                    << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we
  // have a malloc + bitcast) then replace them with uses of the new global.
  // Update other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
      new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                         GlobalValue::InternalLinkage,
                         ConstantInt::getFalse(GV->getContext()),
                         GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
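      // For example (hypothetical IR), 'store i8* %mem, i8** @g' becomes
      // 'store i1 true, i1* @g.init': the pointer value itself is no longer
      // interesting, only the fact that initialization has happened.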
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
                               InitBool->getName() + ".val", false, 0,
                               LI->getOrdering(), LI->getSyncScopeID(),
                               LI->isUnordered() ? (Instruction *)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To enable further optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V.  We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                        const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL =
        new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on.  This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
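      // For instance (hypothetical IR), this accepts
      //   %f = getelementptr %struct.T, %struct.T* %ld, i64 %i, i32 1
      // where operand 1 walks the array and operand 2 selects the field.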
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't
  // know that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                InsertedScalarizedValues, PHIsToRewrite);
    Result = new LoadInst(V->getType()->getPointerElementType(), V,
                          LI->getName() + ".f" + Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
        PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                        PN->getNumIncomingValues(),
                        PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                             std::vector<Value *>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}
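// The overall effect of the heap-SRA machinery above and below, sketched on
// hypothetical IR: a single allocation of an array of two-field structs,
// stored only into @g, becomes two allocations, one per field, stored into
// two new globals @g.f0 and @g.f1.  Every 'getelementptr %ld, i64 %i, i32 1'
// of a pointer %ld loaded from @g is then redirected to index a pointer
// loaded from @g.f1 instead.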
/// We are performing Heap SRoA on a global.  Ptr is a value loaded from the
/// global.  Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned> > &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI
                    << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value *> FieldGlobals;
  std::vector<Value *> FieldMallocs;

  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;
       ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  //  The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at
  // the end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal =
        new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
                     FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, OpBundles, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// As we process loads, if we can't immediately update all uses of the load,
  /// keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are
  // now loads, and all uses of those loads are simple.  Rewrite them to use
  // loads of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
      Constant *Null = Constant::getNullValue(ValTy);
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 && "Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// This function is called when we see a pointer global variable with a single
/// value stored into it that is a malloc or cast of malloc.
static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               const DataLayout &DL,
                                               TargetLibraryInfo *TLI) {
  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded, icmp'd, and
  // GEP'd.  These are all uses we could rewrite to use the global instead.
1490   SmallPtrSet<const PHINode*, 8> PHIs;
1491   if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1492     return false;
1493
1494   // If we have a global that is only initialized with a fixed size malloc,
1495   // transform the program to use global memory instead of malloc'd memory.
1496   // This eliminates dynamic allocation, avoids an indirection accessing the
1497   // data, and exposes the resultant global to further GlobalOpt.
1498   // We cannot optimize the malloc if we cannot determine the malloc array size.
1499   Value *NElems = getMallocArraySize(CI, DL, TLI, true);
1500   if (!NElems)
1501     return false;
1502
1503   if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1504     // Restrict this transformation to only working on small allocations
1505     // (2048 bytes currently), as we don't want to introduce a 16M global or
1506     // something.
1507     if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
1508       OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
1509       return true;
1510     }
1511
1512   // If the allocation is an array of structures, consider transforming this
1513   // into multiple malloc'd arrays, one for each field. This is basically
1514   // SRoA for malloc'd memory.
1515
1516   if (Ordering != AtomicOrdering::NotAtomic)
1517     return false;
1518
1519   // If this is an allocation of a fixed size array of structs, analyze as a
1520   // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1521   if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1522     if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1523       AllocTy = AT->getElementType();
1524
1525   StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1526   if (!AllocSTy)
1527     return false;
1528
1529   // If the structure has an unreasonable number of fields, leave it
1530   // alone.
1531   if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1532       AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1533
1534     // If this is a fixed size array, transform the Malloc to be an alloc of
1535     // structs. malloc [100 x struct],1 -> malloc struct, 100
1536     if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
1537       Type *IntPtrTy = DL.getIntPtrType(CI->getType());
1538       unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
1539       Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1540       Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1541       SmallVector<OperandBundleDef, 1> OpBundles;
1542       CI->getOperandBundlesAsDefs(OpBundles);
1543       Instruction *Malloc =
1544           CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
1545                                  OpBundles, nullptr, CI->getName());
1546       Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1547       CI->replaceAllUsesWith(Cast);
1548       CI->eraseFromParent();
1549       if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
1550         CI = cast<CallInst>(BCI->getOperand(0));
1551       else
1552         CI = cast<CallInst>(Malloc);
1553     }
1554
1555     PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
1556                          TLI);
1557     return true;
1558   }
1559
1560   return false;
1561 }
1562
1563 // Try to optimize globals based on the knowledge that only one value (besides
1564 // its initializer) is ever stored to the global.
1565 static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1566                                      AtomicOrdering Ordering,
1567                                      const DataLayout &DL,
1568                                      TargetLibraryInfo *TLI) {
1569   // Ignore no-op GEPs and bitcasts.
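  // Illustrative example (not from the original source): if the stored-once
  // value is the constant expression
  //   bitcast (i32* @p to i8*)
  // then stripPointerCasts() lets the code below reason about @p directly.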
1570   StoredOnceVal = StoredOnceVal->stripPointerCasts();
1571
1572   // If we are dealing with a pointer global that is initialized to null and
1573   // only has one (non-null) value stored into it, then we can optimize any
1574   // users of the loaded value (often calls and loads) that would trap if the
1575   // value was null.
1576   if (GV->getInitializer()->getType()->isPointerTy() &&
1577       GV->getInitializer()->isNullValue() &&
1578       !NullPointerIsDefined(
1579           nullptr /* F */,
1580           GV->getInitializer()->getType()->getPointerAddressSpace())) {
1581     if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1582       if (GV->getInitializer()->getType() != SOVC->getType())
1583         SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1584
1585       // Optimize away any trapping uses of the loaded value.
1586       if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
1587         return true;
1588     } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
1589       Type *MallocType = getMallocAllocatedType(CI, TLI);
1590       if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1591                                                            Ordering, DL, TLI))
1592         return true;
1593     }
1594   }
1595
1596   return false;
1597 }
1598
1599 /// At this point, we have learned that the only two values ever stored into GV
1600 /// are its initializer and OtherVal. See if we can shrink the global into a
1601 /// boolean and select between the two values whenever it is used. This exposes
1602 /// the values to other scalar optimizations.
1603 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1604   Type *GVElType = GV->getValueType();
1605
1606   // If GVElType is already i1, it is already shrunk. If the type of the GV is
1607   // an FP value, pointer or vector, don't do this optimization because a select
1608   // between them is very expensive and unlikely to lead to later
1609   // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1610   // where v1 and v2 both require constant pool loads, a big loss.
1611   if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1612       GVElType->isFloatingPointTy() ||
1613       GVElType->isPointerTy() || GVElType->isVectorTy())
1614     return false;
1615
1616   // Walk the use list of the global to see if all the uses are loads or
1617   // stores. If there is anything else, bail out.
1618   for (User *U : GV->users())
1619     if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1620       return false;
1621
1622   LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
1623
1624   // Create the new global, initializing it to false.
1625   GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1626                                              false,
1627                                              GlobalValue::InternalLinkage,
1628                                              ConstantInt::getFalse(GV->getContext()),
1629                                              GV->getName()+".b",
1630                                              GV->getThreadLocalMode(),
1631                                              GV->getType()->getAddressSpace());
1632   NewGV->copyAttributesFrom(GV);
1633   GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);
1634
1635   Constant *InitVal = GV->getInitializer();
1636   assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1637          "No reason to shrink to bool!");
1638
1639   SmallVector<DIGlobalVariableExpression *, 1> GVs;
1640   GV->getDebugInfo(GVs);
1641
1642   // If initialized to zero and storing one into the global, we can use a cast
1643   // instead of a select to synthesize the desired value.
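  // Illustrative sketch (hypothetical global @g with initializer 0 and stored
  // value 1): a load of @g becomes
  //   %b = load i1, i1* @g.b
  //   %v = zext i1 %b to i32
  // instead of a select between the two constants.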
1644   bool IsOneZero = false;
1645   bool EmitOneOrZero = true;
1646   if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)){
1647     IsOneZero = InitVal->isNullValue() && CI->isOne();
1648
1649     if (ConstantInt *CIInit = dyn_cast<ConstantInt>(GV->getInitializer())){
1650       uint64_t ValInit = CIInit->getZExtValue();
1651       uint64_t ValOther = CI->getZExtValue();
1652       uint64_t ValMinus = ValOther - ValInit;
1653
1654       for(auto *GVe : GVs){
1655         DIGlobalVariable *DGV = GVe->getVariable();
1656         DIExpression *E = GVe->getExpression();
1657         const DataLayout &DL = GV->getParent()->getDataLayout();
1658         unsigned SizeInOctets =
1659           DL.getTypeAllocSizeInBits(NewGV->getType()->getElementType()) / 8;
1660
1661         // It is expected that the address of the optimized global variable is
1662         // on top of the DWARF expression stack. After optimization, the value
1663         // of that variable will be either 0 (the initial value) or 1 (the
1664         // other value). The following expression should return a constant
1665         // integer value depending on the value at the global object's address:
1666         //   val * (ValOther - ValInit) + ValInit:
1667         //   DW_OP_deref DW_OP_constu <ValMinus>
1668         //   DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
1669         SmallVector<uint64_t, 12> Ops = {
1670             dwarf::DW_OP_deref_size, SizeInOctets,
1671             dwarf::DW_OP_constu, ValMinus,
1672             dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
1673             dwarf::DW_OP_plus};
1674         bool WithStackValue = true;
1675         E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
1676         DIGlobalVariableExpression *DGVE =
1677             DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
1678         NewGV->addDebugInfo(DGVE);
1679       }
1680       EmitOneOrZero = false;
1681     }
1682   }
1683
1684   if (EmitOneOrZero) {
1685     // FIXME: This will only emit the address of the variable for the
1686     // debugger; only 0 or 1 will ever be written to it.
1687     for(auto *GV : GVs)
1688       NewGV->addDebugInfo(GV);
1689   }
1690
1691   while (!GV->use_empty()) {
1692     Instruction *UI = cast<Instruction>(GV->user_back());
1693     if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1694       // Change the store into a boolean store.
1695       bool StoringOther = SI->getOperand(0) == OtherVal;
1696       // Only do this if we weren't storing a loaded value.
1697       Value *StoreVal;
1698       if (StoringOther || SI->getOperand(0) == InitVal) {
1699         StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1700                                     StoringOther);
1701       } else {
1702         // Otherwise, we are storing a previously loaded copy. To do this,
1703         // change the copy from copying the original value to just copying the
1704         // bool.
1705         Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1706
1707         // If we've already replaced the input, StoredVal will be a cast or
1708         // select instruction. If not, it will be a load of the original
1709         // global.
1710         if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1711           assert(LI->getOperand(0) == GV && "Not a copy!");
1712           // Insert a new load, to preserve the saved value.
1713           StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
1714                                   LI->getName() + ".b", false, 0,
1715                                   LI->getOrdering(), LI->getSyncScopeID(), LI);
1716         } else {
1717           assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1718                  "This is not a form that we understand!");
1719           StoreVal = StoredVal->getOperand(0);
1720           assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1721         }
1722       }
1723       StoreInst *NSI =
1724           new StoreInst(StoreVal, NewGV, false, 0, SI->getOrdering(),
1725                         SI->getSyncScopeID(), SI);
1726       NSI->setDebugLoc(SI->getDebugLoc());
1727     } else {
1728       // Change the load into a load of bool then a select.
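      // Illustrative sketch (hypothetical values, initializer 10 and other
      // value 20): the rewritten load becomes
      //   %b = load i1, i1* @g.b
      //   %v = select i1 %b, i32 20, i32 10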
1729 LoadInst *LI = cast<LoadInst>(UI); 1730 LoadInst *NLI = 1731 new LoadInst(NewGV->getValueType(), NewGV, LI->getName() + ".b", 1732 false, 0, LI->getOrdering(), LI->getSyncScopeID(), LI); 1733 Instruction *NSI; 1734 if (IsOneZero) 1735 NSI = new ZExtInst(NLI, LI->getType(), "", LI); 1736 else 1737 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); 1738 NSI->takeName(LI); 1739 // Since LI is split into two instructions, NLI and NSI both inherit the 1740 // same DebugLoc 1741 NLI->setDebugLoc(LI->getDebugLoc()); 1742 NSI->setDebugLoc(LI->getDebugLoc()); 1743 LI->replaceAllUsesWith(NSI); 1744 } 1745 UI->eraseFromParent(); 1746 } 1747 1748 // Retain the name of the old global variable. People who are debugging their 1749 // programs may expect these variables to be named the same. 1750 NewGV->takeName(GV); 1751 GV->eraseFromParent(); 1752 return true; 1753 } 1754 1755 static bool deleteIfDead( 1756 GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 1757 GV.removeDeadConstantUsers(); 1758 1759 if (!GV.isDiscardableIfUnused() && !GV.isDeclaration()) 1760 return false; 1761 1762 if (const Comdat *C = GV.getComdat()) 1763 if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C)) 1764 return false; 1765 1766 bool Dead; 1767 if (auto *F = dyn_cast<Function>(&GV)) 1768 Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead(); 1769 else 1770 Dead = GV.use_empty(); 1771 if (!Dead) 1772 return false; 1773 1774 LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n"); 1775 GV.eraseFromParent(); 1776 ++NumDeleted; 1777 return true; 1778 } 1779 1780 static bool isPointerValueDeadOnEntryToFunction( 1781 const Function *F, GlobalValue *GV, 1782 function_ref<DominatorTree &(Function &)> LookupDomTree) { 1783 // Find all uses of GV. We expect them all to be in F, and if we can't 1784 // identify any of the uses we bail out. 1785 // 1786 // On each of these uses, identify if the memory that GV points to is 1787 // used/required/live at the start of the function. If it is not, for example 1788 // if the first thing the function does is store to the GV, the GV can 1789 // possibly be demoted. 1790 // 1791 // We don't do an exhaustive search for memory operations - simply look 1792 // through bitcasts as they're quite common and benign. 1793 const DataLayout &DL = GV->getParent()->getDataLayout(); 1794 SmallVector<LoadInst *, 4> Loads; 1795 SmallVector<StoreInst *, 4> Stores; 1796 for (auto *U : GV->users()) { 1797 if (Operator::getOpcode(U) == Instruction::BitCast) { 1798 for (auto *UU : U->users()) { 1799 if (auto *LI = dyn_cast<LoadInst>(UU)) 1800 Loads.push_back(LI); 1801 else if (auto *SI = dyn_cast<StoreInst>(UU)) 1802 Stores.push_back(SI); 1803 else 1804 return false; 1805 } 1806 continue; 1807 } 1808 1809 Instruction *I = dyn_cast<Instruction>(U); 1810 if (!I) 1811 return false; 1812 assert(I->getParent()->getParent() == F); 1813 1814 if (auto *LI = dyn_cast<LoadInst>(I)) 1815 Loads.push_back(LI); 1816 else if (auto *SI = dyn_cast<StoreInst>(I)) 1817 Stores.push_back(SI); 1818 else 1819 return false; 1820 } 1821 1822 // We have identified all uses of GV into loads and stores. Now check if all 1823 // of them are known not to depend on the value of the global at the function 1824 // entry point. We do this by ensuring that every load is dominated by at 1825 // least one store. 1826 auto &DT = LookupDomTree(*const_cast<Function *>(F)); 1827 1828 // The below check is quadratic. Check we're not going to do too many tests. 
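  // (It performs up to |Loads| * |Stores| dominance queries in the worst
  // case.)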
1829 // FIXME: Even though this will always have worst-case quadratic time, we 1830 // could put effort into minimizing the average time by putting stores that 1831 // have been shown to dominate at least one load at the beginning of the 1832 // Stores array, making subsequent dominance checks more likely to succeed 1833 // early. 1834 // 1835 // The threshold here is fairly large because global->local demotion is a 1836 // very powerful optimization should it fire. 1837 const unsigned Threshold = 100; 1838 if (Loads.size() * Stores.size() > Threshold) 1839 return false; 1840 1841 for (auto *L : Loads) { 1842 auto *LTy = L->getType(); 1843 if (none_of(Stores, [&](const StoreInst *S) { 1844 auto *STy = S->getValueOperand()->getType(); 1845 // The load is only dominated by the store if DomTree says so 1846 // and the number of bits loaded in L is less than or equal to 1847 // the number of bits stored in S. 1848 return DT.dominates(S, L) && 1849 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy); 1850 })) 1851 return false; 1852 } 1853 // All loads have known dependences inside F, so the global can be localized. 1854 return true; 1855 } 1856 1857 /// C may have non-instruction users. Can all of those users be turned into 1858 /// instructions? 1859 static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) { 1860 // We don't do this exhaustively. The most common pattern that we really need 1861 // to care about is a constant GEP or constant bitcast - so just looking 1862 // through one single ConstantExpr. 1863 // 1864 // The set of constants that this function returns true for must be able to be 1865 // handled by makeAllConstantUsesInstructions. 1866 for (auto *U : C->users()) { 1867 if (isa<Instruction>(U)) 1868 continue; 1869 if (!isa<ConstantExpr>(U)) 1870 // Non instruction, non-constantexpr user; cannot convert this. 1871 return false; 1872 for (auto *UU : U->users()) 1873 if (!isa<Instruction>(UU)) 1874 // A constantexpr used by another constant. We don't try and recurse any 1875 // further but just bail out at this point. 1876 return false; 1877 } 1878 1879 return true; 1880 } 1881 1882 /// C may have non-instruction users, and 1883 /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the 1884 /// non-instruction users to instructions. 1885 static void makeAllConstantUsesInstructions(Constant *C) { 1886 SmallVector<ConstantExpr*,4> Users; 1887 for (auto *U : C->users()) { 1888 if (isa<ConstantExpr>(U)) 1889 Users.push_back(cast<ConstantExpr>(U)); 1890 else 1891 // We should never get here; allNonInstructionUsersCanBeMadeInstructions 1892 // should not have returned true for C. 1893 assert( 1894 isa<Instruction>(U) && 1895 "Can't transform non-constantexpr non-instruction to instruction!"); 1896 } 1897 1898 SmallVector<Value*,4> UUsers; 1899 for (auto *U : Users) { 1900 UUsers.clear(); 1901 for (auto *UU : U->users()) 1902 UUsers.push_back(UU); 1903 for (auto *UU : UUsers) { 1904 Instruction *UI = cast<Instruction>(UU); 1905 Instruction *NewU = U->getAsInstruction(); 1906 NewU->insertBefore(UI); 1907 UI->replaceUsesOfWith(U, NewU); 1908 } 1909 // We've replaced all the uses, so destroy the constant. (destroyConstant 1910 // will update value handles and metadata.) 1911 U->destroyConstant(); 1912 } 1913 } 1914 1915 /// Analyze the specified global variable and optimize 1916 /// it if possible. If we make a change, return true. 
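/// The optimizations attempted below, in order: localize the global to an
/// alloca in its single accessing function, delete a global that is never
/// loaded, mark a global that is never written (beyond its initializer) as
/// constant, SRA an aggregate global, forward a once-stored value, and shrink
/// a two-valued global to a boolean.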
1917 static bool processInternalGlobal(
1918     GlobalVariable *GV, const GlobalStatus &GS, TargetLibraryInfo *TLI,
1919     function_ref<DominatorTree &(Function &)> LookupDomTree) {
1920   auto &DL = GV->getParent()->getDataLayout();
1921   // If this is a first class global and has only one accessing function and
1922   // this function is non-recursive, we replace the global with a local alloca
1923   // in this function.
1924   //
1925   // NOTE: It doesn't make sense to promote non-single-value types since we
1926   // are just replacing static memory with stack memory.
1927   //
1928   // If the global is in a different address space, don't bring it to the stack.
1929   if (!GS.HasMultipleAccessingFunctions &&
1930       GS.AccessingFunction &&
1931       GV->getValueType()->isSingleValueType() &&
1932       GV->getType()->getAddressSpace() == 0 &&
1933       !GV->isExternallyInitialized() &&
1934       allNonInstructionUsersCanBeMadeInstructions(GV) &&
1935       GS.AccessingFunction->doesNotRecurse() &&
1936       isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
1937                                           LookupDomTree)) {
1938     const DataLayout &DL = GV->getParent()->getDataLayout();
1939
1940     LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1941     Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1942                                                         ->getEntryBlock().begin());
1943     Type *ElemTy = GV->getValueType();
1944     // FIXME: Pass Global's alignment when globals have alignment
1945     AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
1946                                         GV->getName(), &FirstI);
1947     if (!isa<UndefValue>(GV->getInitializer()))
1948       new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1949
1950     makeAllConstantUsesInstructions(GV);
1951
1952     GV->replaceAllUsesWith(Alloca);
1953     GV->eraseFromParent();
1954     ++NumLocalized;
1955     return true;
1956   }
1957
1958   // If the global is never loaded (but may be stored to), it is dead.
1959   // Delete it now.
1960   if (!GS.IsLoaded) {
1961     LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1962
1963     bool Changed;
1964     if (isLeakCheckerRoot(GV)) {
1965       // Delete any constant stores to the global.
1966       Changed = CleanupPointerRootUsers(GV, TLI);
1967     } else {
1968       // Delete any stores we can find to the global. We may not be able to
1969       // make it completely dead though.
1970       Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1971     }
1972
1973     // If the global is dead now, delete it.
1974     if (GV->use_empty()) {
1975       GV->eraseFromParent();
1976       ++NumDeleted;
1977       Changed = true;
1978     }
1979     return Changed;
1980
1981   }
1982   if (GS.StoredType <= GlobalStatus::InitializerStored) {
1983     LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1984
1985     // Don't actually mark a global constant if it's atomic because atomic loads
1986     // are implemented by a trivial cmpxchg in some edge-cases and that usually
1987     // requires write access to the variable even if it's not actually changed.
1988     if (GS.Ordering == AtomicOrdering::NotAtomic)
1989       GV->setConstant(true);
1990
1991     // Clean up any obviously simplifiable users now.
1992     CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1993
1994     // If the global is dead now, just nuke it.
1995     if (GV->use_empty()) {
1996       LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
1997                         << "all users and delete global!\n");
1998       GV->eraseFromParent();
1999       ++NumDeleted;
2000       return true;
2001     }
2002
2003     // Fall through to the next check; see if we can optimize further.
2004 ++NumMarked; 2005 } 2006 if (!GV->getInitializer()->getType()->isSingleValueType()) { 2007 const DataLayout &DL = GV->getParent()->getDataLayout(); 2008 if (SRAGlobal(GV, DL)) 2009 return true; 2010 } 2011 if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) { 2012 // If the initial value for the global was an undef value, and if only 2013 // one other value was stored into it, we can just change the 2014 // initializer to be the stored value, then delete all stores to the 2015 // global. This allows us to mark it constant. 2016 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 2017 if (isa<UndefValue>(GV->getInitializer())) { 2018 // Change the initial value here. 2019 GV->setInitializer(SOVConstant); 2020 2021 // Clean up any obviously simplifiable users now. 2022 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 2023 2024 if (GV->use_empty()) { 2025 LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to " 2026 << "simplify all users and delete global!\n"); 2027 GV->eraseFromParent(); 2028 ++NumDeleted; 2029 } 2030 ++NumSubstitute; 2031 return true; 2032 } 2033 2034 // Try to optimize globals based on the knowledge that only one value 2035 // (besides its initializer) is ever stored to the global. 2036 if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI)) 2037 return true; 2038 2039 // Otherwise, if the global was not a boolean, we can shrink it to be a 2040 // boolean. 2041 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { 2042 if (GS.Ordering == AtomicOrdering::NotAtomic) { 2043 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 2044 ++NumShrunkToBool; 2045 return true; 2046 } 2047 } 2048 } 2049 } 2050 2051 return false; 2052 } 2053 2054 /// Analyze the specified global variable and optimize it if possible. If we 2055 /// make a change, return true. 2056 static bool 2057 processGlobal(GlobalValue &GV, TargetLibraryInfo *TLI, 2058 function_ref<DominatorTree &(Function &)> LookupDomTree) { 2059 if (GV.getName().startswith("llvm.")) 2060 return false; 2061 2062 GlobalStatus GS; 2063 2064 if (GlobalStatus::analyzeGlobal(&GV, GS)) 2065 return false; 2066 2067 bool Changed = false; 2068 if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) { 2069 auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global 2070 : GlobalValue::UnnamedAddr::Local; 2071 if (NewUnnamedAddr != GV.getUnnamedAddr()) { 2072 GV.setUnnamedAddr(NewUnnamedAddr); 2073 NumUnnamed++; 2074 Changed = true; 2075 } 2076 } 2077 2078 // Do more involved optimizations if the global is internal. 2079 if (!GV.hasLocalLinkage()) 2080 return Changed; 2081 2082 auto *GVar = dyn_cast<GlobalVariable>(&GV); 2083 if (!GVar) 2084 return Changed; 2085 2086 if (GVar->isConstant() || !GVar->hasInitializer()) 2087 return Changed; 2088 2089 return processInternalGlobal(GVar, GS, TLI, LookupDomTree) || Changed; 2090 } 2091 2092 /// Walk all of the direct calls of the specified function, changing them to 2093 /// FastCC. 
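/// For example (illustrative IR, names hypothetical), a direct call site
///   %r = call i32 @callee(i32 %x)
/// becomes
///   %r = call fastcc i32 @callee(i32 %x)
/// once @callee itself has been switched to the fastcc calling convention.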
2094 static void ChangeCalleesToFastCall(Function *F) {
2095   for (User *U : F->users()) {
2096     if (isa<BlockAddress>(U))
2097       continue;
2098     CallSite CS(cast<Instruction>(U));
2099     CS.setCallingConv(CallingConv::Fast);
2100   }
2101 }
2102
2103 static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
2104                                Attribute::AttrKind A) {
2105   unsigned AttrIndex;
2106   if (Attrs.hasAttrSomewhere(A, &AttrIndex))
2107     return Attrs.removeAttribute(C, AttrIndex, A);
2108   return Attrs;
2109 }
2110
2111 static void RemoveAttribute(Function *F, Attribute::AttrKind A) {
2112   F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A));
2113   for (User *U : F->users()) {
2114     if (isa<BlockAddress>(U))
2115       continue;
2116     CallSite CS(cast<Instruction>(U));
2117     CS.setAttributes(StripAttr(F->getContext(), CS.getAttributes(), A));
2118   }
2119 }
2120
2121 /// Return true if this is a calling convention that we'd like to change. The
2122 /// idea here is that we don't want to mess with the convention if the user
2123 /// explicitly requested something with performance implications like coldcc,
2124 /// GHC, or anyregcc.
2125 static bool hasChangeableCC(Function *F) {
2126   CallingConv::ID CC = F->getCallingConv();
2127
2128   // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
2129   if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
2130     return false;
2131
2132   // FIXME: Change CC for the whole chain of musttail calls when possible.
2133   //
2134   // Can't change the CC of a function that either has musttail calls, or is a
2135   // musttail callee itself.
2136   for (User *U : F->users()) {
2137     if (isa<BlockAddress>(U))
2138       continue;
2139     CallInst* CI = dyn_cast<CallInst>(U);
2140     if (!CI)
2141       continue;
2142
2143     if (CI->isMustTailCall())
2144       return false;
2145   }
2146
2147   for (BasicBlock &BB : *F)
2148     if (BB.getTerminatingMustTailCall())
2149       return false;
2150
2151   return true;
2152 }
2153
2154 /// Return true if the block containing the call site has a BlockFrequency of
2155 /// less than ColdCCRelFreq% of the entry block.
2156 static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
2157   const BranchProbability ColdProb(ColdCCRelFreq, 100);
2158   auto CallSiteBB = CS.getInstruction()->getParent();
2159   auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
2160   auto CallerEntryFreq =
2161       CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
2162   return CallSiteFreq < CallerEntryFreq * ColdProb;
2163 }
2164
2165 // This function checks if the input function F is cold at all call sites. It
2166 // also looks at each call site's containing function, returning false if the
2167 // caller contains other non-cold call sites. The input vector AllCallsCold
2168 // contains a list of functions that only have call sites in cold blocks.
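// For example (hypothetical IR), a call site reached only through an unlikely
// branch such as
//   br i1 %rare, label %cold, label %cont, !prof !0
//   ; with !0 = !{!"branch_weights", i32 1, i32 2000}
// gives %cold a block frequency far below the entry block's, so a call to F
// placed there counts as cold under the ColdCCRelFreq threshold.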
2169 static bool
2170 isValidCandidateForColdCC(Function &F,
2171                           function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2172                           const std::vector<Function *> &AllCallsCold) {
2173
2174   if (F.user_empty())
2175     return false;
2176
2177   for (User *U : F.users()) {
2178     if (isa<BlockAddress>(U))
2179       continue;
2180
2181     CallSite CS(cast<Instruction>(U));
2182     Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
2183     BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
2184     if (!isColdCallSite(CS, CallerBFI))
2185       return false;
2186     auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
2187     if (It == AllCallsCold.end())
2188       return false;
2189   }
2190   return true;
2191 }
2192
2193 static void changeCallSitesToColdCC(Function *F) {
2194   for (User *U : F->users()) {
2195     if (isa<BlockAddress>(U))
2196       continue;
2197     CallSite CS(cast<Instruction>(U));
2198     CS.setCallingConv(CallingConv::Cold);
2199   }
2200 }
2201
2202 // This function iterates over all the call instructions in the input Function
2203 // and checks that all call sites are in cold blocks and are allowed to use the
2204 // coldcc calling convention.
2205 static bool
2206 hasOnlyColdCalls(Function &F,
2207                  function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
2208   for (BasicBlock &BB : F) {
2209     for (Instruction &I : BB) {
2210       if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2211         CallSite CS(cast<Instruction>(CI));
2212         // Skip over inline asm instructions since they aren't function calls.
2213         if (CI->isInlineAsm())
2214           continue;
2215         Function *CalledFn = CI->getCalledFunction();
2216         if (!CalledFn)
2217           return false;
2218         if (!CalledFn->hasLocalLinkage())
2219           return false;
2220         // Skip over intrinsics since they won't remain as function calls.
2221         if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
2222           continue;
2223         // Check if it's valid to use coldcc calling convention.
2224         if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
2225             CalledFn->hasAddressTaken())
2226           return false;
2227         BlockFrequencyInfo &CallerBFI = GetBFI(F);
2228         if (!isColdCallSite(CS, CallerBFI))
2229           return false;
2230       }
2231     }
2232   }
2233   return true;
2234 }
2235
2236 static bool
2237 OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
2238                   function_ref<TargetTransformInfo &(Function &)> GetTTI,
2239                   function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2240                   function_ref<DominatorTree &(Function &)> LookupDomTree,
2241                   SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2242
2243   bool Changed = false;
2244
2245   std::vector<Function *> AllCallsCold;
2246   for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
2247     Function *F = &*FI++;
2248     if (hasOnlyColdCalls(*F, GetBFI))
2249       AllCallsCold.push_back(F);
2250   }
2251
2252   // Optimize functions.
2253   for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2254     Function *F = &*FI++;
2255
2256     // Don't perform global opt pass on naked functions; we don't want fast
2257     // calling conventions for naked functions.
2258     if (F->hasFnAttribute(Attribute::Naked))
2259       continue;
2260
2261     // Functions without names cannot be referenced outside this module.
2262 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage()) 2263 F->setLinkage(GlobalValue::InternalLinkage); 2264 2265 if (deleteIfDead(*F, NotDiscardableComdats)) { 2266 Changed = true; 2267 continue; 2268 } 2269 2270 // LLVM's definition of dominance allows instructions that are cyclic 2271 // in unreachable blocks, e.g.: 2272 // %pat = select i1 %condition, @global, i16* %pat 2273 // because any instruction dominates an instruction in a block that's 2274 // not reachable from entry. 2275 // So, remove unreachable blocks from the function, because a) there's 2276 // no point in analyzing them and b) GlobalOpt should otherwise grow 2277 // some more complicated logic to break these cycles. 2278 // Removing unreachable blocks might invalidate the dominator so we 2279 // recalculate it. 2280 if (!F->isDeclaration()) { 2281 if (removeUnreachableBlocks(*F)) { 2282 auto &DT = LookupDomTree(*F); 2283 DT.recalculate(*F); 2284 Changed = true; 2285 } 2286 } 2287 2288 Changed |= processGlobal(*F, TLI, LookupDomTree); 2289 2290 if (!F->hasLocalLinkage()) 2291 continue; 2292 2293 // If we have an inalloca parameter that we can safely remove the 2294 // inalloca attribute from, do so. This unlocks optimizations that 2295 // wouldn't be safe in the presence of inalloca. 2296 // FIXME: We should also hoist alloca affected by this to the entry 2297 // block if possible. 2298 if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca) && 2299 !F->hasAddressTaken()) { 2300 RemoveAttribute(F, Attribute::InAlloca); 2301 Changed = true; 2302 } 2303 2304 if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) { 2305 NumInternalFunc++; 2306 TargetTransformInfo &TTI = GetTTI(*F); 2307 // Change the calling convention to coldcc if either stress testing is 2308 // enabled or the target would like to use coldcc on functions which are 2309 // cold at all call sites and the callers contain no other non coldcc 2310 // calls. 2311 if (EnableColdCCStressTest || 2312 (TTI.useColdCCForColdCall(*F) && 2313 isValidCandidateForColdCC(*F, GetBFI, AllCallsCold))) { 2314 F->setCallingConv(CallingConv::Cold); 2315 changeCallSitesToColdCC(F); 2316 Changed = true; 2317 NumColdCC++; 2318 } 2319 } 2320 2321 if (hasChangeableCC(F) && !F->isVarArg() && 2322 !F->hasAddressTaken()) { 2323 // If this function has a calling convention worth changing, is not a 2324 // varargs function, and is only called directly, promote it to use the 2325 // Fast calling convention. 2326 F->setCallingConv(CallingConv::Fast); 2327 ChangeCalleesToFastCall(F); 2328 ++NumFastCallFns; 2329 Changed = true; 2330 } 2331 2332 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 2333 !F->hasAddressTaken()) { 2334 // The function is not used by a trampoline intrinsic, so it is safe 2335 // to remove the 'nest' attribute. 2336 RemoveAttribute(F, Attribute::Nest); 2337 ++NumNestRemoved; 2338 Changed = true; 2339 } 2340 } 2341 return Changed; 2342 } 2343 2344 static bool 2345 OptimizeGlobalVars(Module &M, TargetLibraryInfo *TLI, 2346 function_ref<DominatorTree &(Function &)> LookupDomTree, 2347 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2348 bool Changed = false; 2349 2350 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 2351 GVI != E; ) { 2352 GlobalVariable *GV = &*GVI++; 2353 // Global variables without names cannot be referenced outside this module. 
2354 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage()) 2355 GV->setLinkage(GlobalValue::InternalLinkage); 2356 // Simplify the initializer. 2357 if (GV->hasInitializer()) 2358 if (auto *C = dyn_cast<Constant>(GV->getInitializer())) { 2359 auto &DL = M.getDataLayout(); 2360 Constant *New = ConstantFoldConstant(C, DL, TLI); 2361 if (New && New != C) 2362 GV->setInitializer(New); 2363 } 2364 2365 if (deleteIfDead(*GV, NotDiscardableComdats)) { 2366 Changed = true; 2367 continue; 2368 } 2369 2370 Changed |= processGlobal(*GV, TLI, LookupDomTree); 2371 } 2372 return Changed; 2373 } 2374 2375 /// Evaluate a piece of a constantexpr store into a global initializer. This 2376 /// returns 'Init' modified to reflect 'Val' stored into it. At this point, the 2377 /// GEP operands of Addr [0, OpNo) have been stepped into. 2378 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2379 ConstantExpr *Addr, unsigned OpNo) { 2380 // Base case of the recursion. 2381 if (OpNo == Addr->getNumOperands()) { 2382 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2383 return Val; 2384 } 2385 2386 SmallVector<Constant*, 32> Elts; 2387 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { 2388 // Break up the constant into its elements. 2389 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2390 Elts.push_back(Init->getAggregateElement(i)); 2391 2392 // Replace the element that we are supposed to. 2393 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2394 unsigned Idx = CU->getZExtValue(); 2395 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2396 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 2397 2398 // Return the modified struct. 2399 return ConstantStruct::get(STy, Elts); 2400 } 2401 2402 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 2403 SequentialType *InitTy = cast<SequentialType>(Init->getType()); 2404 uint64_t NumElts = InitTy->getNumElements(); 2405 2406 // Break up the array into elements. 2407 for (uint64_t i = 0, e = NumElts; i != e; ++i) 2408 Elts.push_back(Init->getAggregateElement(i)); 2409 2410 assert(CI->getZExtValue() < NumElts); 2411 Elts[CI->getZExtValue()] = 2412 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); 2413 2414 if (Init->getType()->isArrayTy()) 2415 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); 2416 return ConstantVector::get(Elts); 2417 } 2418 2419 /// We have decided that Addr (which satisfies the predicate 2420 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 2421 static void CommitValueTo(Constant *Val, Constant *Addr) { 2422 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 2423 assert(GV->hasInitializer()); 2424 GV->setInitializer(Val); 2425 return; 2426 } 2427 2428 ConstantExpr *CE = cast<ConstantExpr>(Addr); 2429 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2430 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); 2431 } 2432 2433 /// Given a map of address -> value, where addresses are expected to be some form 2434 /// of either a global or a constant GEP, set the initializer for the address to 2435 /// be the value. This performs mostly the same function as CommitValueTo() 2436 /// and EvaluateStoreInto() but is optimized to be more efficient for the common 2437 /// case where the set of addresses are GEPs sharing the same underlying global, 2438 /// processing the GEPs in batches rather than individually. 
2439 ///
2440 /// To give an example, consider the following C++ code adapted from the clang
2441 /// regression tests:
2442 /// struct S {
2443 ///  int n = 10;
2444 ///  int m = 2 * n;
2445 ///  S(int a) : n(a) {}
2446 /// };
2447 ///
2448 /// template<typename T>
2449 /// struct U {
2450 ///  T *r = &q;
2451 ///  T q = 42;
2452 ///  U *p = this;
2453 /// };
2454 ///
2455 /// U<S> e;
2456 ///
2457 /// The global static constructor for 'e' will need to initialize 'r' and 'p' of
2458 /// the outer struct, while also initializing the inner 'q' struct's 'n' and 'm'
2459 /// members. This batch algorithm will simply use the general CommitValueTo()
2460 /// method to handle the complex nested S struct initialization of 'q', before
2461 /// processing the outermost members in a single batch. Using CommitValueTo() to
2462 /// handle members of the outer struct is inefficient when the struct/array is
2463 /// very large, as we end up creating and destroying constant arrays for each
2464 /// initialization.
2465 /// For the above case, we expect the following IR to be generated:
2466 ///
2467 ///  %struct.U = type { %struct.S*, %struct.S, %struct.U* }
2468 ///  %struct.S = type { i32, i32 }
2469 ///  @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
2470 ///                                                   i64 0, i32 1),
2471 ///                          %struct.S { i32 42, i32 84 }, %struct.U* @e }
2472 /// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
2473 /// constant expression, while the other two elements of @e are "simple".
2474 static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
2475   SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
2476   SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
2477   SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
2478   SimpleCEs.reserve(Mem.size());
2479
2480   for (const auto &I : Mem) {
2481     if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
2482       GVs.push_back(std::make_pair(GV, I.second));
2483     } else {
2484       ConstantExpr *GEP = cast<ConstantExpr>(I.first);
2485       // We don't handle the deeply recursive case using the batch method.
2486       if (GEP->getNumOperands() > 3)
2487         ComplexCEs.push_back(std::make_pair(GEP, I.second));
2488       else
2489         SimpleCEs.push_back(std::make_pair(GEP, I.second));
2490     }
2491   }
2492
2493   // The algorithm below doesn't handle cases like nested structs, so use the
2494   // slower fully general method if we have to.
2495   for (auto ComplexCE : ComplexCEs)
2496     CommitValueTo(ComplexCE.second, ComplexCE.first);
2497
2498   for (auto GVPair : GVs) {
2499     assert(GVPair.first->hasInitializer());
2500     GVPair.first->setInitializer(GVPair.second);
2501   }
2502
2503   if (SimpleCEs.empty())
2504     return;
2505
2506   // We cache a single global's initializer elements in the case where the
2507   // subsequent address/val pair uses the same one. This avoids throwing away
2508   // and rebuilding the constant struct/vector/array just because one element
2509   // is modified at a time.
2510   SmallVector<Constant *, 32> Elts;
2511   Elts.reserve(SimpleCEs.size());
2512   GlobalVariable *CurrentGV = nullptr;
2513
2514   auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
2515     Constant *Init = GV->getInitializer();
2516     Type *Ty = Init->getType();
2517     if (Update) {
2518       if (CurrentGV) {
2519         assert(CurrentGV && "Expected a GV to commit to!");
2520         Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
2521         // We have a valid cache that needs to be committed.
2522 if (StructType *STy = dyn_cast<StructType>(CurrentInitTy)) 2523 CurrentGV->setInitializer(ConstantStruct::get(STy, Elts)); 2524 else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy)) 2525 CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts)); 2526 else 2527 CurrentGV->setInitializer(ConstantVector::get(Elts)); 2528 } 2529 if (CurrentGV == GV) 2530 return; 2531 // Need to clear and set up cache for new initializer. 2532 CurrentGV = GV; 2533 Elts.clear(); 2534 unsigned NumElts; 2535 if (auto *STy = dyn_cast<StructType>(Ty)) 2536 NumElts = STy->getNumElements(); 2537 else 2538 NumElts = cast<SequentialType>(Ty)->getNumElements(); 2539 for (unsigned i = 0, e = NumElts; i != e; ++i) 2540 Elts.push_back(Init->getAggregateElement(i)); 2541 } 2542 }; 2543 2544 for (auto CEPair : SimpleCEs) { 2545 ConstantExpr *GEP = CEPair.first; 2546 Constant *Val = CEPair.second; 2547 2548 GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0)); 2549 commitAndSetupCache(GV, GV != CurrentGV); 2550 ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2)); 2551 Elts[CI->getZExtValue()] = Val; 2552 } 2553 // The last initializer in the list needs to be committed, others 2554 // will be committed on a new initializer being processed. 2555 commitAndSetupCache(CurrentGV, true); 2556 } 2557 2558 /// Evaluate static constructors in the function, if we can. Return true if we 2559 /// can, false otherwise. 2560 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL, 2561 TargetLibraryInfo *TLI) { 2562 // Call the function. 2563 Evaluator Eval(DL, TLI); 2564 Constant *RetValDummy; 2565 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, 2566 SmallVector<Constant*, 0>()); 2567 2568 if (EvalSuccess) { 2569 ++NumCtorsEvaluated; 2570 2571 // We succeeded at evaluation: commit the result. 2572 LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2573 << F->getName() << "' to " 2574 << Eval.getMutatedMemory().size() << " stores.\n"); 2575 BatchCommitValueTo(Eval.getMutatedMemory()); 2576 for (GlobalVariable *GV : Eval.getInvariants()) 2577 GV->setConstant(true); 2578 } 2579 2580 return EvalSuccess; 2581 } 2582 2583 static int compareNames(Constant *const *A, Constant *const *B) { 2584 Value *AStripped = (*A)->stripPointerCastsNoFollowAliases(); 2585 Value *BStripped = (*B)->stripPointerCastsNoFollowAliases(); 2586 return AStripped->getName().compare(BStripped->getName()); 2587 } 2588 2589 static void setUsedInitializer(GlobalVariable &V, 2590 const SmallPtrSetImpl<GlobalValue *> &Init) { 2591 if (Init.empty()) { 2592 V.eraseFromParent(); 2593 return; 2594 } 2595 2596 // Type of pointer to the array of pointers. 2597 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); 2598 2599 SmallVector<Constant *, 8> UsedArray; 2600 for (GlobalValue *GV : Init) { 2601 Constant *Cast 2602 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy); 2603 UsedArray.push_back(Cast); 2604 } 2605 // Sort to get deterministic order. 2606 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); 2607 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); 2608 2609 Module *M = V.getParent(); 2610 V.removeFromParent(); 2611 GlobalVariable *NV = 2612 new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage, 2613 ConstantArray::get(ATy, UsedArray), ""); 2614 NV->takeName(&V); 2615 NV->setSection("llvm.metadata"); 2616 delete &V; 2617 } 2618 2619 namespace { 2620 2621 /// An easy to access representation of llvm.used and llvm.compiler.used. 
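/// For reference, these module-level arrays look like the following
/// (illustrative IR):
///   @llvm.used = appending global [1 x i8*]
///     [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"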
2622 class LLVMUsed { 2623 SmallPtrSet<GlobalValue *, 8> Used; 2624 SmallPtrSet<GlobalValue *, 8> CompilerUsed; 2625 GlobalVariable *UsedV; 2626 GlobalVariable *CompilerUsedV; 2627 2628 public: 2629 LLVMUsed(Module &M) { 2630 UsedV = collectUsedGlobalVariables(M, Used, false); 2631 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); 2632 } 2633 2634 using iterator = SmallPtrSet<GlobalValue *, 8>::iterator; 2635 using used_iterator_range = iterator_range<iterator>; 2636 2637 iterator usedBegin() { return Used.begin(); } 2638 iterator usedEnd() { return Used.end(); } 2639 2640 used_iterator_range used() { 2641 return used_iterator_range(usedBegin(), usedEnd()); 2642 } 2643 2644 iterator compilerUsedBegin() { return CompilerUsed.begin(); } 2645 iterator compilerUsedEnd() { return CompilerUsed.end(); } 2646 2647 used_iterator_range compilerUsed() { 2648 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd()); 2649 } 2650 2651 bool usedCount(GlobalValue *GV) const { return Used.count(GV); } 2652 2653 bool compilerUsedCount(GlobalValue *GV) const { 2654 return CompilerUsed.count(GV); 2655 } 2656 2657 bool usedErase(GlobalValue *GV) { return Used.erase(GV); } 2658 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } 2659 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; } 2660 2661 bool compilerUsedInsert(GlobalValue *GV) { 2662 return CompilerUsed.insert(GV).second; 2663 } 2664 2665 void syncVariablesAndSets() { 2666 if (UsedV) 2667 setUsedInitializer(*UsedV, Used); 2668 if (CompilerUsedV) 2669 setUsedInitializer(*CompilerUsedV, CompilerUsed); 2670 } 2671 }; 2672 2673 } // end anonymous namespace 2674 2675 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { 2676 if (GA.use_empty()) // No use at all. 2677 return false; 2678 2679 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && 2680 "We should have removed the duplicated " 2681 "element from llvm.compiler.used"); 2682 if (!GA.hasOneUse()) 2683 // Strictly more than one use. So at least one is not in llvm.used and 2684 // llvm.compiler.used. 2685 return true; 2686 2687 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. 2688 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); 2689 } 2690 2691 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, 2692 const LLVMUsed &U) { 2693 unsigned N = 2; 2694 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && 2695 "We should have removed the duplicated " 2696 "element from llvm.compiler.used"); 2697 if (U.usedCount(&V) || U.compilerUsedCount(&V)) 2698 ++N; 2699 return V.hasNUsesOrMore(N); 2700 } 2701 2702 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { 2703 if (!GA.hasLocalLinkage()) 2704 return true; 2705 2706 return U.usedCount(&GA) || U.compilerUsedCount(&GA); 2707 } 2708 2709 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U, 2710 bool &RenameTarget) { 2711 RenameTarget = false; 2712 bool Ret = false; 2713 if (hasUseOtherThanLLVMUsed(GA, U)) 2714 Ret = true; 2715 2716 // If the alias is externally visible, we may still be able to simplify it. 2717 if (!mayHaveOtherReferences(GA, U)) 2718 return Ret; 2719 2720 // If the aliasee has internal linkage, give it the name and linkage 2721 // of the alias, and delete the alias. This turns: 2722 // define internal ... @f(...) 2723 // @a = alias ... @f 2724 // into: 2725 // define ... @a(...) 
2726 Constant *Aliasee = GA.getAliasee(); 2727 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2728 if (!Target->hasLocalLinkage()) 2729 return Ret; 2730 2731 // Do not perform the transform if multiple aliases potentially target the 2732 // aliasee. This check also ensures that it is safe to replace the section 2733 // and other attributes of the aliasee with those of the alias. 2734 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) 2735 return Ret; 2736 2737 RenameTarget = true; 2738 return true; 2739 } 2740 2741 static bool 2742 OptimizeGlobalAliases(Module &M, 2743 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2744 bool Changed = false; 2745 LLVMUsed Used(M); 2746 2747 for (GlobalValue *GV : Used.used()) 2748 Used.compilerUsedErase(GV); 2749 2750 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2751 I != E;) { 2752 GlobalAlias *J = &*I++; 2753 2754 // Aliases without names cannot be referenced outside this module. 2755 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage()) 2756 J->setLinkage(GlobalValue::InternalLinkage); 2757 2758 if (deleteIfDead(*J, NotDiscardableComdats)) { 2759 Changed = true; 2760 continue; 2761 } 2762 2763 // If the alias can change at link time, nothing can be done - bail out. 2764 if (J->isInterposable()) 2765 continue; 2766 2767 Constant *Aliasee = J->getAliasee(); 2768 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts()); 2769 // We can't trivially replace the alias with the aliasee if the aliasee is 2770 // non-trivial in some way. 2771 // TODO: Try to handle non-zero GEPs of local aliasees. 2772 if (!Target) 2773 continue; 2774 Target->removeDeadConstantUsers(); 2775 2776 // Make all users of the alias use the aliasee instead. 2777 bool RenameTarget; 2778 if (!hasUsesToReplace(*J, Used, RenameTarget)) 2779 continue; 2780 2781 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType())); 2782 ++NumAliasesResolved; 2783 Changed = true; 2784 2785 if (RenameTarget) { 2786 // Give the aliasee the name, linkage and other attributes of the alias. 2787 Target->takeName(&*J); 2788 Target->setLinkage(J->getLinkage()); 2789 Target->setDSOLocal(J->isDSOLocal()); 2790 Target->setVisibility(J->getVisibility()); 2791 Target->setDLLStorageClass(J->getDLLStorageClass()); 2792 2793 if (Used.usedErase(&*J)) 2794 Used.usedInsert(Target); 2795 2796 if (Used.compilerUsedErase(&*J)) 2797 Used.compilerUsedInsert(Target); 2798 } else if (mayHaveOtherReferences(*J, Used)) 2799 continue; 2800 2801 // Delete the alias. 2802 M.getAliasList().erase(J); 2803 ++NumAliasesRemoved; 2804 Changed = true; 2805 } 2806 2807 Used.syncVariablesAndSets(); 2808 2809 return Changed; 2810 } 2811 2812 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) { 2813 LibFunc F = LibFunc_cxa_atexit; 2814 if (!TLI->has(F)) 2815 return nullptr; 2816 2817 Function *Fn = M.getFunction(TLI->getName(F)); 2818 if (!Fn) 2819 return nullptr; 2820 2821 // Make sure that the function has the correct prototype. 2822 if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit) 2823 return nullptr; 2824 2825 return Fn; 2826 } 2827 2828 /// Returns whether the given function is an empty C++ destructor and can 2829 /// therefore be eliminated. 2830 /// Note that we assume that other optimization passes have already simplified 2831 /// the code so we simply check for 'ret'. 
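/// After simplification, an "empty" destructor body looks like (illustrative):
///   define linkonce_odr void @_ZN1SD2Ev(%struct.S* %this) {
///   entry:
///     ret void
///   }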
2832 static bool cxxDtorIsEmpty(const Function &Fn) { 2833 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 2834 // nounwind, but that doesn't seem worth doing. 2835 if (Fn.isDeclaration()) 2836 return false; 2837 2838 for (auto &I : Fn.getEntryBlock()) { 2839 if (isa<DbgInfoIntrinsic>(I)) 2840 continue; 2841 if (isa<ReturnInst>(I)) 2842 return true; 2843 break; 2844 } 2845 return false; 2846 } 2847 2848 static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { 2849 /// Itanium C++ ABI p3.3.5: 2850 /// 2851 /// After constructing a global (or local static) object, that will require 2852 /// destruction on exit, a termination function is registered as follows: 2853 /// 2854 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); 2855 /// 2856 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the 2857 /// call f(p) when DSO d is unloaded, before all such termination calls 2858 /// registered before this one. It returns zero if registration is 2859 /// successful, nonzero on failure. 2860 2861 // This pass will look for calls to __cxa_atexit where the function is trivial 2862 // and remove them. 2863 bool Changed = false; 2864 2865 for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end(); 2866 I != E;) { 2867 // We're only interested in calls. Theoretically, we could handle invoke 2868 // instructions as well, but neither llvm-gcc nor clang generate invokes 2869 // to __cxa_atexit. 2870 CallInst *CI = dyn_cast<CallInst>(*I++); 2871 if (!CI) 2872 continue; 2873 2874 Function *DtorFn = 2875 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); 2876 if (!DtorFn || !cxxDtorIsEmpty(*DtorFn)) 2877 continue; 2878 2879 // Just remove the call. 2880 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); 2881 CI->eraseFromParent(); 2882 2883 ++NumCXXDtorsRemoved; 2884 2885 Changed |= true; 2886 } 2887 2888 return Changed; 2889 } 2890 2891 static bool optimizeGlobalsInModule( 2892 Module &M, const DataLayout &DL, TargetLibraryInfo *TLI, 2893 function_ref<TargetTransformInfo &(Function &)> GetTTI, 2894 function_ref<BlockFrequencyInfo &(Function &)> GetBFI, 2895 function_ref<DominatorTree &(Function &)> LookupDomTree) { 2896 SmallPtrSet<const Comdat *, 8> NotDiscardableComdats; 2897 bool Changed = false; 2898 bool LocalChange = true; 2899 while (LocalChange) { 2900 LocalChange = false; 2901 2902 NotDiscardableComdats.clear(); 2903 for (const GlobalVariable &GV : M.globals()) 2904 if (const Comdat *C = GV.getComdat()) 2905 if (!GV.isDiscardableIfUnused() || !GV.use_empty()) 2906 NotDiscardableComdats.insert(C); 2907 for (Function &F : M) 2908 if (const Comdat *C = F.getComdat()) 2909 if (!F.isDefTriviallyDead()) 2910 NotDiscardableComdats.insert(C); 2911 for (GlobalAlias &GA : M.aliases()) 2912 if (const Comdat *C = GA.getComdat()) 2913 if (!GA.isDiscardableIfUnused() || !GA.use_empty()) 2914 NotDiscardableComdats.insert(C); 2915 2916 // Delete functions that are trivially dead, ccc -> fastcc 2917 LocalChange |= OptimizeFunctions(M, TLI, GetTTI, GetBFI, LookupDomTree, 2918 NotDiscardableComdats); 2919 2920 // Optimize global_ctors list. 2921 LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) { 2922 return EvaluateStaticConstructor(F, DL, TLI); 2923 }); 2924 2925 // Optimize non-address-taken globals. 2926 LocalChange |= OptimizeGlobalVars(M, TLI, LookupDomTree, 2927 NotDiscardableComdats); 2928 2929 // Resolve aliases, when possible. 
2930 LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats); 2931 2932 // Try to remove trivial global destructors if they are not removed 2933 // already. 2934 Function *CXAAtExitFn = FindCXAAtExit(M, TLI); 2935 if (CXAAtExitFn) 2936 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); 2937 2938 Changed |= LocalChange; 2939 } 2940 2941 // TODO: Move all global ctors functions to the end of the module for code 2942 // layout. 2943 2944 return Changed; 2945 } 2946 2947 PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) { 2948 auto &DL = M.getDataLayout(); 2949 auto &TLI = AM.getResult<TargetLibraryAnalysis>(M); 2950 auto &FAM = 2951 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 2952 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{ 2953 return FAM.getResult<DominatorTreeAnalysis>(F); 2954 }; 2955 auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & { 2956 return FAM.getResult<TargetIRAnalysis>(F); 2957 }; 2958 2959 auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & { 2960 return FAM.getResult<BlockFrequencyAnalysis>(F); 2961 }; 2962 2963 if (!optimizeGlobalsInModule(M, DL, &TLI, GetTTI, GetBFI, LookupDomTree)) 2964 return PreservedAnalyses::all(); 2965 return PreservedAnalyses::none(); 2966 } 2967 2968 namespace { 2969 2970 struct GlobalOptLegacyPass : public ModulePass { 2971 static char ID; // Pass identification, replacement for typeid 2972 2973 GlobalOptLegacyPass() : ModulePass(ID) { 2974 initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry()); 2975 } 2976 2977 bool runOnModule(Module &M) override { 2978 if (skipModule(M)) 2979 return false; 2980 2981 auto &DL = M.getDataLayout(); 2982 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 2983 auto LookupDomTree = [this](Function &F) -> DominatorTree & { 2984 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 2985 }; 2986 auto GetTTI = [this](Function &F) -> TargetTransformInfo & { 2987 return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2988 }; 2989 2990 auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & { 2991 return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI(); 2992 }; 2993 2994 return optimizeGlobalsInModule(M, DL, TLI, GetTTI, GetBFI, LookupDomTree); 2995 } 2996 2997 void getAnalysisUsage(AnalysisUsage &AU) const override { 2998 AU.addRequired<TargetLibraryInfoWrapperPass>(); 2999 AU.addRequired<TargetTransformInfoWrapperPass>(); 3000 AU.addRequired<DominatorTreeWrapperPass>(); 3001 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 3002 } 3003 }; 3004 3005 } // end anonymous namespace 3006 3007 char GlobalOptLegacyPass::ID = 0; 3008 3009 INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt", 3010 "Global Variable Optimizer", false, false) 3011 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 3012 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 3013 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 3014 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 3015 INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt", 3016 "Global Variable Optimizer", false, false) 3017 3018 ModulePass *llvm::createGlobalOptimizerPass() { 3019 return new GlobalOptLegacyPass(); 3020 } 3021
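// Usage sketch (illustrative; not part of the pass itself): with the legacy
// pass manager this pass is created via createGlobalOptimizerPass(), while
// with the new pass manager it can be run directly, e.g.:
//   opt -passes=globalopt input.ll -S -o output.ll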