//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
STATISTIC(NumInternalFunc, "Number of internal functions");
STATISTIC(NumColdCC, "Number of functions marked coldcc");

static cl::opt<bool>
    EnableColdCCStressTest("enable-coldcc-stress-test",
                           cl::desc("Enable stress test of coldcc by adding "
                                    "calling conv to all internal functions."),
                           cl::init(false), cl::Hidden);

static cl::opt<int> ColdCCRelFreq(
    "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc(
        "Maximum block frequency, expressed as a percentage of caller's "
        "entry frequency, for a call site to be considered cold for enabling "
        "coldcc"));

/// Is this global variable possibly used by a leak checker as a root?  If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges: one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, so our LLVM type is an integer which
  // gets converted into a pointer, or our type is an [i8 x #] with a pointer
  // potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(GV->getValueType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID:
      return true;
    case Type::FixedVectorTyID:
    case Type::ScalableVectorTyID:
      if (cast<VectorType>(Ty)->getElementType()->isPointerTy())
        return true;
      break;
    case Type::ArrayTyID:
      Types.push_back(cast<ArrayType>(Ty)->getElementType());
      break;
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (StructType::element_iterator I = STy->element_begin(),
             E = STy->element_end(); I != E; ++I) {
        Type *InnerTy = *I;
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<StructType>(InnerTy) || isa<ArrayType>(InnerTy) ||
            isa<VectorType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
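///
/// For example (an illustrative sketch; the value names are hypothetical):
///   %buf = call i8* @malloc(i64 16)
///   %ptr = bitcast i8* %buf to i32*
///   store i32* %ptr, i32** @g      ; @g is never read
/// Here the whole chain feeding the store (the bitcast and the malloc) can be
/// removed along with the store itself.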
static bool IsSafeComputationToRemove(
    Value *V, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, GetTLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (true);
}

/// This GV is a pointer root.  Loop over all users of the global and clean up
/// any that we can remove without confusing a leak checker: stores of
/// constants, and stores whose value's entire chain of computation (including
/// the allocation itself) can be deleted.
static bool
CleanupPointerRootUsers(GlobalVariable *GV,
                        function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to explicitly
  // allow the memory pointed to by globals at exit.  This is popular because
  // it also solves another problem where the main thread of a C++ program may
  // shut down before other threads that are still expecting to use those
  // globals.  To handle that case, we expect the program may create a
  // singleton and never destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, GetTLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, GetTLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, GetTLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (true);
      I->eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}

/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a change.
static bool CleanupConstantGlobalUsers(
    Value *V, Constant *Init, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding a pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakTrackingVH, 8> WorkList(V->users());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, GetTLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, GetTLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, &GetTLI(*GEP->getFunction())));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, GetTLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, GetTLI);
        return true;
      }
    }
  }
  return Changed;
}

static bool isSafeSROAElementUse(Value *V);

/// Return true if the specified GEP is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAGEP(User *U) {
  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue())
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI; // Skip over the pointer index.

  // For all other levels we require that the indices are constant and in
  // range.  In particular, consider: A[0][i].  We cannot know that the user
  // isn't doing invalid things like allowing i to index an out-of-range
  // subscript that accesses A[1].  This can also happen between different
  // members of a struct in LLVM IR.
  for (; GEPI != E; ++GEPI) {
    if (GEPI.isStruct())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
    if (!IdxVal || (GEPI.isBoundedSequential() &&
                    IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
      return false;
  }

  return llvm::all_of(U->users(),
                      [](User *UU) { return isSafeSROAElementUse(UU); });
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.  Check that it and its users are safe to SRA.
  return isa<GetElementPtrInst>(I) && isSafeSROAGEP(I);
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users()) {
    // The user of the global must be a GEP Inst or a ConstantExpr GEP.
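    // For example, 'getelementptr ([4 x i32], [4 x i32]* @G, i32 0, i32 1)'
    // (illustrative; any other kind of user blocks SRA).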
    if (!isa<GetElementPtrInst>(U) &&
        (!isa<ConstantExpr>(U) ||
         cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
      return false;

    // Check that the GEP and its users are safe to SRA.
    if (!isSafeSROAGEP(U))
      return false;
  }

  return true;
}

static bool IsSRASequential(Type *T) {
  return isa<ArrayType>(T) || isa<VectorType>(T);
}
static uint64_t GetSRASequentialNumElements(Type *T) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getNumElements();
  return cast<FixedVectorType>(T)->getNumElements();
}
static Type *GetSRASequentialElementType(Type *T) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getElementType();
  return cast<VectorType>(T)->getElementType();
}
static bool CanDoGlobalSRA(GlobalVariable *GV) {
  Constant *Init = GV->getInitializer();

  if (isa<StructType>(Init->getType())) {
    // nothing to check
  } else if (IsSRASequential(Init->getType())) {
    if (GetSRASequentialNumElements(Init->getType()) > 16 &&
        GV->hasNUsesOrMore(16))
      return false; // It's not worth it.
  } else
    return false;

  return GlobalUsersSafeToSRA(GV);
}

/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
                                 uint64_t FragmentOffsetInBits,
                                 uint64_t FragmentSizeInBits,
                                 uint64_t VarSize) {
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);
  for (auto *GVE : GVs) {
    DIVariable *Var = GVE->getVariable();
    DIExpression *Expr = GVE->getExpression();
    // If the FragmentSize is smaller than the variable,
    // emit a fragment expression.
    if (FragmentSizeInBits < VarSize) {
      if (auto E = DIExpression::createFragmentExpression(
              Expr, FragmentOffsetInBits, FragmentSizeInBits))
        Expr = *E;
      else
        return;
    }
    auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
    NGV->addDebugInfo(NGVE);
  }
}

/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have already determined that this
/// transformation is safe.  We return the first global variable we insert so
/// that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!CanDoGlobalSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();
  uint64_t VarSize = DL.getTypeSizeInBits(Ty);

  std::map<unsigned, GlobalVariable *> NewGlobals;

  // Get the alignment of the global, either explicit or target-specific.
  Align StartAlignment =
      DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getType());

  // Loop over all users and create replacement variables for used aggregate
  // elements.
  for (User *GEP : GV->users()) {
    assert(((isa<ConstantExpr>(GEP) && cast<ConstantExpr>(GEP)->getOpcode() ==
                                           Instruction::GetElementPtr) ||
            isa<GetElementPtrInst>(GEP)) &&
           "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which must be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
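    // For example, in 'getelementptr @G, i32 0, i32 3, ...' (illustrative),
    // ElementIdx below is 3.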
    unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (NewGlobals.count(ElementIdx) == 1)
      continue; // We've already created a replacement variable.
    assert(NewGlobals.count(ElementIdx) == 0);

    Type *ElTy = nullptr;
    if (StructType *STy = dyn_cast<StructType>(Ty))
      ElTy = STy->getElementType(ElementIdx);
    else
      ElTy = GetSRASequentialElementType(Ty);
    assert(ElTy);

    Constant *In = Init->getAggregateElement(ElementIdx);
    assert(In && "Couldn't get element of initializer?");

    GlobalVariable *NGV = new GlobalVariable(
        ElTy, false, GlobalVariable::InternalLinkage, In,
        GV->getName() + "." + Twine(ElementIdx), GV->getThreadLocalMode(),
        GV->getType()->getAddressSpace());
    NGV->setExternallyInitialized(GV->isExternallyInitialized());
    NGV->copyAttributesFrom(GV);
    NewGlobals.insert(std::make_pair(ElementIdx, NGV));

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout &Layout = *DL.getStructLayout(STy);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(ElementIdx);
      Align NewAlign = commonAlignment(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlign(STy->getElementType(ElementIdx)))
        NGV->setAlignment(NewAlign);

      // Copy over the debug info for the variable.
      uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
      uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
      transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, VarSize);
    } else {
      uint64_t EltSize = DL.getTypeAllocSize(ElTy);
      Align EltAlign = DL.getABITypeAlign(ElTy);
      uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      Align NewAlign = commonAlignment(StartAlignment, EltSize * ElementIdx);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
      transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
                           FragmentSizeInBits, VarSize);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
  for (auto NewGlobalVar : NewGlobals)
    Globals.push_back(NewGlobalVar.second);

  LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt =
      Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode() ==
                 Instruction::GetElementPtr) ||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which must be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
    unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    assert(NewGlobals.count(ElementIdx) == 1);

    Value *NewPtr = NewGlobals[ElementIdx];
    Type *NewTy = NewGlobals[ElementIdx]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(ElementIdx),
            GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  assert(NewGlobals.size() > 0);
  return NewGlobals.begin()->second;
}

/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    if (const Instruction *I = dyn_cast<Instruction>(U)) {
      // If null pointer is considered valid, then all uses are non-trapping.
      // Non address-space 0 globals have already been pruned by the caller.
      if (NullPointerIsDefined(I->getFunction()))
        return false;
    }
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledOperand() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr.
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledOperand() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr.
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
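///
/// A minimal sketch of the pattern this accepts (illustrative IR):
///   %p = load i32*, i32** @g
///   %v = load i32, i32* %p    ; dereferences %p, so it traps if %p is null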
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E;) {
    Instruction *I = cast<Instruction>(*UI++);
    // Uses are non-trapping if null pointer is considered valid.
    // Non address-space 0 globals are already pruned by the caller.
    if (NullPointerIsDefined(I->getFunction()))
      return false;
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallBase *CB = cast<CallBase>(I);
      if (CB->getCalledOperand() == V) {
        // Calling through the pointer!  Turn into a direct call, but be
        // careful that the pointer is not also being passed as an argument.
        CB->setCalledOperand(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->getArgOperand(i) == V) {
            PassedAsArg = true;
            CB->setArgOperand(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate
          // UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(
          CI, ConstantExpr::getCast(CI->getOpcode(), NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands() - 1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands() - 1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
                                                 NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// value, so we can optimize away the load entirely.
static bool OptimizeAwayTrappingUsesOfLoads(
    GlobalVariable *GV, Constant *LV, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
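  // (For example, users of '%p = load i32*, i32** @GV' are rewritten to use
  // the stored constant LV instead; illustrative.)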
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect loads and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, GetTLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, GetTLI);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E;)
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and
/// rewrite loads of GV into uses of the new global.
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
                    << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
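  // A sketch of the overall rewrite (illustrative IR; hypothetical names):
  //   @g = internal global i32* null      ; unique store is the malloc result
  // becomes
  //   @g.body = internal global i32 undef
  // with uses of the malloc and loads of @g rewritten to use @g.body (plus an
  // optional @g.init flag if the loaded pointer is compared against null).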
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we
  // have a malloc + bitcast) then replace them with uses of the new global.
  // Update other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
      new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                         GlobalValue::InternalLinkage,
                         ConstantInt::getFalse(GV->getContext()),
                         GV->getName() + ".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false,
                    Align(1), SI->getOrdering(), SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
                               InitBool->getName() + ".val", false, Align(1),
                               LI->getOrdering(), LI->getSyncScopeID(),
                               LI->isUnordered() ? (Instruction *)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To enable further optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V.  We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                        const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be a bitcast between the malloc and the store that initializes
      // the global.
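      // e.g. '%c = bitcast i8* %m to %T*' feeding 'store %T* %c, %T** @GV'
      // (illustrative).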
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be a bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL =
        new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on.  This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't
  // know that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo + 1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                InsertedScalarizedValues, PHIsToRewrite);
    Result = new LoadInst(V->getType()->getPointerElementType(), V,
                          LI->getName() + ".f" + Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
        PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                        PN->getNumIncomingValues(),
                        PN->getName() + ".f" + Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
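  // For example (illustrative), 'gep %p, i64 %i, i32 2' (field 2 of the i'th
  // struct) is rewritten to 'gep %p.f2, i64 %i' on the field-2 array.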
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin() + 3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                         std::vector<Value *>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// We are performing Heap SRoA on a global.  Ptr is a value loaded from the
/// global.  Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI
                    << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
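  // Sketch (illustrative): for a malloc'd array of %T = type { i32, i32* },
  // this creates
  //   @GV.f0 = internal global i32*  null   ; points at a malloc'd i32 array
  //   @GV.f1 = internal global i32** null   ; points at a malloc'd i32* array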
  std::vector<Value *> FieldGlobals;
  std::vector<Value *> FieldMallocs;

  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;
       ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at
  // the end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal =
        new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
                     FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, OpBundles, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// As we process loads, if we can't immediately update all uses of the load,
  /// keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are
  // now loads, and all uses of those loads are simple.  Rewrite them to use
  // loads of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
      Constant *Null = Constant::getNullValue(ValTy);
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&
           "Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// This function is called when we see a pointer global variable with a single
/// value stored into it that is a malloc or cast of malloc.
static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               const DataLayout &DL,
                                               TargetLibraryInfo *TLI) {
  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached). To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals. This allows the
  // malloc to be stored into the specified global, loaded, icmp'd, and
  // GEP'd; these are all uses we know how to rewrite to go through the
  // global instead.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine the malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
      OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field. This is basically
  // SRoA for malloc'd memory.
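  //
  // For example (a sketch; %pair and @G are illustrative names):
  //   %pair = type { i32, i32 }
  //   @G = internal global %pair* null
  //   store %pair* <malloc of N x %pair>, %pair** @G
  // becomes one malloc'd array per field, each held in its own global:
  //   @G.f0 = internal global i32* null    ; holds N x i32
  //   @G.f1 = internal global i32* null    ; holds N x i32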

  if (Ordering != AtomicOrdering::NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array. malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs. malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
      Type *IntPtrTy = DL.getIntPtrType(CI->getType());
      unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      Instruction *Malloc =
          CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
                                 OpBundles, nullptr, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
                         TLI);
    return true;
  }

  return false;
}

// Try to optimize globals based on the knowledge that only one value (besides
// its initializer) is ever stored to the global.
static bool
optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                         AtomicOrdering Ordering, const DataLayout &DL,
                         function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue() &&
      !NullPointerIsDefined(
          nullptr /* F */,
          GV->getInitializer()->getType()->getPointerAddressSpace())) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
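      // For instance (a sketch): if @G starts out null and the single store
      // writes a known function pointer, an indirect call through "load @G"
      // would trap on null, so it must observe the stored value and can be
      // rewritten to use that value directly.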
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, GetTLI))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal, GetTLI)) {
      auto *TLI = &GetTLI(*CI->getFunction());
      Type *MallocType = getMallocAllocatedType(CI, TLI);
      if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
                                                           Ordering, DL, TLI))
        return true;
    }
  }

  return false;
}

/// At this point, we have learned that the only two values ever stored into GV
/// are its initializer and OtherVal. See if we can shrink the global into a
/// boolean and select between the two values whenever it is used. This exposes
/// the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
  Type *GVElType = GV->getValueType();

  // If GVElType is already i1, it is already shrunk. If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a select
  // between them is very expensive and unlikely to lead to later
  // simplification. In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::getInt1Ty(GV->getContext()) ||
      GVElType->isFloatingPointTy() ||
      GVElType->isPointerTy() || GVElType->isVectorTy())
    return false;

  // Walk the use list of the global, seeing if all the uses are loads or
  // stores. If there is anything else, bail out.
  for (User *U : GV->users())
    if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
      return false;

  LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");

  // Create the new global, initializing it to false.
  GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
                                             false,
                                             GlobalValue::InternalLinkage,
                                             ConstantInt::getFalse(GV->getContext()),
                                             GV->getName() + ".b",
                                             GV->getThreadLocalMode(),
                                             GV->getType()->getAddressSpace());
  NewGV->copyAttributesFrom(GV);
  GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
         "No reason to shrink to bool!");

  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
  bool IsOneZero = false;
  bool EmitOneOrZero = true;
  auto *CI = dyn_cast<ConstantInt>(OtherVal);
  if (CI && CI->getValue().getActiveBits() <= 64) {
    IsOneZero = InitVal->isNullValue() && CI->isOne();

    auto *CIInit = dyn_cast<ConstantInt>(GV->getInitializer());
    if (CIInit && CIInit->getValue().getActiveBits() <= 64) {
      uint64_t ValInit = CIInit->getZExtValue();
      uint64_t ValOther = CI->getZExtValue();
      uint64_t ValMinus = ValOther - ValInit;

      for (auto *GVe : GVs) {
        DIGlobalVariable *DGV = GVe->getVariable();
        DIExpression *E = GVe->getExpression();
        const DataLayout &DL = GV->getParent()->getDataLayout();
        unsigned SizeInOctets =
            DL.getTypeAllocSizeInBits(NewGV->getType()->getElementType()) / 8;

        // It is expected that the address of the optimized global variable is
        // on top of the DWARF expression stack.
        // After optimization, the value of that variable will be either 0
        // (the initial value) or 1 (the other value). The following
        // expression should return a constant integer value depending on the
        // value at the global object's address:
        //   val * (ValOther - ValInit) + ValInit:
        //   DW_OP_deref_size <SizeInOctets> DW_OP_constu <ValMinus>
        //   DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
        SmallVector<uint64_t, 12> Ops = {
            dwarf::DW_OP_deref_size, SizeInOctets,
            dwarf::DW_OP_constu, ValMinus,
            dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
            dwarf::DW_OP_plus};
        bool WithStackValue = true;
        E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
        DIGlobalVariableExpression *DGVE =
            DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
        NewGV->addDebugInfo(DGVE);
      }
      EmitOneOrZero = false;
    }
  }

  if (EmitOneOrZero) {
    // FIXME: This only emits the variable's address for the debugger; the
    // value stored there will only ever be 0 or 1.
    for (auto *GV : GVs)
      NewGV->addDebugInfo(GV);
  }

  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->user_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal) {
        StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
                                    StoringOther);
      } else {
        // Otherwise, we are storing a previously loaded copy. To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction. If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
                                  LI->getName() + ".b", false, Align(1),
                                  LI->getOrdering(), LI->getSyncScopeID(), LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      StoreInst *NSI =
          new StoreInst(StoreVal, NewGV, false, Align(1), SI->getOrdering(),
                        SI->getSyncScopeID(), SI);
      NSI->setDebugLoc(SI->getDebugLoc());
    } else {
      // Change the load into a load of bool then a select.
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV->getValueType(), NewGV,
                                   LI->getName() + ".b", false, Align(1),
                                   LI->getOrdering(), LI->getSyncScopeID(), LI);
      Instruction *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      // Since LI is split into two instructions, NLI and NSI both inherit the
      // same DebugLoc.
      NLI->setDebugLoc(LI->getDebugLoc());
      NSI->setDebugLoc(LI->getDebugLoc());
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  // Retain the name of the old global variable.
People who are debugging their 1783 // programs may expect these variables to be named the same. 1784 NewGV->takeName(GV); 1785 GV->eraseFromParent(); 1786 return true; 1787 } 1788 1789 static bool deleteIfDead( 1790 GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 1791 GV.removeDeadConstantUsers(); 1792 1793 if (!GV.isDiscardableIfUnused() && !GV.isDeclaration()) 1794 return false; 1795 1796 if (const Comdat *C = GV.getComdat()) 1797 if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C)) 1798 return false; 1799 1800 bool Dead; 1801 if (auto *F = dyn_cast<Function>(&GV)) 1802 Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead(); 1803 else 1804 Dead = GV.use_empty(); 1805 if (!Dead) 1806 return false; 1807 1808 LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n"); 1809 GV.eraseFromParent(); 1810 ++NumDeleted; 1811 return true; 1812 } 1813 1814 static bool isPointerValueDeadOnEntryToFunction( 1815 const Function *F, GlobalValue *GV, 1816 function_ref<DominatorTree &(Function &)> LookupDomTree) { 1817 // Find all uses of GV. We expect them all to be in F, and if we can't 1818 // identify any of the uses we bail out. 1819 // 1820 // On each of these uses, identify if the memory that GV points to is 1821 // used/required/live at the start of the function. If it is not, for example 1822 // if the first thing the function does is store to the GV, the GV can 1823 // possibly be demoted. 1824 // 1825 // We don't do an exhaustive search for memory operations - simply look 1826 // through bitcasts as they're quite common and benign. 1827 const DataLayout &DL = GV->getParent()->getDataLayout(); 1828 SmallVector<LoadInst *, 4> Loads; 1829 SmallVector<StoreInst *, 4> Stores; 1830 for (auto *U : GV->users()) { 1831 if (Operator::getOpcode(U) == Instruction::BitCast) { 1832 for (auto *UU : U->users()) { 1833 if (auto *LI = dyn_cast<LoadInst>(UU)) 1834 Loads.push_back(LI); 1835 else if (auto *SI = dyn_cast<StoreInst>(UU)) 1836 Stores.push_back(SI); 1837 else 1838 return false; 1839 } 1840 continue; 1841 } 1842 1843 Instruction *I = dyn_cast<Instruction>(U); 1844 if (!I) 1845 return false; 1846 assert(I->getParent()->getParent() == F); 1847 1848 if (auto *LI = dyn_cast<LoadInst>(I)) 1849 Loads.push_back(LI); 1850 else if (auto *SI = dyn_cast<StoreInst>(I)) 1851 Stores.push_back(SI); 1852 else 1853 return false; 1854 } 1855 1856 // We have identified all uses of GV into loads and stores. Now check if all 1857 // of them are known not to depend on the value of the global at the function 1858 // entry point. We do this by ensuring that every load is dominated by at 1859 // least one store. 1860 auto &DT = LookupDomTree(*const_cast<Function *>(F)); 1861 1862 // The below check is quadratic. Check we're not going to do too many tests. 1863 // FIXME: Even though this will always have worst-case quadratic time, we 1864 // could put effort into minimizing the average time by putting stores that 1865 // have been shown to dominate at least one load at the beginning of the 1866 // Stores array, making subsequent dominance checks more likely to succeed 1867 // early. 1868 // 1869 // The threshold here is fairly large because global->local demotion is a 1870 // very powerful optimization should it fire. 
1871 const unsigned Threshold = 100; 1872 if (Loads.size() * Stores.size() > Threshold) 1873 return false; 1874 1875 for (auto *L : Loads) { 1876 auto *LTy = L->getType(); 1877 if (none_of(Stores, [&](const StoreInst *S) { 1878 auto *STy = S->getValueOperand()->getType(); 1879 // The load is only dominated by the store if DomTree says so 1880 // and the number of bits loaded in L is less than or equal to 1881 // the number of bits stored in S. 1882 return DT.dominates(S, L) && 1883 DL.getTypeStoreSize(LTy).getFixedSize() <= 1884 DL.getTypeStoreSize(STy).getFixedSize(); 1885 })) 1886 return false; 1887 } 1888 // All loads have known dependences inside F, so the global can be localized. 1889 return true; 1890 } 1891 1892 /// C may have non-instruction users. Can all of those users be turned into 1893 /// instructions? 1894 static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) { 1895 // We don't do this exhaustively. The most common pattern that we really need 1896 // to care about is a constant GEP or constant bitcast - so just looking 1897 // through one single ConstantExpr. 1898 // 1899 // The set of constants that this function returns true for must be able to be 1900 // handled by makeAllConstantUsesInstructions. 1901 for (auto *U : C->users()) { 1902 if (isa<Instruction>(U)) 1903 continue; 1904 if (!isa<ConstantExpr>(U)) 1905 // Non instruction, non-constantexpr user; cannot convert this. 1906 return false; 1907 for (auto *UU : U->users()) 1908 if (!isa<Instruction>(UU)) 1909 // A constantexpr used by another constant. We don't try and recurse any 1910 // further but just bail out at this point. 1911 return false; 1912 } 1913 1914 return true; 1915 } 1916 1917 /// C may have non-instruction users, and 1918 /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the 1919 /// non-instruction users to instructions. 1920 static void makeAllConstantUsesInstructions(Constant *C) { 1921 SmallVector<ConstantExpr*,4> Users; 1922 for (auto *U : C->users()) { 1923 if (isa<ConstantExpr>(U)) 1924 Users.push_back(cast<ConstantExpr>(U)); 1925 else 1926 // We should never get here; allNonInstructionUsersCanBeMadeInstructions 1927 // should not have returned true for C. 1928 assert( 1929 isa<Instruction>(U) && 1930 "Can't transform non-constantexpr non-instruction to instruction!"); 1931 } 1932 1933 SmallVector<Value*,4> UUsers; 1934 for (auto *U : Users) { 1935 UUsers.clear(); 1936 append_range(UUsers, U->users()); 1937 for (auto *UU : UUsers) { 1938 Instruction *UI = cast<Instruction>(UU); 1939 Instruction *NewU = U->getAsInstruction(); 1940 NewU->insertBefore(UI); 1941 UI->replaceUsesOfWith(U, NewU); 1942 } 1943 // We've replaced all the uses, so destroy the constant. (destroyConstant 1944 // will update value handles and metadata.) 1945 U->destroyConstant(); 1946 } 1947 } 1948 1949 /// Analyze the specified global variable and optimize 1950 /// it if possible. If we make a change, return true. 1951 static bool 1952 processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS, 1953 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 1954 function_ref<DominatorTree &(Function &)> LookupDomTree) { 1955 auto &DL = GV->getParent()->getDataLayout(); 1956 // If this is a first class global and has only one accessing function and 1957 // this function is non-recursive, we replace the global with a local alloca 1958 // in this function. 
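  // For example (a sketch): an "@counter = internal global i32 0" that is
  // only loaded and stored within a single non-recursive function can be
  // replaced by an "%counter = alloca i32" in that function's entry block,
  // with the old initializer becoming an initial store to the alloca.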
  //
  // NOTE: It doesn't make sense to promote non-single-value types since we
  // are just replacing static memory with stack memory.
  //
  // If the global is in a different address space, don't bring it to the stack.
  if (!GS.HasMultipleAccessingFunctions &&
      GS.AccessingFunction &&
      GV->getValueType()->isSingleValueType() &&
      GV->getType()->getAddressSpace() == 0 &&
      !GV->isExternallyInitialized() &&
      allNonInstructionUsersCanBeMadeInstructions(GV) &&
      GS.AccessingFunction->doesNotRecurse() &&
      isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
                                          LookupDomTree)) {
    const DataLayout &DL = GV->getParent()->getDataLayout();

    LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
    Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
                                                        ->getEntryBlock().begin());
    Type *ElemTy = GV->getValueType();
    // FIXME: Pass Global's alignment when globals have alignment
    AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
                                        GV->getName(), &FirstI);
    if (!isa<UndefValue>(GV->getInitializer()))
      new StoreInst(GV->getInitializer(), Alloca, &FirstI);

    makeAllConstantUsesInstructions(GV);

    GV->replaceAllUsesWith(Alloca);
    GV->eraseFromParent();
    ++NumLocalized;
    return true;
  }

  bool Changed = false;

  // If the global is never loaded (but may be stored to), it is dead.
  // Delete it now.
  if (!GS.IsLoaded) {
    LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");

    if (isLeakCheckerRoot(GV)) {
      // Delete any constant stores to the global.
      Changed = CleanupPointerRootUsers(GV, GetTLI);
    } else {
      // Delete any stores we can find to the global. We may not be able to
      // make it completely dead though.
      Changed =
          CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
    }

    // If the global is dead now, delete it.
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
      Changed = true;
    }
    return Changed;
  }

  if (GS.StoredType <= GlobalStatus::InitializerStored) {
    LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");

    // Don't actually mark a global constant if it's atomic because atomic loads
    // are implemented by a trivial cmpxchg in some edge-cases and that usually
    // requires write access to the variable even if it's not actually changed.
    if (GS.Ordering == AtomicOrdering::NotAtomic) {
      assert(!GV->isConstant() && "Expected a non-constant global");
      GV->setConstant(true);
      Changed = true;
    }

    // Clean up any obviously simplifiable users now.
    Changed |= CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);

    // If the global is dead now, just nuke it.
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
                        << "all users and delete global!\n");
      GV->eraseFromParent();
      ++NumDeleted;
      return true;
    }

    // Fall through to the next check; see if we can optimize further.
2044 ++NumMarked; 2045 } 2046 if (!GV->getInitializer()->getType()->isSingleValueType()) { 2047 const DataLayout &DL = GV->getParent()->getDataLayout(); 2048 if (SRAGlobal(GV, DL)) 2049 return true; 2050 } 2051 if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) { 2052 // If the initial value for the global was an undef value, and if only 2053 // one other value was stored into it, we can just change the 2054 // initializer to be the stored value, then delete all stores to the 2055 // global. This allows us to mark it constant. 2056 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 2057 if (isa<UndefValue>(GV->getInitializer())) { 2058 // Change the initial value here. 2059 GV->setInitializer(SOVConstant); 2060 2061 // Clean up any obviously simplifiable users now. 2062 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI); 2063 2064 if (GV->use_empty()) { 2065 LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to " 2066 << "simplify all users and delete global!\n"); 2067 GV->eraseFromParent(); 2068 ++NumDeleted; 2069 } 2070 ++NumSubstitute; 2071 return true; 2072 } 2073 2074 // Try to optimize globals based on the knowledge that only one value 2075 // (besides its initializer) is ever stored to the global. 2076 if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, 2077 GetTLI)) 2078 return true; 2079 2080 // Otherwise, if the global was not a boolean, we can shrink it to be a 2081 // boolean. 2082 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { 2083 if (GS.Ordering == AtomicOrdering::NotAtomic) { 2084 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 2085 ++NumShrunkToBool; 2086 return true; 2087 } 2088 } 2089 } 2090 } 2091 2092 return Changed; 2093 } 2094 2095 /// Analyze the specified global variable and optimize it if possible. If we 2096 /// make a change, return true. 2097 static bool 2098 processGlobal(GlobalValue &GV, 2099 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 2100 function_ref<DominatorTree &(Function &)> LookupDomTree) { 2101 if (GV.getName().startswith("llvm.")) 2102 return false; 2103 2104 GlobalStatus GS; 2105 2106 if (GlobalStatus::analyzeGlobal(&GV, GS)) 2107 return false; 2108 2109 bool Changed = false; 2110 if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) { 2111 auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global 2112 : GlobalValue::UnnamedAddr::Local; 2113 if (NewUnnamedAddr != GV.getUnnamedAddr()) { 2114 GV.setUnnamedAddr(NewUnnamedAddr); 2115 NumUnnamed++; 2116 Changed = true; 2117 } 2118 } 2119 2120 // Do more involved optimizations if the global is internal. 2121 if (!GV.hasLocalLinkage()) 2122 return Changed; 2123 2124 auto *GVar = dyn_cast<GlobalVariable>(&GV); 2125 if (!GVar) 2126 return Changed; 2127 2128 if (GVar->isConstant() || !GVar->hasInitializer()) 2129 return Changed; 2130 2131 return processInternalGlobal(GVar, GS, GetTLI, LookupDomTree) || Changed; 2132 } 2133 2134 /// Walk all of the direct calls of the specified function, changing them to 2135 /// FastCC. 
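/// For example (a sketch): each direct "call void @f()" of an internal
/// function @f becomes "call fastcc void @f()". The caller of this helper is
/// expected to also set @f's own calling convention to fastcc, since the two
/// must stay in sync; blockaddress users merely capture the function's
/// address and are skipped.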
static void ChangeCalleesToFastCall(Function *F) {
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    cast<CallBase>(U)->setCallingConv(CallingConv::Fast);
  }
}

static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
                               Attribute::AttrKind A) {
  unsigned AttrIndex;
  if (Attrs.hasAttrSomewhere(A, &AttrIndex))
    return Attrs.removeAttribute(C, AttrIndex, A);
  return Attrs;
}

static void RemoveAttribute(Function *F, Attribute::AttrKind A) {
  F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A));
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallBase *CB = cast<CallBase>(U);
    CB->setAttributes(StripAttr(F->getContext(), CB->getAttributes(), A));
  }
}

/// Return true if this is a calling convention that we'd like to change. The
/// idea here is that we don't want to mess with the convention if the user
/// explicitly requested something with performance implications like coldcc,
/// GHC, or anyregcc.
static bool hasChangeableCC(Function *F) {
  CallingConv::ID CC = F->getCallingConv();

  // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
  if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
    return false;

  // FIXME: Change CC for the whole chain of musttail calls when possible.
  //
  // Can't change the CC of a function that either has musttail calls, or is a
  // musttail callee itself.
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallInst *CI = dyn_cast<CallInst>(U);
    if (!CI)
      continue;

    if (CI->isMustTailCall())
      return false;
  }

  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return false;

  return true;
}

/// Return true if the block containing the call site has a BlockFrequency of
/// less than ColdCCRelFreq% of the entry block.
static bool isColdCallSite(CallBase &CB, BlockFrequencyInfo &CallerBFI) {
  const BranchProbability ColdProb(ColdCCRelFreq, 100);
  auto *CallSiteBB = CB.getParent();
  auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
  auto CallerEntryFreq =
      CallerBFI.getBlockFreq(&(CB.getCaller()->getEntryBlock()));
  return CallSiteFreq < CallerEntryFreq * ColdProb;
}

// This function checks if the input function F is cold at all call sites. It
// also looks at each call site's containing function, returning false if the
// caller function contains other non-cold calls. The input vector AllCallsCold
// contains a list of functions that only have call sites in cold blocks.
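// For example, if F is called from two different callers, both call sites
// must be cold relative to their caller's entry frequency (per
// isColdCallSite), and both callers must themselves appear in AllCallsCold,
// for F to be a valid coldcc candidate.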
static bool
isValidCandidateForColdCC(Function &F,
                          function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
                          const std::vector<Function *> &AllCallsCold) {

  if (F.user_empty())
    return false;

  for (User *U : F.users()) {
    if (isa<BlockAddress>(U))
      continue;

    CallBase &CB = cast<CallBase>(*U);
    Function *CallerFunc = CB.getParent()->getParent();
    BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
    if (!isColdCallSite(CB, CallerBFI))
      return false;
    if (!llvm::is_contained(AllCallsCold, CallerFunc))
      return false;
  }
  return true;
}

static void changeCallSitesToColdCC(Function *F) {
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    cast<CallBase>(U)->setCallingConv(CallingConv::Cold);
  }
}

// This function iterates over all the call instructions in the input Function
// and checks that all call sites are in cold blocks and are allowed to use the
// coldcc calling convention.
static bool
hasOnlyColdCalls(Function &F,
                 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
        // Skip over inline asm instructions since they aren't function calls.
        if (CI->isInlineAsm())
          continue;
        Function *CalledFn = CI->getCalledFunction();
        if (!CalledFn)
          return false;
        if (!CalledFn->hasLocalLinkage())
          return false;
        // Skip over intrinsics since they won't remain as function calls.
        if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
          continue;
        // Check if it's valid to use the coldcc calling convention.
        if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
            CalledFn->hasAddressTaken())
          return false;
        BlockFrequencyInfo &CallerBFI = GetBFI(F);
        if (!isColdCallSite(*CI, CallerBFI))
          return false;
      }
    }
  }
  return true;
}

static bool hasMustTailCallers(Function *F) {
  for (User *U : F->users()) {
    CallBase *CB = dyn_cast<CallBase>(U);
    if (!CB) {
      assert(isa<BlockAddress>(U) &&
             "Expected either CallBase or BlockAddress");
      continue;
    }
    if (CB->isMustTailCall())
      return true;
  }
  return false;
}

static bool hasInvokeCallers(Function *F) {
  for (User *U : F->users())
    if (isa<InvokeInst>(U))
      return true;
  return false;
}

static void RemovePreallocated(Function *F) {
  RemoveAttribute(F, Attribute::Preallocated);

  auto *M = F->getParent();

  IRBuilder<> Builder(M->getContext());

  // Cannot modify users() while iterating over it, so make a copy.
  SmallVector<User *, 4> PreallocatedCalls(F->users());
  for (User *U : PreallocatedCalls) {
    CallBase *CB = dyn_cast<CallBase>(U);
    if (!CB)
      continue;

    assert(
        !CB->isMustTailCall() &&
        "Shouldn't call RemovePreallocated() on a musttail preallocated call");
    // Create copy of call without "preallocated" operand bundle.
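    // For illustration (a sketch; @f, %t, and %a are invented names), the
    // pattern being rewritten looks like:
    //   %t = call token @llvm.call.preallocated.setup(i32 1)
    //   %a = call i8* @llvm.call.preallocated.arg(token %t, i32 0)
    //            preallocated(i32)
    //   call void @f(i32* preallocated(i32) %a) ["preallocated"(token %t)]
    // The call is re-emitted without the bundle, the setup is replaced by a
    // stacksave paired with a stackrestore after the call, and each
    // llvm.call.preallocated.arg becomes an alloca (handled below).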
2313 SmallVector<OperandBundleDef, 1> OpBundles; 2314 CB->getOperandBundlesAsDefs(OpBundles); 2315 CallBase *PreallocatedSetup = nullptr; 2316 for (auto *It = OpBundles.begin(); It != OpBundles.end(); ++It) { 2317 if (It->getTag() == "preallocated") { 2318 PreallocatedSetup = cast<CallBase>(*It->input_begin()); 2319 OpBundles.erase(It); 2320 break; 2321 } 2322 } 2323 assert(PreallocatedSetup && "Did not find preallocated bundle"); 2324 uint64_t ArgCount = 2325 cast<ConstantInt>(PreallocatedSetup->getArgOperand(0))->getZExtValue(); 2326 2327 assert((isa<CallInst>(CB) || isa<InvokeInst>(CB)) && 2328 "Unknown indirect call type"); 2329 CallBase *NewCB = CallBase::Create(CB, OpBundles, CB); 2330 CB->replaceAllUsesWith(NewCB); 2331 NewCB->takeName(CB); 2332 CB->eraseFromParent(); 2333 2334 Builder.SetInsertPoint(PreallocatedSetup); 2335 auto *StackSave = 2336 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stacksave)); 2337 2338 Builder.SetInsertPoint(NewCB->getNextNonDebugInstruction()); 2339 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackrestore), 2340 StackSave); 2341 2342 // Replace @llvm.call.preallocated.arg() with alloca. 2343 // Cannot modify users() while iterating over it, so make a copy. 2344 // @llvm.call.preallocated.arg() can be called with the same index multiple 2345 // times. So for each @llvm.call.preallocated.arg(), we see if we have 2346 // already created a Value* for the index, and if not, create an alloca and 2347 // bitcast right after the @llvm.call.preallocated.setup() so that it 2348 // dominates all uses. 2349 SmallVector<Value *, 2> ArgAllocas(ArgCount); 2350 SmallVector<User *, 2> PreallocatedArgs(PreallocatedSetup->users()); 2351 for (auto *User : PreallocatedArgs) { 2352 auto *UseCall = cast<CallBase>(User); 2353 assert(UseCall->getCalledFunction()->getIntrinsicID() == 2354 Intrinsic::call_preallocated_arg && 2355 "preallocated token use was not a llvm.call.preallocated.arg"); 2356 uint64_t AllocArgIndex = 2357 cast<ConstantInt>(UseCall->getArgOperand(1))->getZExtValue(); 2358 Value *AllocaReplacement = ArgAllocas[AllocArgIndex]; 2359 if (!AllocaReplacement) { 2360 auto AddressSpace = UseCall->getType()->getPointerAddressSpace(); 2361 auto *ArgType = UseCall 2362 ->getAttribute(AttributeList::FunctionIndex, 2363 Attribute::Preallocated) 2364 .getValueAsType(); 2365 auto *InsertBefore = PreallocatedSetup->getNextNonDebugInstruction(); 2366 Builder.SetInsertPoint(InsertBefore); 2367 auto *Alloca = 2368 Builder.CreateAlloca(ArgType, AddressSpace, nullptr, "paarg"); 2369 auto *BitCast = Builder.CreateBitCast( 2370 Alloca, Type::getInt8PtrTy(M->getContext()), UseCall->getName()); 2371 ArgAllocas[AllocArgIndex] = BitCast; 2372 AllocaReplacement = BitCast; 2373 } 2374 2375 UseCall->replaceAllUsesWith(AllocaReplacement); 2376 UseCall->eraseFromParent(); 2377 } 2378 // Remove @llvm.call.preallocated.setup(). 
2379 cast<Instruction>(PreallocatedSetup)->eraseFromParent(); 2380 } 2381 } 2382 2383 static bool 2384 OptimizeFunctions(Module &M, 2385 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 2386 function_ref<TargetTransformInfo &(Function &)> GetTTI, 2387 function_ref<BlockFrequencyInfo &(Function &)> GetBFI, 2388 function_ref<DominatorTree &(Function &)> LookupDomTree, 2389 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2390 2391 bool Changed = false; 2392 2393 std::vector<Function *> AllCallsCold; 2394 for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) { 2395 Function *F = &*FI++; 2396 if (hasOnlyColdCalls(*F, GetBFI)) 2397 AllCallsCold.push_back(F); 2398 } 2399 2400 // Optimize functions. 2401 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 2402 Function *F = &*FI++; 2403 2404 // Don't perform global opt pass on naked functions; we don't want fast 2405 // calling conventions for naked functions. 2406 if (F->hasFnAttribute(Attribute::Naked)) 2407 continue; 2408 2409 // Functions without names cannot be referenced outside this module. 2410 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage()) 2411 F->setLinkage(GlobalValue::InternalLinkage); 2412 2413 if (deleteIfDead(*F, NotDiscardableComdats)) { 2414 Changed = true; 2415 continue; 2416 } 2417 2418 // LLVM's definition of dominance allows instructions that are cyclic 2419 // in unreachable blocks, e.g.: 2420 // %pat = select i1 %condition, @global, i16* %pat 2421 // because any instruction dominates an instruction in a block that's 2422 // not reachable from entry. 2423 // So, remove unreachable blocks from the function, because a) there's 2424 // no point in analyzing them and b) GlobalOpt should otherwise grow 2425 // some more complicated logic to break these cycles. 2426 // Removing unreachable blocks might invalidate the dominator so we 2427 // recalculate it. 2428 if (!F->isDeclaration()) { 2429 if (removeUnreachableBlocks(*F)) { 2430 auto &DT = LookupDomTree(*F); 2431 DT.recalculate(*F); 2432 Changed = true; 2433 } 2434 } 2435 2436 Changed |= processGlobal(*F, GetTLI, LookupDomTree); 2437 2438 if (!F->hasLocalLinkage()) 2439 continue; 2440 2441 // If we have an inalloca parameter that we can safely remove the 2442 // inalloca attribute from, do so. This unlocks optimizations that 2443 // wouldn't be safe in the presence of inalloca. 2444 // FIXME: We should also hoist alloca affected by this to the entry 2445 // block if possible. 2446 if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca) && 2447 !F->hasAddressTaken() && !hasMustTailCallers(F)) { 2448 RemoveAttribute(F, Attribute::InAlloca); 2449 Changed = true; 2450 } 2451 2452 // FIXME: handle invokes 2453 // FIXME: handle musttail 2454 if (F->getAttributes().hasAttrSomewhere(Attribute::Preallocated)) { 2455 if (!F->hasAddressTaken() && !hasMustTailCallers(F) && 2456 !hasInvokeCallers(F)) { 2457 RemovePreallocated(F); 2458 Changed = true; 2459 } 2460 continue; 2461 } 2462 2463 if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) { 2464 NumInternalFunc++; 2465 TargetTransformInfo &TTI = GetTTI(*F); 2466 // Change the calling convention to coldcc if either stress testing is 2467 // enabled or the target would like to use coldcc on functions which are 2468 // cold at all call sites and the callers contain no other non coldcc 2469 // calls. 
2470 if (EnableColdCCStressTest || 2471 (TTI.useColdCCForColdCall(*F) && 2472 isValidCandidateForColdCC(*F, GetBFI, AllCallsCold))) { 2473 F->setCallingConv(CallingConv::Cold); 2474 changeCallSitesToColdCC(F); 2475 Changed = true; 2476 NumColdCC++; 2477 } 2478 } 2479 2480 if (hasChangeableCC(F) && !F->isVarArg() && 2481 !F->hasAddressTaken()) { 2482 // If this function has a calling convention worth changing, is not a 2483 // varargs function, and is only called directly, promote it to use the 2484 // Fast calling convention. 2485 F->setCallingConv(CallingConv::Fast); 2486 ChangeCalleesToFastCall(F); 2487 ++NumFastCallFns; 2488 Changed = true; 2489 } 2490 2491 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 2492 !F->hasAddressTaken()) { 2493 // The function is not used by a trampoline intrinsic, so it is safe 2494 // to remove the 'nest' attribute. 2495 RemoveAttribute(F, Attribute::Nest); 2496 ++NumNestRemoved; 2497 Changed = true; 2498 } 2499 } 2500 return Changed; 2501 } 2502 2503 static bool 2504 OptimizeGlobalVars(Module &M, 2505 function_ref<TargetLibraryInfo &(Function &)> GetTLI, 2506 function_ref<DominatorTree &(Function &)> LookupDomTree, 2507 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2508 bool Changed = false; 2509 2510 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 2511 GVI != E; ) { 2512 GlobalVariable *GV = &*GVI++; 2513 // Global variables without names cannot be referenced outside this module. 2514 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage()) 2515 GV->setLinkage(GlobalValue::InternalLinkage); 2516 // Simplify the initializer. 2517 if (GV->hasInitializer()) 2518 if (auto *C = dyn_cast<Constant>(GV->getInitializer())) { 2519 auto &DL = M.getDataLayout(); 2520 // TLI is not used in the case of a Constant, so use default nullptr 2521 // for that optional parameter, since we don't have a Function to 2522 // provide GetTLI anyway. 2523 Constant *New = ConstantFoldConstant(C, DL, /*TLI*/ nullptr); 2524 if (New != C) 2525 GV->setInitializer(New); 2526 } 2527 2528 if (deleteIfDead(*GV, NotDiscardableComdats)) { 2529 Changed = true; 2530 continue; 2531 } 2532 2533 Changed |= processGlobal(*GV, GetTLI, LookupDomTree); 2534 } 2535 return Changed; 2536 } 2537 2538 /// Evaluate a piece of a constantexpr store into a global initializer. This 2539 /// returns 'Init' modified to reflect 'Val' stored into it. At this point, the 2540 /// GEP operands of Addr [0, OpNo) have been stepped into. 2541 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2542 ConstantExpr *Addr, unsigned OpNo) { 2543 // Base case of the recursion. 2544 if (OpNo == Addr->getNumOperands()) { 2545 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2546 return Val; 2547 } 2548 2549 SmallVector<Constant*, 32> Elts; 2550 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { 2551 // Break up the constant into its elements. 2552 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2553 Elts.push_back(Init->getAggregateElement(i)); 2554 2555 // Replace the element that we are supposed to. 2556 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2557 unsigned Idx = CU->getZExtValue(); 2558 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2559 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 2560 2561 // Return the modified struct. 
    return ConstantStruct::get(STy, Elts);
  }

  ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
  uint64_t NumElts;
  if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType()))
    NumElts = ATy->getNumElements();
  else
    NumElts = cast<FixedVectorType>(Init->getType())->getNumElements();

  // Break up the array into elements.
  for (uint64_t i = 0, e = NumElts; i != e; ++i)
    Elts.push_back(Init->getAggregateElement(i));

  assert(CI->getZExtValue() < NumElts);
  Elts[CI->getZExtValue()] =
      EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);

  if (Init->getType()->isArrayTy())
    return ConstantArray::get(cast<ArrayType>(Init->getType()), Elts);
  return ConstantVector::get(Elts);
}

/// We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    assert(GV->hasInitializer());
    GV->setInitializer(Val);
    return;
  }

  ConstantExpr *CE = cast<ConstantExpr>(Addr);
  GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
  GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
}

/// Given a map of address -> value, where addresses are expected to be some form
/// of either a global or a constant GEP, set the initializer for the address to
/// be the value. This performs mostly the same function as CommitValueTo()
/// and EvaluateStoreInto() but is optimized to be more efficient for the common
/// case where the set of addresses are GEPs sharing the same underlying global,
/// processing the GEPs in batches rather than individually.
///
/// To give an example, consider the following C++ code adapted from the clang
/// regression tests:
///   struct S {
///     int n = 10;
///     int m = 2 * n;
///     S(int a) : n(a) {}
///   };
///
///   template<typename T>
///   struct U {
///     T *r = &q;
///     T q = 42;
///     U *p = this;
///   };
///
///   U<S> e;
///
/// The global static constructor for 'e' will need to initialize 'r' and 'p' of
/// the outer struct, while also initializing the inner 'q' struct's 'n' and 'm'
/// members. This batch algorithm will simply use the general CommitValueTo()
/// method to handle the complex nested 'S' struct initialization of 'q', before
/// processing the outermost members in a single batch. Using CommitValueTo() to
/// handle members in the outer struct is inefficient when the struct/array is
/// very large, as we end up creating and destroying constant arrays for each
/// initialization.
/// For the above case, we expect the following IR to be generated:
///
///   %struct.U = type { %struct.S*, %struct.S, %struct.U* }
///   %struct.S = type { i32, i32 }
///   @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
///                                                    i64 0, i32 1),
///                           %struct.S { i32 42, i32 84 }, %struct.U* @e }
///
/// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
/// constant expression, while the other two elements of @e are "simple".
2640 static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) { 2641 SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs; 2642 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs; 2643 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs; 2644 SimpleCEs.reserve(Mem.size()); 2645 2646 for (const auto &I : Mem) { 2647 if (auto *GV = dyn_cast<GlobalVariable>(I.first)) { 2648 GVs.push_back(std::make_pair(GV, I.second)); 2649 } else { 2650 ConstantExpr *GEP = cast<ConstantExpr>(I.first); 2651 // We don't handle the deeply recursive case using the batch method. 2652 if (GEP->getNumOperands() > 3) 2653 ComplexCEs.push_back(std::make_pair(GEP, I.second)); 2654 else 2655 SimpleCEs.push_back(std::make_pair(GEP, I.second)); 2656 } 2657 } 2658 2659 // The algorithm below doesn't handle cases like nested structs, so use the 2660 // slower fully general method if we have to. 2661 for (auto ComplexCE : ComplexCEs) 2662 CommitValueTo(ComplexCE.second, ComplexCE.first); 2663 2664 for (auto GVPair : GVs) { 2665 assert(GVPair.first->hasInitializer()); 2666 GVPair.first->setInitializer(GVPair.second); 2667 } 2668 2669 if (SimpleCEs.empty()) 2670 return; 2671 2672 // We cache a single global's initializer elements in the case where the 2673 // subsequent address/val pair uses the same one. This avoids throwing away and 2674 // rebuilding the constant struct/vector/array just because one element is 2675 // modified at a time. 2676 SmallVector<Constant *, 32> Elts; 2677 Elts.reserve(SimpleCEs.size()); 2678 GlobalVariable *CurrentGV = nullptr; 2679 2680 auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) { 2681 Constant *Init = GV->getInitializer(); 2682 Type *Ty = Init->getType(); 2683 if (Update) { 2684 if (CurrentGV) { 2685 assert(CurrentGV && "Expected a GV to commit to!"); 2686 Type *CurrentInitTy = CurrentGV->getInitializer()->getType(); 2687 // We have a valid cache that needs to be committed. 2688 if (StructType *STy = dyn_cast<StructType>(CurrentInitTy)) 2689 CurrentGV->setInitializer(ConstantStruct::get(STy, Elts)); 2690 else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy)) 2691 CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts)); 2692 else 2693 CurrentGV->setInitializer(ConstantVector::get(Elts)); 2694 } 2695 if (CurrentGV == GV) 2696 return; 2697 // Need to clear and set up cache for new initializer. 2698 CurrentGV = GV; 2699 Elts.clear(); 2700 unsigned NumElts; 2701 if (auto *STy = dyn_cast<StructType>(Ty)) 2702 NumElts = STy->getNumElements(); 2703 else if (auto *ATy = dyn_cast<ArrayType>(Ty)) 2704 NumElts = ATy->getNumElements(); 2705 else 2706 NumElts = cast<FixedVectorType>(Ty)->getNumElements(); 2707 for (unsigned i = 0, e = NumElts; i != e; ++i) 2708 Elts.push_back(Init->getAggregateElement(i)); 2709 } 2710 }; 2711 2712 for (auto CEPair : SimpleCEs) { 2713 ConstantExpr *GEP = CEPair.first; 2714 Constant *Val = CEPair.second; 2715 2716 GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0)); 2717 commitAndSetupCache(GV, GV != CurrentGV); 2718 ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2)); 2719 Elts[CI->getZExtValue()] = Val; 2720 } 2721 // The last initializer in the list needs to be committed, others 2722 // will be committed on a new initializer being processed. 2723 commitAndSetupCache(CurrentGV, true); 2724 } 2725 2726 /// Evaluate static constructors in the function, if we can. Return true if we 2727 /// can, false otherwise. 
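/// For example (a sketch): a constructor whose body reduces to
///   store i32 42, i32* @g
///   ret void
/// is simulated by the Evaluator rather than executed; on success the pending
/// store is committed by rewriting @g's initializer to 42, after which the
/// caller can drop the constructor from llvm.global_ctors.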
2728 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL, 2729 TargetLibraryInfo *TLI) { 2730 // Call the function. 2731 Evaluator Eval(DL, TLI); 2732 Constant *RetValDummy; 2733 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, 2734 SmallVector<Constant*, 0>()); 2735 2736 if (EvalSuccess) { 2737 ++NumCtorsEvaluated; 2738 2739 // We succeeded at evaluation: commit the result. 2740 LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2741 << F->getName() << "' to " 2742 << Eval.getMutatedMemory().size() << " stores.\n"); 2743 BatchCommitValueTo(Eval.getMutatedMemory()); 2744 for (GlobalVariable *GV : Eval.getInvariants()) 2745 GV->setConstant(true); 2746 } 2747 2748 return EvalSuccess; 2749 } 2750 2751 static int compareNames(Constant *const *A, Constant *const *B) { 2752 Value *AStripped = (*A)->stripPointerCasts(); 2753 Value *BStripped = (*B)->stripPointerCasts(); 2754 return AStripped->getName().compare(BStripped->getName()); 2755 } 2756 2757 static void setUsedInitializer(GlobalVariable &V, 2758 const SmallPtrSetImpl<GlobalValue *> &Init) { 2759 if (Init.empty()) { 2760 V.eraseFromParent(); 2761 return; 2762 } 2763 2764 // Type of pointer to the array of pointers. 2765 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); 2766 2767 SmallVector<Constant *, 8> UsedArray; 2768 for (GlobalValue *GV : Init) { 2769 Constant *Cast 2770 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy); 2771 UsedArray.push_back(Cast); 2772 } 2773 // Sort to get deterministic order. 2774 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); 2775 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); 2776 2777 Module *M = V.getParent(); 2778 V.removeFromParent(); 2779 GlobalVariable *NV = 2780 new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage, 2781 ConstantArray::get(ATy, UsedArray), ""); 2782 NV->takeName(&V); 2783 NV->setSection("llvm.metadata"); 2784 delete &V; 2785 } 2786 2787 namespace { 2788 2789 /// An easy to access representation of llvm.used and llvm.compiler.used. 
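/// In IR both lists are appending-linkage arrays of i8* in the
/// "llvm.metadata" section, e.g. (a sketch):
///   @llvm.used = appending global [1 x i8*]
///       [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"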
2790 class LLVMUsed { 2791 SmallPtrSet<GlobalValue *, 8> Used; 2792 SmallPtrSet<GlobalValue *, 8> CompilerUsed; 2793 GlobalVariable *UsedV; 2794 GlobalVariable *CompilerUsedV; 2795 2796 public: 2797 LLVMUsed(Module &M) { 2798 UsedV = collectUsedGlobalVariables(M, Used, false); 2799 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); 2800 } 2801 2802 using iterator = SmallPtrSet<GlobalValue *, 8>::iterator; 2803 using used_iterator_range = iterator_range<iterator>; 2804 2805 iterator usedBegin() { return Used.begin(); } 2806 iterator usedEnd() { return Used.end(); } 2807 2808 used_iterator_range used() { 2809 return used_iterator_range(usedBegin(), usedEnd()); 2810 } 2811 2812 iterator compilerUsedBegin() { return CompilerUsed.begin(); } 2813 iterator compilerUsedEnd() { return CompilerUsed.end(); } 2814 2815 used_iterator_range compilerUsed() { 2816 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd()); 2817 } 2818 2819 bool usedCount(GlobalValue *GV) const { return Used.count(GV); } 2820 2821 bool compilerUsedCount(GlobalValue *GV) const { 2822 return CompilerUsed.count(GV); 2823 } 2824 2825 bool usedErase(GlobalValue *GV) { return Used.erase(GV); } 2826 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } 2827 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; } 2828 2829 bool compilerUsedInsert(GlobalValue *GV) { 2830 return CompilerUsed.insert(GV).second; 2831 } 2832 2833 void syncVariablesAndSets() { 2834 if (UsedV) 2835 setUsedInitializer(*UsedV, Used); 2836 if (CompilerUsedV) 2837 setUsedInitializer(*CompilerUsedV, CompilerUsed); 2838 } 2839 }; 2840 2841 } // end anonymous namespace 2842 2843 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { 2844 if (GA.use_empty()) // No use at all. 2845 return false; 2846 2847 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && 2848 "We should have removed the duplicated " 2849 "element from llvm.compiler.used"); 2850 if (!GA.hasOneUse()) 2851 // Strictly more than one use. So at least one is not in llvm.used and 2852 // llvm.compiler.used. 2853 return true; 2854 2855 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. 2856 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); 2857 } 2858 2859 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, 2860 const LLVMUsed &U) { 2861 unsigned N = 2; 2862 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && 2863 "We should have removed the duplicated " 2864 "element from llvm.compiler.used"); 2865 if (U.usedCount(&V) || U.compilerUsedCount(&V)) 2866 ++N; 2867 return V.hasNUsesOrMore(N); 2868 } 2869 2870 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { 2871 if (!GA.hasLocalLinkage()) 2872 return true; 2873 2874 return U.usedCount(&GA) || U.compilerUsedCount(&GA); 2875 } 2876 2877 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U, 2878 bool &RenameTarget) { 2879 RenameTarget = false; 2880 bool Ret = false; 2881 if (hasUseOtherThanLLVMUsed(GA, U)) 2882 Ret = true; 2883 2884 // If the alias is externally visible, we may still be able to simplify it. 2885 if (!mayHaveOtherReferences(GA, U)) 2886 return Ret; 2887 2888 // If the aliasee has internal linkage, give it the name and linkage 2889 // of the alias, and delete the alias. This turns: 2890 // define internal ... @f(...) 2891 // @a = alias ... @f 2892 // into: 2893 // define ... @a(...) 
2894 Constant *Aliasee = GA.getAliasee(); 2895 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2896 if (!Target->hasLocalLinkage()) 2897 return Ret; 2898 2899 // Do not perform the transform if multiple aliases potentially target the 2900 // aliasee. This check also ensures that it is safe to replace the section 2901 // and other attributes of the aliasee with those of the alias. 2902 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) 2903 return Ret; 2904 2905 RenameTarget = true; 2906 return true; 2907 } 2908 2909 static bool 2910 OptimizeGlobalAliases(Module &M, 2911 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) { 2912 bool Changed = false; 2913 LLVMUsed Used(M); 2914 2915 for (GlobalValue *GV : Used.used()) 2916 Used.compilerUsedErase(GV); 2917 2918 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2919 I != E;) { 2920 GlobalAlias *J = &*I++; 2921 2922 // Aliases without names cannot be referenced outside this module. 2923 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage()) 2924 J->setLinkage(GlobalValue::InternalLinkage); 2925 2926 if (deleteIfDead(*J, NotDiscardableComdats)) { 2927 Changed = true; 2928 continue; 2929 } 2930 2931 // If the alias can change at link time, nothing can be done - bail out. 2932 if (J->isInterposable()) 2933 continue; 2934 2935 Constant *Aliasee = J->getAliasee(); 2936 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts()); 2937 // We can't trivially replace the alias with the aliasee if the aliasee is 2938 // non-trivial in some way. 2939 // TODO: Try to handle non-zero GEPs of local aliasees. 2940 if (!Target) 2941 continue; 2942 Target->removeDeadConstantUsers(); 2943 2944 // Make all users of the alias use the aliasee instead. 2945 bool RenameTarget; 2946 if (!hasUsesToReplace(*J, Used, RenameTarget)) 2947 continue; 2948 2949 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType())); 2950 ++NumAliasesResolved; 2951 Changed = true; 2952 2953 if (RenameTarget) { 2954 // Give the aliasee the name, linkage and other attributes of the alias. 2955 Target->takeName(&*J); 2956 Target->setLinkage(J->getLinkage()); 2957 Target->setDSOLocal(J->isDSOLocal()); 2958 Target->setVisibility(J->getVisibility()); 2959 Target->setDLLStorageClass(J->getDLLStorageClass()); 2960 2961 if (Used.usedErase(&*J)) 2962 Used.usedInsert(Target); 2963 2964 if (Used.compilerUsedErase(&*J)) 2965 Used.compilerUsedInsert(Target); 2966 } else if (mayHaveOtherReferences(*J, Used)) 2967 continue; 2968 2969 // Delete the alias. 2970 M.getAliasList().erase(J); 2971 ++NumAliasesRemoved; 2972 Changed = true; 2973 } 2974 2975 Used.syncVariablesAndSets(); 2976 2977 return Changed; 2978 } 2979 2980 static Function * 2981 FindCXAAtExit(Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) { 2982 // Hack to get a default TLI before we have actual Function. 2983 auto FuncIter = M.begin(); 2984 if (FuncIter == M.end()) 2985 return nullptr; 2986 auto *TLI = &GetTLI(*FuncIter); 2987 2988 LibFunc F = LibFunc_cxa_atexit; 2989 if (!TLI->has(F)) 2990 return nullptr; 2991 2992 Function *Fn = M.getFunction(TLI->getName(F)); 2993 if (!Fn) 2994 return nullptr; 2995 2996 // Now get the actual TLI for Fn. 2997 TLI = &GetTLI(*Fn); 2998 2999 // Make sure that the function has the correct prototype. 
  if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit)
    return nullptr;

  return Fn;
}

/// Returns whether the given function is an empty C++ destructor and can
/// therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code, so we simply look for a 'ret' in the entry block.
static bool cxxDtorIsEmpty(const Function &Fn) {
  // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
  // nounwind, but that doesn't seem worth doing.
  if (Fn.isDeclaration())
    return false;

  for (auto &I : Fn.getEntryBlock()) {
    if (isa<DbgInfoIntrinsic>(I))
      continue;
    if (isa<ReturnInst>(I))
      return true;
    break;
  }
  return false;
}

static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  /// After constructing a global (or local static) object, that will require
  /// destruction on exit, a termination function is registered as follows:
  ///
  ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  /// call f(p) when DSO d is unloaded, before all such termination calls
  /// registered before this one. It returns zero if registration is
  /// successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the registered
  // function is trivial and remove them.
  bool Changed = false;

  for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
       I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
        dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn || !cxxDtorIsEmpty(*DtorFn))
      continue;

    // Just remove the call.
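    // __cxa_atexit returns zero on successful registration, so replacing the
    // call's result with the null value of its type (i32 0) makes any caller
    // that inspects the return code observe the "success" that the now-elided
    // registration would have produced.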
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed = true;
  }

  return Changed;
}

static bool optimizeGlobalsInModule(
    Module &M, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<TargetTransformInfo &(Function &)> GetTTI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
  bool Changed = false;
  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    NotDiscardableComdats.clear();
    for (const GlobalVariable &GV : M.globals())
      if (const Comdat *C = GV.getComdat())
        if (!GV.isDiscardableIfUnused() || !GV.use_empty())
          NotDiscardableComdats.insert(C);
    for (Function &F : M)
      if (const Comdat *C = F.getComdat())
        if (!F.isDefTriviallyDead())
          NotDiscardableComdats.insert(C);
    for (GlobalAlias &GA : M.aliases())
      if (const Comdat *C = GA.getComdat())
        if (!GA.isDiscardableIfUnused() || !GA.use_empty())
          NotDiscardableComdats.insert(C);

    // Delete functions that are trivially dead, ccc -> fastcc.
    LocalChange |= OptimizeFunctions(M, GetTLI, GetTTI, GetBFI, LookupDomTree,
                                     NotDiscardableComdats);

    // Optimize global_ctors list.
    LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
      return EvaluateStaticConstructor(F, DL, &GetTLI(*F));
    });

    // Optimize non-address-taken globals.
    LocalChange |=
        OptimizeGlobalVars(M, GetTLI, LookupDomTree, NotDiscardableComdats);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);

    // Try to remove trivial global destructors if they are not removed
    // already.
    Function *CXAAtExitFn = FindCXAAtExit(M, GetTLI);
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.
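
  // Note: the while (LocalChange) loop above iterates to a fixed point because
  // the subpasses feed each other; for example, deleting a dead alias can
  // leave its internal aliasee unused, which the next OptimizeFunctions round
  // can then delete.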

  return Changed;
}

PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &DL = M.getDataLayout();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
    return FAM.getResult<DominatorTreeAnalysis>(F);
  };
  auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };
  auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };

  auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
    return FAM.getResult<BlockFrequencyAnalysis>(F);
  };

  if (!optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI, LookupDomTree))
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

namespace {

struct GlobalOptLegacyPass : public ModulePass {
  static char ID; // Pass identification, replacement for typeid

  GlobalOptLegacyPass() : ModulePass(ID) {
    initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    auto &DL = M.getDataLayout();
    auto LookupDomTree = [this](Function &F) -> DominatorTree & {
      return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
    };
    auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
      return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    };
    auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };

    auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
      return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
    };

    return optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI,
                                   LookupDomTree);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
  }
};

} // end anonymous namespace

char GlobalOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() {
  return new GlobalOptLegacyPass();
}
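
// Usage sketch: to exercise this pass in isolation with the standard `opt`
// tool, run it under the new pass manager as
//   opt -passes=globalopt input.ll -S -o output.ll
// or under the legacy pass manager as
//   opt -globalopt input.ll -S -o output.ll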