1 //===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements whole program optimization of virtual calls in cases 10 // where we know (via !type metadata) that the list of callees is fixed. This 11 // includes the following: 12 // - Single implementation devirtualization: if a virtual call has a single 13 // possible callee, replace all calls with a direct call to that callee. 14 // - Virtual constant propagation: if the virtual function's return type is an 15 // integer <=64 bits and all possible callees are readnone, for each class and 16 // each list of constant arguments: evaluate the function, store the return 17 // value alongside the virtual table, and rewrite each virtual call as a load 18 // from the virtual table. 19 // - Uniform return value optimization: if the conditions for virtual constant 20 // propagation hold and each function returns the same constant value, replace 21 // each virtual call with that constant. 22 // - Unique return value optimization for i1 return values: if the conditions 23 // for virtual constant propagation hold and a single vtable's function 24 // returns 0, or a single vtable's function returns 1, replace each virtual 25 // call with a comparison of the vptr against that vtable's address. 26 // 27 // This pass is intended to be used during the regular and thin LTO pipelines: 28 // 29 // During regular LTO, the pass determines the best optimization for each 30 // virtual call and applies the resolutions directly to virtual calls that are 31 // eligible for virtual call optimization (i.e. calls that use either of the 32 // llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics). 33 // 34 // During hybrid Regular/ThinLTO, the pass operates in two phases: 35 // - Export phase: this is run during the thin link over a single merged module 36 // that contains all vtables with !type metadata that participate in the link. 37 // The pass computes a resolution for each virtual call and stores it in the 38 // type identifier summary. 39 // - Import phase: this is run during the thin backends over the individual 40 // modules. The pass applies the resolutions previously computed during the 41 // import phase to each eligible virtual call. 42 // 43 // During ThinLTO, the pass operates in two phases: 44 // - Export phase: this is run during the thin link over the index which 45 // contains a summary of all vtables with !type metadata that participate in 46 // the link. It computes a resolution for each virtual call and stores it in 47 // the type identifier summary. Only single implementation devirtualization 48 // is supported. 49 // - Import phase: (same as with hybrid case above). 
50 // 51 //===----------------------------------------------------------------------===// 52 53 #include "llvm/Transforms/IPO/WholeProgramDevirt.h" 54 #include "llvm/ADT/ArrayRef.h" 55 #include "llvm/ADT/DenseMap.h" 56 #include "llvm/ADT/DenseMapInfo.h" 57 #include "llvm/ADT/DenseSet.h" 58 #include "llvm/ADT/MapVector.h" 59 #include "llvm/ADT/SmallVector.h" 60 #include "llvm/ADT/Triple.h" 61 #include "llvm/ADT/iterator_range.h" 62 #include "llvm/Analysis/AssumptionCache.h" 63 #include "llvm/Analysis/BasicAliasAnalysis.h" 64 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 65 #include "llvm/Analysis/TypeMetadataUtils.h" 66 #include "llvm/Bitcode/BitcodeReader.h" 67 #include "llvm/Bitcode/BitcodeWriter.h" 68 #include "llvm/IR/Constants.h" 69 #include "llvm/IR/DataLayout.h" 70 #include "llvm/IR/DebugLoc.h" 71 #include "llvm/IR/DerivedTypes.h" 72 #include "llvm/IR/Dominators.h" 73 #include "llvm/IR/Function.h" 74 #include "llvm/IR/GlobalAlias.h" 75 #include "llvm/IR/GlobalVariable.h" 76 #include "llvm/IR/IRBuilder.h" 77 #include "llvm/IR/InstrTypes.h" 78 #include "llvm/IR/Instruction.h" 79 #include "llvm/IR/Instructions.h" 80 #include "llvm/IR/Intrinsics.h" 81 #include "llvm/IR/LLVMContext.h" 82 #include "llvm/IR/Metadata.h" 83 #include "llvm/IR/Module.h" 84 #include "llvm/IR/ModuleSummaryIndexYAML.h" 85 #include "llvm/InitializePasses.h" 86 #include "llvm/Pass.h" 87 #include "llvm/PassRegistry.h" 88 #include "llvm/Support/Casting.h" 89 #include "llvm/Support/CommandLine.h" 90 #include "llvm/Support/Errc.h" 91 #include "llvm/Support/Error.h" 92 #include "llvm/Support/FileSystem.h" 93 #include "llvm/Support/GlobPattern.h" 94 #include "llvm/Support/MathExtras.h" 95 #include "llvm/Transforms/IPO.h" 96 #include "llvm/Transforms/IPO/FunctionAttrs.h" 97 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 98 #include "llvm/Transforms/Utils/Evaluator.h" 99 #include <algorithm> 100 #include <cstddef> 101 #include <map> 102 #include <set> 103 #include <string> 104 105 using namespace llvm; 106 using namespace wholeprogramdevirt; 107 108 #define DEBUG_TYPE "wholeprogramdevirt" 109 110 static cl::opt<PassSummaryAction> ClSummaryAction( 111 "wholeprogramdevirt-summary-action", 112 cl::desc("What to do with the summary when running this pass"), 113 cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"), 114 clEnumValN(PassSummaryAction::Import, "import", 115 "Import typeid resolutions from summary and globals"), 116 clEnumValN(PassSummaryAction::Export, "export", 117 "Export typeid resolutions to summary and globals")), 118 cl::Hidden); 119 120 static cl::opt<std::string> ClReadSummary( 121 "wholeprogramdevirt-read-summary", 122 cl::desc( 123 "Read summary from given bitcode or YAML file before running pass"), 124 cl::Hidden); 125 126 static cl::opt<std::string> ClWriteSummary( 127 "wholeprogramdevirt-write-summary", 128 cl::desc("Write summary to given bitcode or YAML file after running pass. 
" 129 "Output file format is deduced from extension: *.bc means writing " 130 "bitcode, otherwise YAML"), 131 cl::Hidden); 132 133 static cl::opt<unsigned> 134 ClThreshold("wholeprogramdevirt-branch-funnel-threshold", cl::Hidden, 135 cl::init(10), cl::ZeroOrMore, 136 cl::desc("Maximum number of call targets per " 137 "call site to enable branch funnels")); 138 139 static cl::opt<bool> 140 PrintSummaryDevirt("wholeprogramdevirt-print-index-based", cl::Hidden, 141 cl::init(false), cl::ZeroOrMore, 142 cl::desc("Print index-based devirtualization messages")); 143 144 /// Provide a way to force enable whole program visibility in tests. 145 /// This is needed to support legacy tests that don't contain 146 /// !vcall_visibility metadata (the mere presense of type tests 147 /// previously implied hidden visibility). 148 static cl::opt<bool> 149 WholeProgramVisibility("whole-program-visibility", cl::init(false), 150 cl::Hidden, cl::ZeroOrMore, 151 cl::desc("Enable whole program visibility")); 152 153 /// Provide a way to force disable whole program for debugging or workarounds, 154 /// when enabled via the linker. 155 static cl::opt<bool> DisableWholeProgramVisibility( 156 "disable-whole-program-visibility", cl::init(false), cl::Hidden, 157 cl::ZeroOrMore, 158 cl::desc("Disable whole program visibility (overrides enabling options)")); 159 160 /// Provide way to prevent certain function from being devirtualized 161 static cl::list<std::string> 162 SkipFunctionNames("wholeprogramdevirt-skip", 163 cl::desc("Prevent function(s) from being devirtualized"), 164 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated); 165 166 /// Mechanism to add runtime checking of devirtualization decisions, trapping on 167 /// any that are not correct. Useful for debugging undefined behavior leading to 168 /// failures with WPD. 169 static cl::opt<bool> 170 CheckDevirt("wholeprogramdevirt-check", cl::init(false), cl::Hidden, 171 cl::ZeroOrMore, 172 cl::desc("Add code to trap on incorrect devirtualizations")); 173 174 namespace { 175 struct PatternList { 176 std::vector<GlobPattern> Patterns; 177 template <class T> void init(const T &StringList) { 178 for (const auto &S : StringList) 179 if (Expected<GlobPattern> Pat = GlobPattern::create(S)) 180 Patterns.push_back(std::move(*Pat)); 181 } 182 bool match(StringRef S) { 183 for (const GlobPattern &P : Patterns) 184 if (P.match(S)) 185 return true; 186 return false; 187 } 188 }; 189 } // namespace 190 191 // Find the minimum offset that we may store a value of size Size bits at. If 192 // IsAfter is set, look for an offset before the object, otherwise look for an 193 // offset after the object. 194 uint64_t 195 wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets, 196 bool IsAfter, uint64_t Size) { 197 // Find a minimum offset taking into account only vtable sizes. 198 uint64_t MinByte = 0; 199 for (const VirtualCallTarget &Target : Targets) { 200 if (IsAfter) 201 MinByte = std::max(MinByte, Target.minAfterBytes()); 202 else 203 MinByte = std::max(MinByte, Target.minBeforeBytes()); 204 } 205 206 // Build a vector of arrays of bytes covering, for each target, a slice of the 207 // used region (see AccumBitVector::BytesUsed in 208 // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively, 209 // this aligns the used regions to start at MinByte. 210 // 211 // In this example, A, B and C are vtables, # is a byte already allocated for 212 // a virtual function pointer, AAAA... (etc.) 
are the used regions for the 213 // vtables and Offset(X) is the value computed for the Offset variable below 214 // for X. 215 // 216 // Offset(A) 217 // | | 218 // |MinByte 219 // A: ################AAAAAAAA|AAAAAAAA 220 // B: ########BBBBBBBBBBBBBBBB|BBBB 221 // C: ########################|CCCCCCCCCCCCCCCC 222 // | Offset(B) | 223 // 224 // This code produces the slices of A, B and C that appear after the divider 225 // at MinByte. 226 std::vector<ArrayRef<uint8_t>> Used; 227 for (const VirtualCallTarget &Target : Targets) { 228 ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed 229 : Target.TM->Bits->Before.BytesUsed; 230 uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes() 231 : MinByte - Target.minBeforeBytes(); 232 233 // Disregard used regions that are smaller than Offset. These are 234 // effectively all-free regions that do not need to be checked. 235 if (VTUsed.size() > Offset) 236 Used.push_back(VTUsed.slice(Offset)); 237 } 238 239 if (Size == 1) { 240 // Find a free bit in each member of Used. 241 for (unsigned I = 0;; ++I) { 242 uint8_t BitsUsed = 0; 243 for (auto &&B : Used) 244 if (I < B.size()) 245 BitsUsed |= B[I]; 246 if (BitsUsed != 0xff) 247 return (MinByte + I) * 8 + 248 countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined); 249 } 250 } else { 251 // Find a free (Size/8) byte region in each member of Used. 252 // FIXME: see if alignment helps. 253 for (unsigned I = 0;; ++I) { 254 for (auto &&B : Used) { 255 unsigned Byte = 0; 256 while ((I + Byte) < B.size() && Byte < (Size / 8)) { 257 if (B[I + Byte]) 258 goto NextI; 259 ++Byte; 260 } 261 } 262 return (MinByte + I) * 8; 263 NextI:; 264 } 265 } 266 } 267 268 void wholeprogramdevirt::setBeforeReturnValues( 269 MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore, 270 unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) { 271 if (BitWidth == 1) 272 OffsetByte = -(AllocBefore / 8 + 1); 273 else 274 OffsetByte = -((AllocBefore + 7) / 8 + (BitWidth + 7) / 8); 275 OffsetBit = AllocBefore % 8; 276 277 for (VirtualCallTarget &Target : Targets) { 278 if (BitWidth == 1) 279 Target.setBeforeBit(AllocBefore); 280 else 281 Target.setBeforeBytes(AllocBefore, (BitWidth + 7) / 8); 282 } 283 } 284 285 void wholeprogramdevirt::setAfterReturnValues( 286 MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter, 287 unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) { 288 if (BitWidth == 1) 289 OffsetByte = AllocAfter / 8; 290 else 291 OffsetByte = (AllocAfter + 7) / 8; 292 OffsetBit = AllocAfter % 8; 293 294 for (VirtualCallTarget &Target : Targets) { 295 if (BitWidth == 1) 296 Target.setAfterBit(AllocAfter); 297 else 298 Target.setAfterBytes(AllocAfter, (BitWidth + 7) / 8); 299 } 300 } 301 302 VirtualCallTarget::VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM) 303 : Fn(Fn), TM(TM), 304 IsBigEndian(Fn->getParent()->getDataLayout().isBigEndian()), WasDevirt(false) {} 305 306 namespace { 307 308 // A slot in a set of virtual tables. The TypeID identifies the set of virtual 309 // tables, and the ByteOffset is the offset in bytes from the address point to 310 // the virtual function pointer. 
311 struct VTableSlot { 312 Metadata *TypeID; 313 uint64_t ByteOffset; 314 }; 315 316 } // end anonymous namespace 317 318 namespace llvm { 319 320 template <> struct DenseMapInfo<VTableSlot> { 321 static VTableSlot getEmptyKey() { 322 return {DenseMapInfo<Metadata *>::getEmptyKey(), 323 DenseMapInfo<uint64_t>::getEmptyKey()}; 324 } 325 static VTableSlot getTombstoneKey() { 326 return {DenseMapInfo<Metadata *>::getTombstoneKey(), 327 DenseMapInfo<uint64_t>::getTombstoneKey()}; 328 } 329 static unsigned getHashValue(const VTableSlot &I) { 330 return DenseMapInfo<Metadata *>::getHashValue(I.TypeID) ^ 331 DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset); 332 } 333 static bool isEqual(const VTableSlot &LHS, 334 const VTableSlot &RHS) { 335 return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset; 336 } 337 }; 338 339 template <> struct DenseMapInfo<VTableSlotSummary> { 340 static VTableSlotSummary getEmptyKey() { 341 return {DenseMapInfo<StringRef>::getEmptyKey(), 342 DenseMapInfo<uint64_t>::getEmptyKey()}; 343 } 344 static VTableSlotSummary getTombstoneKey() { 345 return {DenseMapInfo<StringRef>::getTombstoneKey(), 346 DenseMapInfo<uint64_t>::getTombstoneKey()}; 347 } 348 static unsigned getHashValue(const VTableSlotSummary &I) { 349 return DenseMapInfo<StringRef>::getHashValue(I.TypeID) ^ 350 DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset); 351 } 352 static bool isEqual(const VTableSlotSummary &LHS, 353 const VTableSlotSummary &RHS) { 354 return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset; 355 } 356 }; 357 358 } // end namespace llvm 359 360 namespace { 361 362 // A virtual call site. VTable is the loaded virtual table pointer, and CS is 363 // the indirect virtual call. 364 struct VirtualCallSite { 365 Value *VTable = nullptr; 366 CallBase &CB; 367 368 // If non-null, this field points to the associated unsafe use count stored in 369 // the DevirtModule::NumUnsafeUsesForTypeTest map below. See the description 370 // of that field for details. 371 unsigned *NumUnsafeUses = nullptr; 372 373 void 374 emitRemark(const StringRef OptName, const StringRef TargetName, 375 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter) { 376 Function *F = CB.getCaller(); 377 DebugLoc DLoc = CB.getDebugLoc(); 378 BasicBlock *Block = CB.getParent(); 379 380 using namespace ore; 381 OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, OptName, DLoc, Block) 382 << NV("Optimization", OptName) 383 << ": devirtualized a call to " 384 << NV("FunctionName", TargetName)); 385 } 386 387 void replaceAndErase( 388 const StringRef OptName, const StringRef TargetName, bool RemarksEnabled, 389 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter, 390 Value *New) { 391 if (RemarksEnabled) 392 emitRemark(OptName, TargetName, OREGetter); 393 CB.replaceAllUsesWith(New); 394 if (auto *II = dyn_cast<InvokeInst>(&CB)) { 395 BranchInst::Create(II->getNormalDest(), &CB); 396 II->getUnwindDest()->removePredecessor(II->getParent()); 397 } 398 CB.eraseFromParent(); 399 // This use is no longer unsafe. 400 if (NumUnsafeUses) 401 --*NumUnsafeUses; 402 } 403 }; 404 405 // Call site information collected for a specific VTableSlot and possibly a list 406 // of constant integer arguments. The grouping by arguments is handled by the 407 // VTableSlotInfo class. 408 struct CallSiteInfo { 409 /// The set of call sites for this slot. 
Used during regular LTO and the 410 /// import phase of ThinLTO (as well as the export phase of ThinLTO for any 411 /// call sites that appear in the merged module itself); in each of these 412 /// cases we are directly operating on the call sites at the IR level. 413 std::vector<VirtualCallSite> CallSites; 414 415 /// Whether all call sites represented by this CallSiteInfo, including those 416 /// in summaries, have been devirtualized. This starts off as true because a 417 /// default constructed CallSiteInfo represents no call sites. 418 bool AllCallSitesDevirted = true; 419 420 // These fields are used during the export phase of ThinLTO and reflect 421 // information collected from function summaries. 422 423 /// Whether any function summary contains an llvm.assume(llvm.type.test) for 424 /// this slot. 425 bool SummaryHasTypeTestAssumeUsers = false; 426 427 /// CFI-specific: a vector containing the list of function summaries that use 428 /// the llvm.type.checked.load intrinsic and therefore will require 429 /// resolutions for llvm.type.test in order to implement CFI checks if 430 /// devirtualization was unsuccessful. If devirtualization was successful, the 431 /// pass will clear this vector by calling markDevirt(). If at the end of the 432 /// pass the vector is non-empty, we will need to add a use of llvm.type.test 433 /// to each of the function summaries in the vector. 434 std::vector<FunctionSummary *> SummaryTypeCheckedLoadUsers; 435 std::vector<FunctionSummary *> SummaryTypeTestAssumeUsers; 436 437 bool isExported() const { 438 return SummaryHasTypeTestAssumeUsers || 439 !SummaryTypeCheckedLoadUsers.empty(); 440 } 441 442 void addSummaryTypeCheckedLoadUser(FunctionSummary *FS) { 443 SummaryTypeCheckedLoadUsers.push_back(FS); 444 AllCallSitesDevirted = false; 445 } 446 447 void addSummaryTypeTestAssumeUser(FunctionSummary *FS) { 448 SummaryTypeTestAssumeUsers.push_back(FS); 449 SummaryHasTypeTestAssumeUsers = true; 450 AllCallSitesDevirted = false; 451 } 452 453 void markDevirt() { 454 AllCallSitesDevirted = true; 455 456 // As explained in the comment for SummaryTypeCheckedLoadUsers. 457 SummaryTypeCheckedLoadUsers.clear(); 458 } 459 }; 460 461 // Call site information collected for a specific VTableSlot. 462 struct VTableSlotInfo { 463 // The set of call sites which do not have all constant integer arguments 464 // (excluding "this"). 465 CallSiteInfo CSInfo; 466 467 // The set of call sites with all constant integer arguments (excluding 468 // "this"), grouped by argument list. 
469 std::map<std::vector<uint64_t>, CallSiteInfo> ConstCSInfo; 470 471 void addCallSite(Value *VTable, CallBase &CB, unsigned *NumUnsafeUses); 472 473 private: 474 CallSiteInfo &findCallSiteInfo(CallBase &CB); 475 }; 476 477 CallSiteInfo &VTableSlotInfo::findCallSiteInfo(CallBase &CB) { 478 std::vector<uint64_t> Args; 479 auto *CBType = dyn_cast<IntegerType>(CB.getType()); 480 if (!CBType || CBType->getBitWidth() > 64 || CB.arg_empty()) 481 return CSInfo; 482 for (auto &&Arg : drop_begin(CB.args())) { 483 auto *CI = dyn_cast<ConstantInt>(Arg); 484 if (!CI || CI->getBitWidth() > 64) 485 return CSInfo; 486 Args.push_back(CI->getZExtValue()); 487 } 488 return ConstCSInfo[Args]; 489 } 490 491 void VTableSlotInfo::addCallSite(Value *VTable, CallBase &CB, 492 unsigned *NumUnsafeUses) { 493 auto &CSI = findCallSiteInfo(CB); 494 CSI.AllCallSitesDevirted = false; 495 CSI.CallSites.push_back({VTable, CB, NumUnsafeUses}); 496 } 497 498 struct DevirtModule { 499 Module &M; 500 function_ref<AAResults &(Function &)> AARGetter; 501 function_ref<DominatorTree &(Function &)> LookupDomTree; 502 503 ModuleSummaryIndex *ExportSummary; 504 const ModuleSummaryIndex *ImportSummary; 505 506 IntegerType *Int8Ty; 507 PointerType *Int8PtrTy; 508 IntegerType *Int32Ty; 509 IntegerType *Int64Ty; 510 IntegerType *IntPtrTy; 511 /// Sizeless array type, used for imported vtables. This provides a signal 512 /// to analyzers that these imports may alias, as they do for example 513 /// when multiple unique return values occur in the same vtable. 514 ArrayType *Int8Arr0Ty; 515 516 bool RemarksEnabled; 517 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter; 518 519 MapVector<VTableSlot, VTableSlotInfo> CallSlots; 520 521 // Calls that have already been optimized. We may add a call to multiple 522 // VTableSlotInfos if vtable loads are coalesced and need to make sure not to 523 // optimize a call more than once. 524 SmallPtrSet<CallBase *, 8> OptimizedCalls; 525 526 // This map keeps track of the number of "unsafe" uses of a loaded function 527 // pointer. The key is the associated llvm.type.test intrinsic call generated 528 // by this pass. An unsafe use is one that calls the loaded function pointer 529 // directly. Every time we eliminate an unsafe use (for example, by 530 // devirtualizing it or by applying virtual constant propagation), we 531 // decrement the value stored in this map. If a value reaches zero, we can 532 // eliminate the type check by RAUWing the associated llvm.type.test call with 533 // true. 
534 std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest; 535 PatternList FunctionsToSkip; 536 537 DevirtModule(Module &M, function_ref<AAResults &(Function &)> AARGetter, 538 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter, 539 function_ref<DominatorTree &(Function &)> LookupDomTree, 540 ModuleSummaryIndex *ExportSummary, 541 const ModuleSummaryIndex *ImportSummary) 542 : M(M), AARGetter(AARGetter), LookupDomTree(LookupDomTree), 543 ExportSummary(ExportSummary), ImportSummary(ImportSummary), 544 Int8Ty(Type::getInt8Ty(M.getContext())), 545 Int8PtrTy(Type::getInt8PtrTy(M.getContext())), 546 Int32Ty(Type::getInt32Ty(M.getContext())), 547 Int64Ty(Type::getInt64Ty(M.getContext())), 548 IntPtrTy(M.getDataLayout().getIntPtrType(M.getContext(), 0)), 549 Int8Arr0Ty(ArrayType::get(Type::getInt8Ty(M.getContext()), 0)), 550 RemarksEnabled(areRemarksEnabled()), OREGetter(OREGetter) { 551 assert(!(ExportSummary && ImportSummary)); 552 FunctionsToSkip.init(SkipFunctionNames); 553 } 554 555 bool areRemarksEnabled(); 556 557 void 558 scanTypeTestUsers(Function *TypeTestFunc, 559 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap); 560 void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc); 561 562 void buildTypeIdentifierMap( 563 std::vector<VTableBits> &Bits, 564 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap); 565 bool 566 tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot, 567 const std::set<TypeMemberInfo> &TypeMemberInfos, 568 uint64_t ByteOffset); 569 570 void applySingleImplDevirt(VTableSlotInfo &SlotInfo, Constant *TheFn, 571 bool &IsExported); 572 bool trySingleImplDevirt(ModuleSummaryIndex *ExportSummary, 573 MutableArrayRef<VirtualCallTarget> TargetsForSlot, 574 VTableSlotInfo &SlotInfo, 575 WholeProgramDevirtResolution *Res); 576 577 void applyICallBranchFunnel(VTableSlotInfo &SlotInfo, Constant *JT, 578 bool &IsExported); 579 void tryICallBranchFunnel(MutableArrayRef<VirtualCallTarget> TargetsForSlot, 580 VTableSlotInfo &SlotInfo, 581 WholeProgramDevirtResolution *Res, VTableSlot Slot); 582 583 bool tryEvaluateFunctionsWithArgs( 584 MutableArrayRef<VirtualCallTarget> TargetsForSlot, 585 ArrayRef<uint64_t> Args); 586 587 void applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, 588 uint64_t TheRetVal); 589 bool tryUniformRetValOpt(MutableArrayRef<VirtualCallTarget> TargetsForSlot, 590 CallSiteInfo &CSInfo, 591 WholeProgramDevirtResolution::ByArg *Res); 592 593 // Returns the global symbol name that is used to export information about the 594 // given vtable slot and list of arguments. 595 std::string getGlobalName(VTableSlot Slot, ArrayRef<uint64_t> Args, 596 StringRef Name); 597 598 bool shouldExportConstantsAsAbsoluteSymbols(); 599 600 // This function is called during the export phase to create a symbol 601 // definition containing information about the given vtable slot and list of 602 // arguments. 603 void exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name, 604 Constant *C); 605 void exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name, 606 uint32_t Const, uint32_t &Storage); 607 608 // This function is called during the import phase to create a reference to 609 // the symbol definition created during the export phase. 
610 Constant *importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, 611 StringRef Name); 612 Constant *importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, 613 StringRef Name, IntegerType *IntTy, 614 uint32_t Storage); 615 616 Constant *getMemberAddr(const TypeMemberInfo *M); 617 618 void applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, bool IsOne, 619 Constant *UniqueMemberAddr); 620 bool tryUniqueRetValOpt(unsigned BitWidth, 621 MutableArrayRef<VirtualCallTarget> TargetsForSlot, 622 CallSiteInfo &CSInfo, 623 WholeProgramDevirtResolution::ByArg *Res, 624 VTableSlot Slot, ArrayRef<uint64_t> Args); 625 626 void applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName, 627 Constant *Byte, Constant *Bit); 628 bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot, 629 VTableSlotInfo &SlotInfo, 630 WholeProgramDevirtResolution *Res, VTableSlot Slot); 631 632 void rebuildGlobal(VTableBits &B); 633 634 // Apply the summary resolution for Slot to all virtual calls in SlotInfo. 635 void importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo); 636 637 // If we were able to eliminate all unsafe uses for a type checked load, 638 // eliminate the associated type tests by replacing them with true. 639 void removeRedundantTypeTests(); 640 641 bool run(); 642 643 // Lower the module using the action and summary passed as command line 644 // arguments. For testing purposes only. 645 static bool 646 runForTesting(Module &M, function_ref<AAResults &(Function &)> AARGetter, 647 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter, 648 function_ref<DominatorTree &(Function &)> LookupDomTree); 649 }; 650 651 struct DevirtIndex { 652 ModuleSummaryIndex &ExportSummary; 653 // The set in which to record GUIDs exported from their module by 654 // devirtualization, used by client to ensure they are not internalized. 655 std::set<GlobalValue::GUID> &ExportedGUIDs; 656 // A map in which to record the information necessary to locate the WPD 657 // resolution for local targets in case they are exported by cross module 658 // importing. 
659 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap; 660 661 MapVector<VTableSlotSummary, VTableSlotInfo> CallSlots; 662 663 PatternList FunctionsToSkip; 664 665 DevirtIndex( 666 ModuleSummaryIndex &ExportSummary, 667 std::set<GlobalValue::GUID> &ExportedGUIDs, 668 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) 669 : ExportSummary(ExportSummary), ExportedGUIDs(ExportedGUIDs), 670 LocalWPDTargetsMap(LocalWPDTargetsMap) { 671 FunctionsToSkip.init(SkipFunctionNames); 672 } 673 674 bool tryFindVirtualCallTargets(std::vector<ValueInfo> &TargetsForSlot, 675 const TypeIdCompatibleVtableInfo TIdInfo, 676 uint64_t ByteOffset); 677 678 bool trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot, 679 VTableSlotSummary &SlotSummary, 680 VTableSlotInfo &SlotInfo, 681 WholeProgramDevirtResolution *Res, 682 std::set<ValueInfo> &DevirtTargets); 683 684 void run(); 685 }; 686 687 struct WholeProgramDevirt : public ModulePass { 688 static char ID; 689 690 bool UseCommandLine = false; 691 692 ModuleSummaryIndex *ExportSummary = nullptr; 693 const ModuleSummaryIndex *ImportSummary = nullptr; 694 695 WholeProgramDevirt() : ModulePass(ID), UseCommandLine(true) { 696 initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry()); 697 } 698 699 WholeProgramDevirt(ModuleSummaryIndex *ExportSummary, 700 const ModuleSummaryIndex *ImportSummary) 701 : ModulePass(ID), ExportSummary(ExportSummary), 702 ImportSummary(ImportSummary) { 703 initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry()); 704 } 705 706 bool runOnModule(Module &M) override { 707 if (skipModule(M)) 708 return false; 709 710 // In the new pass manager, we can request the optimization 711 // remark emitter pass on a per-function-basis, which the 712 // OREGetter will do for us. 713 // In the old pass manager, this is harder, so we just build 714 // an optimization remark emitter on the fly, when we need it. 
715 std::unique_ptr<OptimizationRemarkEmitter> ORE; 716 auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & { 717 ORE = std::make_unique<OptimizationRemarkEmitter>(F); 718 return *ORE; 719 }; 720 721 auto LookupDomTree = [this](Function &F) -> DominatorTree & { 722 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 723 }; 724 725 if (UseCommandLine) 726 return DevirtModule::runForTesting(M, LegacyAARGetter(*this), OREGetter, 727 LookupDomTree); 728 729 return DevirtModule(M, LegacyAARGetter(*this), OREGetter, LookupDomTree, 730 ExportSummary, ImportSummary) 731 .run(); 732 } 733 734 void getAnalysisUsage(AnalysisUsage &AU) const override { 735 AU.addRequired<AssumptionCacheTracker>(); 736 AU.addRequired<TargetLibraryInfoWrapperPass>(); 737 AU.addRequired<DominatorTreeWrapperPass>(); 738 } 739 }; 740 741 } // end anonymous namespace 742 743 INITIALIZE_PASS_BEGIN(WholeProgramDevirt, "wholeprogramdevirt", 744 "Whole program devirtualization", false, false) 745 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 746 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 747 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 748 INITIALIZE_PASS_END(WholeProgramDevirt, "wholeprogramdevirt", 749 "Whole program devirtualization", false, false) 750 char WholeProgramDevirt::ID = 0; 751 752 ModulePass * 753 llvm::createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary, 754 const ModuleSummaryIndex *ImportSummary) { 755 return new WholeProgramDevirt(ExportSummary, ImportSummary); 756 } 757 758 PreservedAnalyses WholeProgramDevirtPass::run(Module &M, 759 ModuleAnalysisManager &AM) { 760 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 761 auto AARGetter = [&](Function &F) -> AAResults & { 762 return FAM.getResult<AAManager>(F); 763 }; 764 auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & { 765 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 766 }; 767 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & { 768 return FAM.getResult<DominatorTreeAnalysis>(F); 769 }; 770 if (UseCommandLine) { 771 if (DevirtModule::runForTesting(M, AARGetter, OREGetter, LookupDomTree)) 772 return PreservedAnalyses::all(); 773 return PreservedAnalyses::none(); 774 } 775 if (!DevirtModule(M, AARGetter, OREGetter, LookupDomTree, ExportSummary, 776 ImportSummary) 777 .run()) 778 return PreservedAnalyses::all(); 779 return PreservedAnalyses::none(); 780 } 781 782 // Enable whole program visibility if enabled by client (e.g. linker) or 783 // internal option, and not force disabled. 784 static bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO) { 785 return (WholeProgramVisibilityEnabledInLTO || WholeProgramVisibility) && 786 !DisableWholeProgramVisibility; 787 } 788 789 namespace llvm { 790 791 /// If whole program visibility asserted, then upgrade all public vcall 792 /// visibility metadata on vtable definitions to linkage unit visibility in 793 /// Module IR (for regular or hybrid LTO). 794 void updateVCallVisibilityInModule( 795 Module &M, bool WholeProgramVisibilityEnabledInLTO, 796 const DenseSet<GlobalValue::GUID> &DynamicExportSymbols) { 797 if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO)) 798 return; 799 for (GlobalVariable &GV : M.globals()) 800 // Add linkage unit visibility to any variable with type metadata, which are 801 // the vtable definitions. We won't have an existing vcall_visibility 802 // metadata on vtable definitions with public visibility. 
803 if (GV.hasMetadata(LLVMContext::MD_type) && 804 GV.getVCallVisibility() == GlobalObject::VCallVisibilityPublic && 805 // Don't upgrade the visibility for symbols exported to the dynamic 806 // linker, as we have no information on their eventual use. 807 !DynamicExportSymbols.count(GV.getGUID())) 808 GV.setVCallVisibilityMetadata(GlobalObject::VCallVisibilityLinkageUnit); 809 } 810 811 /// If whole program visibility asserted, then upgrade all public vcall 812 /// visibility metadata on vtable definition summaries to linkage unit 813 /// visibility in Module summary index (for ThinLTO). 814 void updateVCallVisibilityInIndex( 815 ModuleSummaryIndex &Index, bool WholeProgramVisibilityEnabledInLTO, 816 const DenseSet<GlobalValue::GUID> &DynamicExportSymbols) { 817 if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO)) 818 return; 819 for (auto &P : Index) { 820 for (auto &S : P.second.SummaryList) { 821 auto *GVar = dyn_cast<GlobalVarSummary>(S.get()); 822 if (!GVar || 823 GVar->getVCallVisibility() != GlobalObject::VCallVisibilityPublic || 824 // Don't upgrade the visibility for symbols exported to the dynamic 825 // linker, as we have no information on their eventual use. 826 DynamicExportSymbols.count(P.first)) 827 continue; 828 GVar->setVCallVisibility(GlobalObject::VCallVisibilityLinkageUnit); 829 } 830 } 831 } 832 833 void runWholeProgramDevirtOnIndex( 834 ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs, 835 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) { 836 DevirtIndex(Summary, ExportedGUIDs, LocalWPDTargetsMap).run(); 837 } 838 839 void updateIndexWPDForExports( 840 ModuleSummaryIndex &Summary, 841 function_ref<bool(StringRef, ValueInfo)> isExported, 842 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) { 843 for (auto &T : LocalWPDTargetsMap) { 844 auto &VI = T.first; 845 // This was enforced earlier during trySingleImplDevirt. 846 assert(VI.getSummaryList().size() == 1 && 847 "Devirt of local target has more than one copy"); 848 auto &S = VI.getSummaryList()[0]; 849 if (!isExported(S->modulePath(), VI)) 850 continue; 851 852 // It's been exported by a cross module import. 853 for (auto &SlotSummary : T.second) { 854 auto *TIdSum = Summary.getTypeIdSummary(SlotSummary.TypeID); 855 assert(TIdSum); 856 auto WPDRes = TIdSum->WPDRes.find(SlotSummary.ByteOffset); 857 assert(WPDRes != TIdSum->WPDRes.end()); 858 WPDRes->second.SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal( 859 WPDRes->second.SingleImplName, 860 Summary.getModuleHash(S->modulePath())); 861 } 862 } 863 } 864 865 } // end namespace llvm 866 867 static Error checkCombinedSummaryForTesting(ModuleSummaryIndex *Summary) { 868 // Check that summary index contains regular LTO module when performing 869 // export to prevent occasional use of index from pure ThinLTO compilation 870 // (-fno-split-lto-module). This kind of summary index is passed to 871 // DevirtIndex::run, not to DevirtModule::run used by opt/runForTesting. 
872 const auto &ModPaths = Summary->modulePaths(); 873 if (ClSummaryAction != PassSummaryAction::Import && 874 ModPaths.find(ModuleSummaryIndex::getRegularLTOModuleName()) == 875 ModPaths.end()) 876 return createStringError( 877 errc::invalid_argument, 878 "combined summary should contain Regular LTO module"); 879 return ErrorSuccess(); 880 } 881 882 bool DevirtModule::runForTesting( 883 Module &M, function_ref<AAResults &(Function &)> AARGetter, 884 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter, 885 function_ref<DominatorTree &(Function &)> LookupDomTree) { 886 std::unique_ptr<ModuleSummaryIndex> Summary = 887 std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false); 888 889 // Handle the command-line summary arguments. This code is for testing 890 // purposes only, so we handle errors directly. 891 if (!ClReadSummary.empty()) { 892 ExitOnError ExitOnErr("-wholeprogramdevirt-read-summary: " + ClReadSummary + 893 ": "); 894 auto ReadSummaryFile = 895 ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary))); 896 if (Expected<std::unique_ptr<ModuleSummaryIndex>> SummaryOrErr = 897 getModuleSummaryIndex(*ReadSummaryFile)) { 898 Summary = std::move(*SummaryOrErr); 899 ExitOnErr(checkCombinedSummaryForTesting(Summary.get())); 900 } else { 901 // Try YAML if we've failed with bitcode. 902 consumeError(SummaryOrErr.takeError()); 903 yaml::Input In(ReadSummaryFile->getBuffer()); 904 In >> *Summary; 905 ExitOnErr(errorCodeToError(In.error())); 906 } 907 } 908 909 bool Changed = 910 DevirtModule(M, AARGetter, OREGetter, LookupDomTree, 911 ClSummaryAction == PassSummaryAction::Export ? Summary.get() 912 : nullptr, 913 ClSummaryAction == PassSummaryAction::Import ? Summary.get() 914 : nullptr) 915 .run(); 916 917 if (!ClWriteSummary.empty()) { 918 ExitOnError ExitOnErr( 919 "-wholeprogramdevirt-write-summary: " + ClWriteSummary + ": "); 920 std::error_code EC; 921 if (StringRef(ClWriteSummary).endswith(".bc")) { 922 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_None); 923 ExitOnErr(errorCodeToError(EC)); 924 WriteIndexToFile(*Summary, OS); 925 } else { 926 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF); 927 ExitOnErr(errorCodeToError(EC)); 928 yaml::Output Out(OS); 929 Out << *Summary; 930 } 931 } 932 933 return Changed; 934 } 935 936 void DevirtModule::buildTypeIdentifierMap( 937 std::vector<VTableBits> &Bits, 938 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) { 939 DenseMap<GlobalVariable *, VTableBits *> GVToBits; 940 Bits.reserve(M.getGlobalList().size()); 941 SmallVector<MDNode *, 2> Types; 942 for (GlobalVariable &GV : M.globals()) { 943 Types.clear(); 944 GV.getMetadata(LLVMContext::MD_type, Types); 945 if (GV.isDeclaration() || Types.empty()) 946 continue; 947 948 VTableBits *&BitsPtr = GVToBits[&GV]; 949 if (!BitsPtr) { 950 Bits.emplace_back(); 951 Bits.back().GV = &GV; 952 Bits.back().ObjectSize = 953 M.getDataLayout().getTypeAllocSize(GV.getInitializer()->getType()); 954 BitsPtr = &Bits.back(); 955 } 956 957 for (MDNode *Type : Types) { 958 auto TypeID = Type->getOperand(1).get(); 959 960 uint64_t Offset = 961 cast<ConstantInt>( 962 cast<ConstantAsMetadata>(Type->getOperand(0))->getValue()) 963 ->getZExtValue(); 964 965 TypeIdMap[TypeID].insert({BitsPtr, Offset}); 966 } 967 } 968 } 969 970 bool DevirtModule::tryFindVirtualCallTargets( 971 std::vector<VirtualCallTarget> &TargetsForSlot, 972 const std::set<TypeMemberInfo> &TypeMemberInfos, uint64_t ByteOffset) { 973 for (const TypeMemberInfo &TM : TypeMemberInfos) { 974 if 
(!TM.Bits->GV->isConstant()) 975 return false; 976 977 // We cannot perform whole program devirtualization analysis on a vtable 978 // with public LTO visibility. 979 if (TM.Bits->GV->getVCallVisibility() == 980 GlobalObject::VCallVisibilityPublic) 981 return false; 982 983 Constant *Ptr = getPointerAtOffset(TM.Bits->GV->getInitializer(), 984 TM.Offset + ByteOffset, M); 985 if (!Ptr) 986 return false; 987 988 auto Fn = dyn_cast<Function>(Ptr->stripPointerCasts()); 989 if (!Fn) 990 return false; 991 992 if (FunctionsToSkip.match(Fn->getName())) 993 return false; 994 995 // We can disregard __cxa_pure_virtual as a possible call target, as 996 // calls to pure virtuals are UB. 997 if (Fn->getName() == "__cxa_pure_virtual") 998 continue; 999 1000 TargetsForSlot.push_back({Fn, &TM}); 1001 } 1002 1003 // Give up if we couldn't find any targets. 1004 return !TargetsForSlot.empty(); 1005 } 1006 1007 bool DevirtIndex::tryFindVirtualCallTargets( 1008 std::vector<ValueInfo> &TargetsForSlot, const TypeIdCompatibleVtableInfo TIdInfo, 1009 uint64_t ByteOffset) { 1010 for (const TypeIdOffsetVtableInfo &P : TIdInfo) { 1011 // Find a representative copy of the vtable initializer. 1012 // We can have multiple available_externally, linkonce_odr and weak_odr 1013 // vtable initializers. We can also have multiple external vtable 1014 // initializers in the case of comdats, which we cannot check here. 1015 // The linker should give an error in this case. 1016 // 1017 // Also, handle the case of same-named local Vtables with the same path 1018 // and therefore the same GUID. This can happen if there isn't enough 1019 // distinguishing path when compiling the source file. In that case we 1020 // conservatively return false early. 1021 const GlobalVarSummary *VS = nullptr; 1022 bool LocalFound = false; 1023 for (auto &S : P.VTableVI.getSummaryList()) { 1024 if (GlobalValue::isLocalLinkage(S->linkage())) { 1025 if (LocalFound) 1026 return false; 1027 LocalFound = true; 1028 } 1029 auto *CurVS = cast<GlobalVarSummary>(S->getBaseObject()); 1030 if (!CurVS->vTableFuncs().empty() || 1031 // Previously clang did not attach the necessary type metadata to 1032 // available_externally vtables, in which case there would not 1033 // be any vtable functions listed in the summary and we need 1034 // to treat this case conservatively (in case the bitcode is old). 1035 // However, we will also not have any vtable functions in the 1036 // case of a pure virtual base class. In that case we do want 1037 // to set VS to avoid treating it conservatively. 1038 !GlobalValue::isAvailableExternallyLinkage(S->linkage())) { 1039 VS = CurVS; 1040 // We cannot perform whole program devirtualization analysis on a vtable 1041 // with public LTO visibility. 1042 if (VS->getVCallVisibility() == GlobalObject::VCallVisibilityPublic) 1043 return false; 1044 } 1045 } 1046 // There will be no VS if all copies are available_externally having no 1047 // type metadata. In that case we can't safely perform WPD. 1048 if (!VS) 1049 return false; 1050 if (!VS->isLive()) 1051 continue; 1052 for (auto VTP : VS->vTableFuncs()) { 1053 if (VTP.VTableOffset != P.AddressPointOffset + ByteOffset) 1054 continue; 1055 1056 TargetsForSlot.push_back(VTP.FuncVI); 1057 } 1058 } 1059 1060 // Give up if we couldn't find any targets. 
1061 return !TargetsForSlot.empty(); 1062 } 1063 1064 void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo, 1065 Constant *TheFn, bool &IsExported) { 1066 // Don't devirtualize function if we're told to skip it 1067 // in -wholeprogramdevirt-skip. 1068 if (FunctionsToSkip.match(TheFn->stripPointerCasts()->getName())) 1069 return; 1070 auto Apply = [&](CallSiteInfo &CSInfo) { 1071 for (auto &&VCallSite : CSInfo.CallSites) { 1072 if (!OptimizedCalls.insert(&VCallSite.CB).second) 1073 continue; 1074 1075 if (RemarksEnabled) 1076 VCallSite.emitRemark("single-impl", 1077 TheFn->stripPointerCasts()->getName(), OREGetter); 1078 auto &CB = VCallSite.CB; 1079 assert(!CB.getCalledFunction() && "devirtualizing direct call?"); 1080 IRBuilder<> Builder(&CB); 1081 Value *Callee = 1082 Builder.CreateBitCast(TheFn, CB.getCalledOperand()->getType()); 1083 1084 // If checking is enabled, add support to compare the virtual function 1085 // pointer to the devirtualized target. In case of a mismatch, perform a 1086 // debug trap. 1087 if (CheckDevirt) { 1088 auto *Cond = Builder.CreateICmpNE(CB.getCalledOperand(), Callee); 1089 Instruction *ThenTerm = 1090 SplitBlockAndInsertIfThen(Cond, &CB, /*Unreachable=*/false); 1091 Builder.SetInsertPoint(ThenTerm); 1092 Function *TrapFn = Intrinsic::getDeclaration(&M, Intrinsic::debugtrap); 1093 auto *CallTrap = Builder.CreateCall(TrapFn); 1094 CallTrap->setDebugLoc(CB.getDebugLoc()); 1095 } 1096 1097 // Devirtualize. 1098 CB.setCalledOperand(Callee); 1099 1100 // This use is no longer unsafe. 1101 if (VCallSite.NumUnsafeUses) 1102 --*VCallSite.NumUnsafeUses; 1103 } 1104 if (CSInfo.isExported()) 1105 IsExported = true; 1106 CSInfo.markDevirt(); 1107 }; 1108 Apply(SlotInfo.CSInfo); 1109 for (auto &P : SlotInfo.ConstCSInfo) 1110 Apply(P.second); 1111 } 1112 1113 static bool AddCalls(VTableSlotInfo &SlotInfo, const ValueInfo &Callee) { 1114 // We can't add calls if we haven't seen a definition 1115 if (Callee.getSummaryList().empty()) 1116 return false; 1117 1118 // Insert calls into the summary index so that the devirtualized targets 1119 // are eligible for import. 1120 // FIXME: Annotate type tests with hotness. For now, mark these as hot 1121 // to better ensure we have the opportunity to inline them. 1122 bool IsExported = false; 1123 auto &S = Callee.getSummaryList()[0]; 1124 CalleeInfo CI(CalleeInfo::HotnessType::Hot, /* RelBF = */ 0); 1125 auto AddCalls = [&](CallSiteInfo &CSInfo) { 1126 for (auto *FS : CSInfo.SummaryTypeCheckedLoadUsers) { 1127 FS->addCall({Callee, CI}); 1128 IsExported |= S->modulePath() != FS->modulePath(); 1129 } 1130 for (auto *FS : CSInfo.SummaryTypeTestAssumeUsers) { 1131 FS->addCall({Callee, CI}); 1132 IsExported |= S->modulePath() != FS->modulePath(); 1133 } 1134 }; 1135 AddCalls(SlotInfo.CSInfo); 1136 for (auto &P : SlotInfo.ConstCSInfo) 1137 AddCalls(P.second); 1138 return IsExported; 1139 } 1140 1141 bool DevirtModule::trySingleImplDevirt( 1142 ModuleSummaryIndex *ExportSummary, 1143 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo, 1144 WholeProgramDevirtResolution *Res) { 1145 // See if the program contains a single implementation of this virtual 1146 // function. 1147 Function *TheFn = TargetsForSlot[0].Fn; 1148 for (auto &&Target : TargetsForSlot) 1149 if (TheFn != Target.Fn) 1150 return false; 1151 1152 // If so, update each call site to call that implementation directly. 
1153 if (RemarksEnabled) 1154 TargetsForSlot[0].WasDevirt = true; 1155 1156 bool IsExported = false; 1157 applySingleImplDevirt(SlotInfo, TheFn, IsExported); 1158 if (!IsExported) 1159 return false; 1160 1161 // If the only implementation has local linkage, we must promote to external 1162 // to make it visible to thin LTO objects. We can only get here during the 1163 // ThinLTO export phase. 1164 if (TheFn->hasLocalLinkage()) { 1165 std::string NewName = (TheFn->getName() + ".llvm.merged").str(); 1166 1167 // Since we are renaming the function, any comdats with the same name must 1168 // also be renamed. This is required when targeting COFF, as the comdat name 1169 // must match one of the names of the symbols in the comdat. 1170 if (Comdat *C = TheFn->getComdat()) { 1171 if (C->getName() == TheFn->getName()) { 1172 Comdat *NewC = M.getOrInsertComdat(NewName); 1173 NewC->setSelectionKind(C->getSelectionKind()); 1174 for (GlobalObject &GO : M.global_objects()) 1175 if (GO.getComdat() == C) 1176 GO.setComdat(NewC); 1177 } 1178 } 1179 1180 TheFn->setLinkage(GlobalValue::ExternalLinkage); 1181 TheFn->setVisibility(GlobalValue::HiddenVisibility); 1182 TheFn->setName(NewName); 1183 } 1184 if (ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFn->getGUID())) 1185 // Any needed promotion of 'TheFn' has already been done during 1186 // LTO unit split, so we can ignore return value of AddCalls. 1187 AddCalls(SlotInfo, TheFnVI); 1188 1189 Res->TheKind = WholeProgramDevirtResolution::SingleImpl; 1190 Res->SingleImplName = std::string(TheFn->getName()); 1191 1192 return true; 1193 } 1194 1195 bool DevirtIndex::trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot, 1196 VTableSlotSummary &SlotSummary, 1197 VTableSlotInfo &SlotInfo, 1198 WholeProgramDevirtResolution *Res, 1199 std::set<ValueInfo> &DevirtTargets) { 1200 // See if the program contains a single implementation of this virtual 1201 // function. 1202 auto TheFn = TargetsForSlot[0]; 1203 for (auto &&Target : TargetsForSlot) 1204 if (TheFn != Target) 1205 return false; 1206 1207 // Don't devirtualize if we don't have target definition. 1208 auto Size = TheFn.getSummaryList().size(); 1209 if (!Size) 1210 return false; 1211 1212 // Don't devirtualize function if we're told to skip it 1213 // in -wholeprogramdevirt-skip. 1214 if (FunctionsToSkip.match(TheFn.name())) 1215 return false; 1216 1217 // If the summary list contains multiple summaries where at least one is 1218 // a local, give up, as we won't know which (possibly promoted) name to use. 1219 for (auto &S : TheFn.getSummaryList()) 1220 if (GlobalValue::isLocalLinkage(S->linkage()) && Size > 1) 1221 return false; 1222 1223 // Collect functions devirtualized at least for one call site for stats. 1224 if (PrintSummaryDevirt) 1225 DevirtTargets.insert(TheFn); 1226 1227 auto &S = TheFn.getSummaryList()[0]; 1228 bool IsExported = AddCalls(SlotInfo, TheFn); 1229 if (IsExported) 1230 ExportedGUIDs.insert(TheFn.getGUID()); 1231 1232 // Record in summary for use in devirtualization during the ThinLTO import 1233 // step. 1234 Res->TheKind = WholeProgramDevirtResolution::SingleImpl; 1235 if (GlobalValue::isLocalLinkage(S->linkage())) { 1236 if (IsExported) 1237 // If target is a local function and we are exporting it by 1238 // devirtualizing a call in another module, we need to record the 1239 // promoted name. 
1240 Res->SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal( 1241 TheFn.name(), ExportSummary.getModuleHash(S->modulePath())); 1242 else { 1243 LocalWPDTargetsMap[TheFn].push_back(SlotSummary); 1244 Res->SingleImplName = std::string(TheFn.name()); 1245 } 1246 } else 1247 Res->SingleImplName = std::string(TheFn.name()); 1248 1249 // Name will be empty if this thin link driven off of serialized combined 1250 // index (e.g. llvm-lto). However, WPD is not supported/invoked for the 1251 // legacy LTO API anyway. 1252 assert(!Res->SingleImplName.empty()); 1253 1254 return true; 1255 } 1256 1257 void DevirtModule::tryICallBranchFunnel( 1258 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo, 1259 WholeProgramDevirtResolution *Res, VTableSlot Slot) { 1260 Triple T(M.getTargetTriple()); 1261 if (T.getArch() != Triple::x86_64) 1262 return; 1263 1264 if (TargetsForSlot.size() > ClThreshold) 1265 return; 1266 1267 bool HasNonDevirt = !SlotInfo.CSInfo.AllCallSitesDevirted; 1268 if (!HasNonDevirt) 1269 for (auto &P : SlotInfo.ConstCSInfo) 1270 if (!P.second.AllCallSitesDevirted) { 1271 HasNonDevirt = true; 1272 break; 1273 } 1274 1275 if (!HasNonDevirt) 1276 return; 1277 1278 FunctionType *FT = 1279 FunctionType::get(Type::getVoidTy(M.getContext()), {Int8PtrTy}, true); 1280 Function *JT; 1281 if (isa<MDString>(Slot.TypeID)) { 1282 JT = Function::Create(FT, Function::ExternalLinkage, 1283 M.getDataLayout().getProgramAddressSpace(), 1284 getGlobalName(Slot, {}, "branch_funnel"), &M); 1285 JT->setVisibility(GlobalValue::HiddenVisibility); 1286 } else { 1287 JT = Function::Create(FT, Function::InternalLinkage, 1288 M.getDataLayout().getProgramAddressSpace(), 1289 "branch_funnel", &M); 1290 } 1291 JT->addAttribute(1, Attribute::Nest); 1292 1293 std::vector<Value *> JTArgs; 1294 JTArgs.push_back(JT->arg_begin()); 1295 for (auto &T : TargetsForSlot) { 1296 JTArgs.push_back(getMemberAddr(T.TM)); 1297 JTArgs.push_back(T.Fn); 1298 } 1299 1300 BasicBlock *BB = BasicBlock::Create(M.getContext(), "", JT, nullptr); 1301 Function *Intr = 1302 Intrinsic::getDeclaration(&M, llvm::Intrinsic::icall_branch_funnel, {}); 1303 1304 auto *CI = CallInst::Create(Intr, JTArgs, "", BB); 1305 CI->setTailCallKind(CallInst::TCK_MustTail); 1306 ReturnInst::Create(M.getContext(), nullptr, BB); 1307 1308 bool IsExported = false; 1309 applyICallBranchFunnel(SlotInfo, JT, IsExported); 1310 if (IsExported) 1311 Res->TheKind = WholeProgramDevirtResolution::BranchFunnel; 1312 } 1313 1314 void DevirtModule::applyICallBranchFunnel(VTableSlotInfo &SlotInfo, 1315 Constant *JT, bool &IsExported) { 1316 auto Apply = [&](CallSiteInfo &CSInfo) { 1317 if (CSInfo.isExported()) 1318 IsExported = true; 1319 if (CSInfo.AllCallSitesDevirted) 1320 return; 1321 for (auto &&VCallSite : CSInfo.CallSites) { 1322 CallBase &CB = VCallSite.CB; 1323 1324 // Jump tables are only profitable if the retpoline mitigation is enabled. 1325 Attribute FSAttr = CB.getCaller()->getFnAttribute("target-features"); 1326 if (!FSAttr.isValid() || 1327 !FSAttr.getValueAsString().contains("+retpoline")) 1328 continue; 1329 1330 if (RemarksEnabled) 1331 VCallSite.emitRemark("branch-funnel", 1332 JT->stripPointerCasts()->getName(), OREGetter); 1333 1334 // Pass the address of the vtable in the nest register, which is r10 on 1335 // x86_64. 
1336 std::vector<Type *> NewArgs; 1337 NewArgs.push_back(Int8PtrTy); 1338 append_range(NewArgs, CB.getFunctionType()->params()); 1339 FunctionType *NewFT = 1340 FunctionType::get(CB.getFunctionType()->getReturnType(), NewArgs, 1341 CB.getFunctionType()->isVarArg()); 1342 PointerType *NewFTPtr = PointerType::getUnqual(NewFT); 1343 1344 IRBuilder<> IRB(&CB); 1345 std::vector<Value *> Args; 1346 Args.push_back(IRB.CreateBitCast(VCallSite.VTable, Int8PtrTy)); 1347 llvm::append_range(Args, CB.args()); 1348 1349 CallBase *NewCS = nullptr; 1350 if (isa<CallInst>(CB)) 1351 NewCS = IRB.CreateCall(NewFT, IRB.CreateBitCast(JT, NewFTPtr), Args); 1352 else 1353 NewCS = IRB.CreateInvoke(NewFT, IRB.CreateBitCast(JT, NewFTPtr), 1354 cast<InvokeInst>(CB).getNormalDest(), 1355 cast<InvokeInst>(CB).getUnwindDest(), Args); 1356 NewCS->setCallingConv(CB.getCallingConv()); 1357 1358 AttributeList Attrs = CB.getAttributes(); 1359 std::vector<AttributeSet> NewArgAttrs; 1360 NewArgAttrs.push_back(AttributeSet::get( 1361 M.getContext(), ArrayRef<Attribute>{Attribute::get( 1362 M.getContext(), Attribute::Nest)})); 1363 for (unsigned I = 0; I + 2 < Attrs.getNumAttrSets(); ++I) 1364 NewArgAttrs.push_back(Attrs.getParamAttributes(I)); 1365 NewCS->setAttributes( 1366 AttributeList::get(M.getContext(), Attrs.getFnAttributes(), 1367 Attrs.getRetAttributes(), NewArgAttrs)); 1368 1369 CB.replaceAllUsesWith(NewCS); 1370 CB.eraseFromParent(); 1371 1372 // This use is no longer unsafe. 1373 if (VCallSite.NumUnsafeUses) 1374 --*VCallSite.NumUnsafeUses; 1375 } 1376 // Don't mark as devirtualized because there may be callers compiled without 1377 // retpoline mitigation, which would mean that they are lowered to 1378 // llvm.type.test and therefore require an llvm.type.test resolution for the 1379 // type identifier. 1380 }; 1381 Apply(SlotInfo.CSInfo); 1382 for (auto &P : SlotInfo.ConstCSInfo) 1383 Apply(P.second); 1384 } 1385 1386 bool DevirtModule::tryEvaluateFunctionsWithArgs( 1387 MutableArrayRef<VirtualCallTarget> TargetsForSlot, 1388 ArrayRef<uint64_t> Args) { 1389 // Evaluate each function and store the result in each target's RetVal 1390 // field. 
1391 for (VirtualCallTarget &Target : TargetsForSlot) { 1392 if (Target.Fn->arg_size() != Args.size() + 1) 1393 return false; 1394 1395 Evaluator Eval(M.getDataLayout(), nullptr); 1396 SmallVector<Constant *, 2> EvalArgs; 1397 EvalArgs.push_back( 1398 Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0))); 1399 for (unsigned I = 0; I != Args.size(); ++I) { 1400 auto *ArgTy = dyn_cast<IntegerType>( 1401 Target.Fn->getFunctionType()->getParamType(I + 1)); 1402 if (!ArgTy) 1403 return false; 1404 EvalArgs.push_back(ConstantInt::get(ArgTy, Args[I])); 1405 } 1406 1407 Constant *RetVal; 1408 if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) || 1409 !isa<ConstantInt>(RetVal)) 1410 return false; 1411 Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue(); 1412 } 1413 return true; 1414 } 1415 1416 void DevirtModule::applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, 1417 uint64_t TheRetVal) { 1418 for (auto Call : CSInfo.CallSites) { 1419 if (!OptimizedCalls.insert(&Call.CB).second) 1420 continue; 1421 Call.replaceAndErase( 1422 "uniform-ret-val", FnName, RemarksEnabled, OREGetter, 1423 ConstantInt::get(cast<IntegerType>(Call.CB.getType()), TheRetVal)); 1424 } 1425 CSInfo.markDevirt(); 1426 } 1427 1428 bool DevirtModule::tryUniformRetValOpt( 1429 MutableArrayRef<VirtualCallTarget> TargetsForSlot, CallSiteInfo &CSInfo, 1430 WholeProgramDevirtResolution::ByArg *Res) { 1431 // Uniform return value optimization. If all functions return the same 1432 // constant, replace all calls with that constant. 1433 uint64_t TheRetVal = TargetsForSlot[0].RetVal; 1434 for (const VirtualCallTarget &Target : TargetsForSlot) 1435 if (Target.RetVal != TheRetVal) 1436 return false; 1437 1438 if (CSInfo.isExported()) { 1439 Res->TheKind = WholeProgramDevirtResolution::ByArg::UniformRetVal; 1440 Res->Info = TheRetVal; 1441 } 1442 1443 applyUniformRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), TheRetVal); 1444 if (RemarksEnabled) 1445 for (auto &&Target : TargetsForSlot) 1446 Target.WasDevirt = true; 1447 return true; 1448 } 1449 1450 std::string DevirtModule::getGlobalName(VTableSlot Slot, 1451 ArrayRef<uint64_t> Args, 1452 StringRef Name) { 1453 std::string FullName = "__typeid_"; 1454 raw_string_ostream OS(FullName); 1455 OS << cast<MDString>(Slot.TypeID)->getString() << '_' << Slot.ByteOffset; 1456 for (uint64_t Arg : Args) 1457 OS << '_' << Arg; 1458 OS << '_' << Name; 1459 return OS.str(); 1460 } 1461 1462 bool DevirtModule::shouldExportConstantsAsAbsoluteSymbols() { 1463 Triple T(M.getTargetTriple()); 1464 return T.isX86() && T.getObjectFormat() == Triple::ELF; 1465 } 1466 1467 void DevirtModule::exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, 1468 StringRef Name, Constant *C) { 1469 GlobalAlias *GA = GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage, 1470 getGlobalName(Slot, Args, Name), C, &M); 1471 GA->setVisibility(GlobalValue::HiddenVisibility); 1472 } 1473 1474 void DevirtModule::exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, 1475 StringRef Name, uint32_t Const, 1476 uint32_t &Storage) { 1477 if (shouldExportConstantsAsAbsoluteSymbols()) { 1478 exportGlobal( 1479 Slot, Args, Name, 1480 ConstantExpr::getIntToPtr(ConstantInt::get(Int32Ty, Const), Int8PtrTy)); 1481 return; 1482 } 1483 1484 Storage = Const; 1485 } 1486 1487 Constant *DevirtModule::importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, 1488 StringRef Name) { 1489 Constant *C = 1490 M.getOrInsertGlobal(getGlobalName(Slot, Args, Name), Int8Arr0Ty); 1491 auto *GV = 
dyn_cast<GlobalVariable>(C); 1492 if (GV) 1493 GV->setVisibility(GlobalValue::HiddenVisibility); 1494 return C; 1495 } 1496 1497 Constant *DevirtModule::importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, 1498 StringRef Name, IntegerType *IntTy, 1499 uint32_t Storage) { 1500 if (!shouldExportConstantsAsAbsoluteSymbols()) 1501 return ConstantInt::get(IntTy, Storage); 1502 1503 Constant *C = importGlobal(Slot, Args, Name); 1504 auto *GV = cast<GlobalVariable>(C->stripPointerCasts()); 1505 C = ConstantExpr::getPtrToInt(C, IntTy); 1506 1507 // We only need to set metadata if the global is newly created, in which 1508 // case it would not have hidden visibility. 1509 if (GV->hasMetadata(LLVMContext::MD_absolute_symbol)) 1510 return C; 1511 1512 auto SetAbsRange = [&](uint64_t Min, uint64_t Max) { 1513 auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min)); 1514 auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max)); 1515 GV->setMetadata(LLVMContext::MD_absolute_symbol, 1516 MDNode::get(M.getContext(), {MinC, MaxC})); 1517 }; 1518 unsigned AbsWidth = IntTy->getBitWidth(); 1519 if (AbsWidth == IntPtrTy->getBitWidth()) 1520 SetAbsRange(~0ull, ~0ull); // Full set. 1521 else 1522 SetAbsRange(0, 1ull << AbsWidth); 1523 return C; 1524 } 1525 1526 void DevirtModule::applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, 1527 bool IsOne, 1528 Constant *UniqueMemberAddr) { 1529 for (auto &&Call : CSInfo.CallSites) { 1530 if (!OptimizedCalls.insert(&Call.CB).second) 1531 continue; 1532 IRBuilder<> B(&Call.CB); 1533 Value *Cmp = 1534 B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, Call.VTable, 1535 B.CreateBitCast(UniqueMemberAddr, Call.VTable->getType())); 1536 Cmp = B.CreateZExt(Cmp, Call.CB.getType()); 1537 Call.replaceAndErase("unique-ret-val", FnName, RemarksEnabled, OREGetter, 1538 Cmp); 1539 } 1540 CSInfo.markDevirt(); 1541 } 1542 1543 Constant *DevirtModule::getMemberAddr(const TypeMemberInfo *M) { 1544 Constant *C = ConstantExpr::getBitCast(M->Bits->GV, Int8PtrTy); 1545 return ConstantExpr::getGetElementPtr(Int8Ty, C, 1546 ConstantInt::get(Int64Ty, M->Offset)); 1547 } 1548 1549 bool DevirtModule::tryUniqueRetValOpt( 1550 unsigned BitWidth, MutableArrayRef<VirtualCallTarget> TargetsForSlot, 1551 CallSiteInfo &CSInfo, WholeProgramDevirtResolution::ByArg *Res, 1552 VTableSlot Slot, ArrayRef<uint64_t> Args) { 1553 // IsOne controls whether we look for a 0 or a 1. 1554 auto tryUniqueRetValOptFor = [&](bool IsOne) { 1555 const TypeMemberInfo *UniqueMember = nullptr; 1556 for (const VirtualCallTarget &Target : TargetsForSlot) { 1557 if (Target.RetVal == (IsOne ? 1 : 0)) { 1558 if (UniqueMember) 1559 return false; 1560 UniqueMember = Target.TM; 1561 } 1562 } 1563 1564 // We should have found a unique member or bailed out by now. We already 1565 // checked for a uniform return value in tryUniformRetValOpt. 1566 assert(UniqueMember); 1567 1568 Constant *UniqueMemberAddr = getMemberAddr(UniqueMember); 1569 if (CSInfo.isExported()) { 1570 Res->TheKind = WholeProgramDevirtResolution::ByArg::UniqueRetVal; 1571 Res->Info = IsOne; 1572 1573 exportGlobal(Slot, Args, "unique_member", UniqueMemberAddr); 1574 } 1575 1576 // Replace each call with the comparison. 1577 applyUniqueRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), IsOne, 1578 UniqueMemberAddr); 1579 1580 // Update devirtualization statistics for targets. 
1581 if (RemarksEnabled) 1582 for (auto &&Target : TargetsForSlot) 1583 Target.WasDevirt = true; 1584 1585 return true; 1586 }; 1587 1588 if (BitWidth == 1) { 1589 if (tryUniqueRetValOptFor(true)) 1590 return true; 1591 if (tryUniqueRetValOptFor(false)) 1592 return true; 1593 } 1594 return false; 1595 } 1596 1597 void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName, 1598 Constant *Byte, Constant *Bit) { 1599 for (auto Call : CSInfo.CallSites) { 1600 if (!OptimizedCalls.insert(&Call.CB).second) 1601 continue; 1602 auto *RetType = cast<IntegerType>(Call.CB.getType()); 1603 IRBuilder<> B(&Call.CB); 1604 Value *Addr = 1605 B.CreateGEP(Int8Ty, B.CreateBitCast(Call.VTable, Int8PtrTy), Byte); 1606 if (RetType->getBitWidth() == 1) { 1607 Value *Bits = B.CreateLoad(Int8Ty, Addr); 1608 Value *BitsAndBit = B.CreateAnd(Bits, Bit); 1609 auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0)); 1610 Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled, 1611 OREGetter, IsBitSet); 1612 } else { 1613 Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo()); 1614 Value *Val = B.CreateLoad(RetType, ValAddr); 1615 Call.replaceAndErase("virtual-const-prop", FnName, RemarksEnabled, 1616 OREGetter, Val); 1617 } 1618 } 1619 CSInfo.markDevirt(); 1620 } 1621 1622 bool DevirtModule::tryVirtualConstProp( 1623 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo, 1624 WholeProgramDevirtResolution *Res, VTableSlot Slot) { 1625 // This only works if the function returns an integer. 1626 auto RetType = dyn_cast<IntegerType>(TargetsForSlot[0].Fn->getReturnType()); 1627 if (!RetType) 1628 return false; 1629 unsigned BitWidth = RetType->getBitWidth(); 1630 if (BitWidth > 64) 1631 return false; 1632 1633 // Make sure that each function is defined, does not access memory, takes at 1634 // least one argument, does not use its first argument (which we assume is 1635 // 'this'), and has the same return type. 1636 // 1637 // Note that we test whether this copy of the function is readnone, rather 1638 // than testing function attributes, which must hold for any copy of the 1639 // function, even a less optimized version substituted at link time. This is 1640 // sound because the virtual constant propagation optimizations effectively 1641 // inline all implementations of the virtual function into each call site, 1642 // rather than using function attributes to perform local optimization. 1643 for (VirtualCallTarget &Target : TargetsForSlot) { 1644 if (Target.Fn->isDeclaration() || 1645 computeFunctionBodyMemoryAccess(*Target.Fn, AARGetter(*Target.Fn)) != 1646 MAK_ReadNone || 1647 Target.Fn->arg_empty() || !Target.Fn->arg_begin()->use_empty() || 1648 Target.Fn->getReturnType() != RetType) 1649 return false; 1650 } 1651 1652 for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) { 1653 if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first)) 1654 continue; 1655 1656 WholeProgramDevirtResolution::ByArg *ResByArg = nullptr; 1657 if (Res) 1658 ResByArg = &Res->ResByArg[CSByConstantArg.first]; 1659 1660 if (tryUniformRetValOpt(TargetsForSlot, CSByConstantArg.second, ResByArg)) 1661 continue; 1662 1663 if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second, 1664 ResByArg, Slot, CSByConstantArg.first)) 1665 continue; 1666 1667 // Find an allocation offset in bits in all vtables associated with the 1668 // type. 
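    // Sketch of the search (an inference from the helpers' names and the
    // padding computation below): findLowestOffset returns the lowest bit
    // offset at which BitWidth bits are free in every vtable in
    // TargetsForSlot, searching once before and once after the address
    // point; whichever placement needs less padding is chosen.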
1669 uint64_t AllocBefore = 1670 findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth); 1671 uint64_t AllocAfter = 1672 findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth); 1673 1674 // Calculate the total amount of padding needed to store a value at both 1675 // ends of the object. 1676 uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0; 1677 for (auto &&Target : TargetsForSlot) { 1678 TotalPaddingBefore += std::max<int64_t>( 1679 (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0); 1680 TotalPaddingAfter += std::max<int64_t>( 1681 (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0); 1682 } 1683 1684 // If the amount of padding is too large, give up. 1685 // FIXME: do something smarter here. 1686 if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128) 1687 continue; 1688 1689 // Calculate the offset to the value as a (possibly negative) byte offset 1690 // and (if applicable) a bit offset, and store the values in the targets. 1691 int64_t OffsetByte; 1692 uint64_t OffsetBit; 1693 if (TotalPaddingBefore <= TotalPaddingAfter) 1694 setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte, 1695 OffsetBit); 1696 else 1697 setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte, 1698 OffsetBit); 1699 1700 if (RemarksEnabled) 1701 for (auto &&Target : TargetsForSlot) 1702 Target.WasDevirt = true; 1703 1704 1705 if (CSByConstantArg.second.isExported()) { 1706 ResByArg->TheKind = WholeProgramDevirtResolution::ByArg::VirtualConstProp; 1707 exportConstant(Slot, CSByConstantArg.first, "byte", OffsetByte, 1708 ResByArg->Byte); 1709 exportConstant(Slot, CSByConstantArg.first, "bit", 1ULL << OffsetBit, 1710 ResByArg->Bit); 1711 } 1712 1713 // Rewrite each call to a load from OffsetByte/OffsetBit. 1714 Constant *ByteConst = ConstantInt::get(Int32Ty, OffsetByte); 1715 Constant *BitConst = ConstantInt::get(Int8Ty, 1ULL << OffsetBit); 1716 applyVirtualConstProp(CSByConstantArg.second, 1717 TargetsForSlot[0].Fn->getName(), ByteConst, BitConst); 1718 } 1719 return true; 1720 } 1721 1722 void DevirtModule::rebuildGlobal(VTableBits &B) { 1723 if (B.Before.Bytes.empty() && B.After.Bytes.empty()) 1724 return; 1725 1726 // Align the before byte array to the global's minimum alignment so that we 1727 // don't break any alignment requirements on the global. 1728 Align Alignment = M.getDataLayout().getValueOrABITypeAlignment( 1729 B.GV->getAlign(), B.GV->getValueType()); 1730 B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), Alignment)); 1731 1732 // Before was stored in reverse order; flip it now. 1733 for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I) 1734 std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]); 1735 1736 // Build an anonymous global containing the before bytes, followed by the 1737 // original initializer, followed by the after bytes. 1738 auto NewInit = ConstantStruct::getAnon( 1739 {ConstantDataArray::get(M.getContext(), B.Before.Bytes), 1740 B.GV->getInitializer(), 1741 ConstantDataArray::get(M.getContext(), B.After.Bytes)}); 1742 auto NewGV = 1743 new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(), 1744 GlobalVariable::PrivateLinkage, NewInit, "", B.GV); 1745 NewGV->setSection(B.GV->getSection()); 1746 NewGV->setComdat(B.GV->getComdat()); 1747 NewGV->setAlignment(MaybeAlign(B.GV->getAlignment())); 1748 1749 // Copy the original vtable's metadata to the anonymous global, adjusting 1750 // offsets as required. 
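  // Illustrative layout (hypothetical names): a vtable @_ZTV1A that received
  // one byte of constant data on each side becomes roughly
  //   @<anon>  = private constant { [1 x i8], <original type>, [1 x i8] } ...
  //   @_ZTV1A  = alias ..., getelementptr(..., @<anon>, i32 0, i32 1)
  // so existing references keep using the original address point, while the
  // copied !type metadata offsets are shifted by the prepended byte count.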
1751 NewGV->copyMetadata(B.GV, B.Before.Bytes.size()); 1752 1753 // Build an alias named after the original global, pointing at the second 1754 // element (the original initializer). 1755 auto Alias = GlobalAlias::create( 1756 B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "", 1757 ConstantExpr::getGetElementPtr( 1758 NewInit->getType(), NewGV, 1759 ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0), 1760 ConstantInt::get(Int32Ty, 1)}), 1761 &M); 1762 Alias->setVisibility(B.GV->getVisibility()); 1763 Alias->takeName(B.GV); 1764 1765 B.GV->replaceAllUsesWith(Alias); 1766 B.GV->eraseFromParent(); 1767 } 1768 1769 bool DevirtModule::areRemarksEnabled() { 1770 const auto &FL = M.getFunctionList(); 1771 for (const Function &Fn : FL) { 1772 const auto &BBL = Fn.getBasicBlockList(); 1773 if (BBL.empty()) 1774 continue; 1775 auto DI = OptimizationRemark(DEBUG_TYPE, "", DebugLoc(), &BBL.front()); 1776 return DI.isEnabled(); 1777 } 1778 return false; 1779 } 1780 1781 void DevirtModule::scanTypeTestUsers( 1782 Function *TypeTestFunc, 1783 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) { 1784 // Find all virtual calls via a virtual table pointer %p under an assumption 1785 // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p 1786 // points to a member of the type identifier %md. Group calls by (type ID, 1787 // offset) pair (effectively the identity of the virtual function) and store 1788 // to CallSlots. 1789 for (auto I = TypeTestFunc->use_begin(), E = TypeTestFunc->use_end(); 1790 I != E;) { 1791 auto CI = dyn_cast<CallInst>(I->getUser()); 1792 ++I; 1793 if (!CI) 1794 continue; 1795 1796 // Search for virtual calls based on %p and add them to DevirtCalls. 1797 SmallVector<DevirtCallSite, 1> DevirtCalls; 1798 SmallVector<CallInst *, 1> Assumes; 1799 auto &DT = LookupDomTree(*CI->getFunction()); 1800 findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI, DT); 1801 1802 Metadata *TypeId = 1803 cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata(); 1804 // If we found any, add them to CallSlots. 1805 if (!Assumes.empty()) { 1806 Value *Ptr = CI->getArgOperand(0)->stripPointerCasts(); 1807 for (DevirtCallSite Call : DevirtCalls) 1808 CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB, nullptr); 1809 } 1810 1811 auto RemoveTypeTestAssumes = [&]() { 1812 // We no longer need the assumes or the type test. 1813 for (auto Assume : Assumes) 1814 Assume->eraseFromParent(); 1815 // We can't use RecursivelyDeleteTriviallyDeadInstructions here because we 1816 // may use the vtable argument later. 1817 if (CI->use_empty()) 1818 CI->eraseFromParent(); 1819 }; 1820 1821 // At this point we could remove all type test assume sequences, as they 1822 // were originally inserted for WPD. However, we can keep these in the 1823 // code stream for later analysis (e.g. to help drive more efficient ICP 1824 // sequences). They will eventually be removed by a second LowerTypeTests 1825 // invocation that cleans them up. In order to do this correctly, the first 1826 // LowerTypeTests invocation needs to know that they have "Unknown" type 1827 // test resolution, so that they aren't treated as Unsat and lowered to 1828 // False, which will break any uses on assumes. Below we remove any type 1829 // test assumes that will not be treated as Unknown by LTT. 1830 1831 // The type test assumes will be treated by LTT as Unsat if the type id is 1832 // not used on a global (in which case it has no entry in the TypeIdMap). 
1833 if (!TypeIdMap.count(TypeId)) 1834 RemoveTypeTestAssumes(); 1835 1836 // For ThinLTO importing, we need to remove the type test assumes if this is 1837 // an MDString type id without a corresponding TypeIdSummary. Any 1838 // non-MDString type ids are ignored and treated as Unknown by LTT, so their 1839 // type test assumes can be kept. If the MDString type id is missing a 1840 // TypeIdSummary (e.g. because there was no use on a vcall, preventing the 1841 // exporting phase of WPD from analyzing it), then it would be treated as 1842 // Unsat by LTT and we need to remove its type test assumes here. If not 1843 // used on a vcall we don't need them for later optimization use in any 1844 // case. 1845 else if (ImportSummary && isa<MDString>(TypeId)) { 1846 const TypeIdSummary *TidSummary = 1847 ImportSummary->getTypeIdSummary(cast<MDString>(TypeId)->getString()); 1848 if (!TidSummary) 1849 RemoveTypeTestAssumes(); 1850 else 1851 // If one was created it should not be Unsat, because if we reached here 1852 // the type id was used on a global. 1853 assert(TidSummary->TTRes.TheKind != TypeTestResolution::Unsat); 1854 } 1855 } 1856 } 1857 1858 void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) { 1859 Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test); 1860 1861 for (auto I = TypeCheckedLoadFunc->use_begin(), 1862 E = TypeCheckedLoadFunc->use_end(); 1863 I != E;) { 1864 auto CI = dyn_cast<CallInst>(I->getUser()); 1865 ++I; 1866 if (!CI) 1867 continue; 1868 1869 Value *Ptr = CI->getArgOperand(0); 1870 Value *Offset = CI->getArgOperand(1); 1871 Value *TypeIdValue = CI->getArgOperand(2); 1872 Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata(); 1873 1874 SmallVector<DevirtCallSite, 1> DevirtCalls; 1875 SmallVector<Instruction *, 1> LoadedPtrs; 1876 SmallVector<Instruction *, 1> Preds; 1877 bool HasNonCallUses = false; 1878 auto &DT = LookupDomTree(*CI->getFunction()); 1879 findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds, 1880 HasNonCallUses, CI, DT); 1881 1882 // Start by generating "pessimistic" code that explicitly loads the function 1883 // pointer from the vtable and performs the type check. If possible, we will 1884 // eliminate the load and the type check later. 1885 1886 // If possible, only generate the load at the point where it is used. 1887 // This helps avoid unnecessary spills. 1888 IRBuilder<> LoadB( 1889 (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI); 1890 Value *GEP = LoadB.CreateGEP(Int8Ty, Ptr, Offset); 1891 Value *GEPPtr = LoadB.CreateBitCast(GEP, PointerType::getUnqual(Int8PtrTy)); 1892 Value *LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEPPtr); 1893 1894 for (Instruction *LoadedPtr : LoadedPtrs) { 1895 LoadedPtr->replaceAllUsesWith(LoadedValue); 1896 LoadedPtr->eraseFromParent(); 1897 } 1898 1899 // Likewise for the type test. 1900 IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI); 1901 CallInst *TypeTestCall = CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue}); 1902 1903 for (Instruction *Pred : Preds) { 1904 Pred->replaceAllUsesWith(TypeTestCall); 1905 Pred->eraseFromParent(); 1906 } 1907 1908 // We have already erased any extractvalue instructions that refer to the 1909 // intrinsic call, but the intrinsic may have other non-extractvalue uses 1910 // (although this is unlikely). In that case, explicitly build a pair and 1911 // RAUW it. 
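    // Sketch of the replacement aggregate built below, using the intrinsic's
    // {i8*, i1} return type (the %names are illustrative only):
    //   %pair0 = insertvalue { i8*, i1 } undef, i8* %loaded_fptr, 0
    //   %pair  = insertvalue { i8*, i1 } %pair0, i1 %type_test, 1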
1912 if (!CI->use_empty()) { 1913 Value *Pair = UndefValue::get(CI->getType()); 1914 IRBuilder<> B(CI); 1915 Pair = B.CreateInsertValue(Pair, LoadedValue, {0}); 1916 Pair = B.CreateInsertValue(Pair, TypeTestCall, {1}); 1917 CI->replaceAllUsesWith(Pair); 1918 } 1919 1920 // The number of unsafe uses is initially the number of uses. 1921 auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall]; 1922 NumUnsafeUses = DevirtCalls.size(); 1923 1924 // If the function pointer has a non-call user, we cannot eliminate the type 1925 // check, as one of those users may eventually call the pointer. Increment 1926 // the unsafe use count to make sure it cannot reach zero. 1927 if (HasNonCallUses) 1928 ++NumUnsafeUses; 1929 for (DevirtCallSite Call : DevirtCalls) { 1930 CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB, 1931 &NumUnsafeUses); 1932 } 1933 1934 CI->eraseFromParent(); 1935 } 1936 } 1937 1938 void DevirtModule::importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo) { 1939 auto *TypeId = dyn_cast<MDString>(Slot.TypeID); 1940 if (!TypeId) 1941 return; 1942 const TypeIdSummary *TidSummary = 1943 ImportSummary->getTypeIdSummary(TypeId->getString()); 1944 if (!TidSummary) 1945 return; 1946 auto ResI = TidSummary->WPDRes.find(Slot.ByteOffset); 1947 if (ResI == TidSummary->WPDRes.end()) 1948 return; 1949 const WholeProgramDevirtResolution &Res = ResI->second; 1950 1951 if (Res.TheKind == WholeProgramDevirtResolution::SingleImpl) { 1952 assert(!Res.SingleImplName.empty()); 1953 // The type of the function in the declaration is irrelevant because every 1954 // call site will cast it to the correct type. 1955 Constant *SingleImpl = 1956 cast<Constant>(M.getOrInsertFunction(Res.SingleImplName, 1957 Type::getVoidTy(M.getContext())) 1958 .getCallee()); 1959 1960 // This is the import phase so we should not be exporting anything. 1961 bool IsExported = false; 1962 applySingleImplDevirt(SlotInfo, SingleImpl, IsExported); 1963 assert(!IsExported); 1964 } 1965 1966 for (auto &CSByConstantArg : SlotInfo.ConstCSInfo) { 1967 auto I = Res.ResByArg.find(CSByConstantArg.first); 1968 if (I == Res.ResByArg.end()) 1969 continue; 1970 auto &ResByArg = I->second; 1971 // FIXME: We should figure out what to do about the "function name" argument 1972 // to the apply* functions, as the function names are unavailable during the 1973 // importing phase. For now we just pass the empty string. This does not 1974 // impact correctness because the function names are just used for remarks. 1975 switch (ResByArg.TheKind) { 1976 case WholeProgramDevirtResolution::ByArg::UniformRetVal: 1977 applyUniformRetValOpt(CSByConstantArg.second, "", ResByArg.Info); 1978 break; 1979 case WholeProgramDevirtResolution::ByArg::UniqueRetVal: { 1980 Constant *UniqueMemberAddr = 1981 importGlobal(Slot, CSByConstantArg.first, "unique_member"); 1982 applyUniqueRetValOpt(CSByConstantArg.second, "", ResByArg.Info, 1983 UniqueMemberAddr); 1984 break; 1985 } 1986 case WholeProgramDevirtResolution::ByArg::VirtualConstProp: { 1987 Constant *Byte = importConstant(Slot, CSByConstantArg.first, "byte", 1988 Int32Ty, ResByArg.Byte); 1989 Constant *Bit = importConstant(Slot, CSByConstantArg.first, "bit", Int8Ty, 1990 ResByArg.Bit); 1991 applyVirtualConstProp(CSByConstantArg.second, "", Byte, Bit); 1992 break; 1993 } 1994 default: 1995 break; 1996 } 1997 } 1998 1999 if (Res.TheKind == WholeProgramDevirtResolution::BranchFunnel) { 2000 // The type of the function is irrelevant, because it's bitcast at calls 2001 // anyhow. 
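    // The funnel is found purely by name: getGlobalName composes
    // "__typeid_" + <type id> + "_" + <byte offset> + "_branch_funnel", so a
    // hypothetical type id "_ZTS1A" at offset 0 resolves the symbol
    // "__typeid__ZTS1A_0_branch_funnel" that the export phase is expected to
    // have defined.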
    Constant *JT = cast<Constant>(
        M.getOrInsertFunction(getGlobalName(Slot, {}, "branch_funnel"),
                              Type::getVoidTy(M.getContext()))
            .getCallee());
    bool IsExported = false;
    applyICallBranchFunnel(SlotInfo, JT, IsExported);
    assert(!IsExported);
  }
}

void DevirtModule::removeRedundantTypeTests() {
  auto True = ConstantInt::getTrue(M.getContext());
  for (auto &&U : NumUnsafeUsesForTypeTest) {
    if (U.second == 0) {
      U.first->replaceAllUsesWith(True);
      U.first->eraseFromParent();
    }
  }
}

bool DevirtModule::run() {
  // If only some of the modules were split, we cannot correctly perform
  // this transformation. We already checked for the presence of type tests
  // with partially split modules during the thin link, and would have emitted
  // an error if any were found, so here we can simply return.
  if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
      (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
    return false;

  Function *TypeTestFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::type_test));
  Function *TypeCheckedLoadFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
  Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));

  // Normally if there are no users of the devirtualization intrinsics in the
  // module, this pass has nothing to do. But if we are exporting, we also need
  // to handle any users that appear only in the function summaries.
  if (!ExportSummary &&
      (!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc ||
       AssumeFunc->use_empty()) &&
      (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()))
    return false;

  // Rebuild type metadata into a map for easy lookup.
  std::vector<VTableBits> Bits;
  DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap;
  buildTypeIdentifierMap(Bits, TypeIdMap);

  if (TypeTestFunc && AssumeFunc)
    scanTypeTestUsers(TypeTestFunc, TypeIdMap);

  if (TypeCheckedLoadFunc)
    scanTypeCheckedLoadUsers(TypeCheckedLoadFunc);

  if (ImportSummary) {
    for (auto &S : CallSlots)
      importResolution(S.first, S.second);

    removeRedundantTypeTests();

    // We have lowered or deleted the type intrinsics, so we will no
    // longer have enough information to reason about the liveness of virtual
    // function pointers in GlobalDCE.
    for (GlobalVariable &GV : M.globals())
      GV.eraseMetadata(LLVMContext::MD_vcall_visibility);

    // The rest of the code is only necessary when exporting or during regular
    // LTO, so we are done.
    return true;
  }

  if (TypeIdMap.empty())
    return true;

  // Collect information from summary about which calls to try to devirtualize.
  if (ExportSummary) {
    DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
    for (auto &P : TypeIdMap) {
      if (auto *TypeId = dyn_cast<MDString>(P.first))
        MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
            TypeId);
    }

    for (auto &P : *ExportSummary) {
      for (auto &S : P.second.SummaryList) {
        auto *FS = dyn_cast<FunctionSummary>(S.get());
        if (!FS)
          continue;
        // FIXME: Only add live functions.
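        // The four loops below record which FunctionSummary uses each
        // (type id, offset) slot, with and without constant arguments, so
        // that resolutions computed during this export phase also cover
        // virtual calls that only exist in other modules' summaries.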
2092 for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) { 2093 for (Metadata *MD : MetadataByGUID[VF.GUID]) { 2094 CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS); 2095 } 2096 } 2097 for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) { 2098 for (Metadata *MD : MetadataByGUID[VF.GUID]) { 2099 CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS); 2100 } 2101 } 2102 for (const FunctionSummary::ConstVCall &VC : 2103 FS->type_test_assume_const_vcalls()) { 2104 for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) { 2105 CallSlots[{MD, VC.VFunc.Offset}] 2106 .ConstCSInfo[VC.Args] 2107 .addSummaryTypeTestAssumeUser(FS); 2108 } 2109 } 2110 for (const FunctionSummary::ConstVCall &VC : 2111 FS->type_checked_load_const_vcalls()) { 2112 for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) { 2113 CallSlots[{MD, VC.VFunc.Offset}] 2114 .ConstCSInfo[VC.Args] 2115 .addSummaryTypeCheckedLoadUser(FS); 2116 } 2117 } 2118 } 2119 } 2120 } 2121 2122 // For each (type, offset) pair: 2123 bool DidVirtualConstProp = false; 2124 std::map<std::string, Function*> DevirtTargets; 2125 for (auto &S : CallSlots) { 2126 // Search each of the members of the type identifier for the virtual 2127 // function implementation at offset S.first.ByteOffset, and add to 2128 // TargetsForSlot. 2129 std::vector<VirtualCallTarget> TargetsForSlot; 2130 WholeProgramDevirtResolution *Res = nullptr; 2131 const std::set<TypeMemberInfo> &TypeMemberInfos = TypeIdMap[S.first.TypeID]; 2132 if (ExportSummary && isa<MDString>(S.first.TypeID) && 2133 TypeMemberInfos.size()) 2134 // For any type id used on a global's type metadata, create the type id 2135 // summary resolution regardless of whether we can devirtualize, so that 2136 // lower type tests knows the type id is not Unsat. If it was not used on 2137 // a global's type metadata, the TypeIdMap entry set will be empty, and 2138 // we don't want to create an entry (with the default Unknown type 2139 // resolution), which can prevent detection of the Unsat. 2140 Res = &ExportSummary 2141 ->getOrInsertTypeIdSummary( 2142 cast<MDString>(S.first.TypeID)->getString()) 2143 .WPDRes[S.first.ByteOffset]; 2144 if (tryFindVirtualCallTargets(TargetsForSlot, TypeMemberInfos, 2145 S.first.ByteOffset)) { 2146 2147 if (!trySingleImplDevirt(ExportSummary, TargetsForSlot, S.second, Res)) { 2148 DidVirtualConstProp |= 2149 tryVirtualConstProp(TargetsForSlot, S.second, Res, S.first); 2150 2151 tryICallBranchFunnel(TargetsForSlot, S.second, Res, S.first); 2152 } 2153 2154 // Collect functions devirtualized at least for one call site for stats. 2155 if (RemarksEnabled) 2156 for (const auto &T : TargetsForSlot) 2157 if (T.WasDevirt) 2158 DevirtTargets[std::string(T.Fn->getName())] = T.Fn; 2159 } 2160 2161 // CFI-specific: if we are exporting and any llvm.type.checked.load 2162 // intrinsics were *not* devirtualized, we need to add the resulting 2163 // llvm.type.test intrinsics to the function summaries so that the 2164 // LowerTypeTests pass will export them. 2165 if (ExportSummary && isa<MDString>(S.first.TypeID)) { 2166 auto GUID = 2167 GlobalValue::getGUID(cast<MDString>(S.first.TypeID)->getString()); 2168 for (auto FS : S.second.CSInfo.SummaryTypeCheckedLoadUsers) 2169 FS->addTypeTest(GUID); 2170 for (auto &CCS : S.second.ConstCSInfo) 2171 for (auto FS : CCS.second.SummaryTypeCheckedLoadUsers) 2172 FS->addTypeTest(GUID); 2173 } 2174 } 2175 2176 if (RemarksEnabled) { 2177 // Generate remarks for each devirtualized function. 
    for (const auto &DT : DevirtTargets) {
      Function *F = DT.second;

      using namespace ore;
      OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, "Devirtualized", F)
                        << "devirtualized "
                        << NV("FunctionName", DT.first));
    }
  }

  removeRedundantTypeTests();

  // Rebuild each global we touched as part of virtual constant propagation to
  // include the before and after bytes.
  if (DidVirtualConstProp)
    for (VTableBits &B : Bits)
      rebuildGlobal(B);

  // We have lowered or deleted the type intrinsics, so we will no
  // longer have enough information to reason about the liveness of virtual
  // function pointers in GlobalDCE.
  for (GlobalVariable &GV : M.globals())
    GV.eraseMetadata(LLVMContext::MD_vcall_visibility);

  return true;
}

void DevirtIndex::run() {
  if (ExportSummary.typeIdCompatibleVtableMap().empty())
    return;

  DenseMap<GlobalValue::GUID, std::vector<StringRef>> NameByGUID;
  for (auto &P : ExportSummary.typeIdCompatibleVtableMap()) {
    NameByGUID[GlobalValue::getGUID(P.first)].push_back(P.first);
  }

  // Collect information from summary about which calls to try to devirtualize.
  for (auto &P : ExportSummary) {
    for (auto &S : P.second.SummaryList) {
      auto *FS = dyn_cast<FunctionSummary>(S.get());
      if (!FS)
        continue;
      // FIXME: Only add live functions.
      for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
        for (StringRef Name : NameByGUID[VF.GUID]) {
          CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
        }
      }
      for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
        for (StringRef Name : NameByGUID[VF.GUID]) {
          CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
        }
      }
      for (const FunctionSummary::ConstVCall &VC :
           FS->type_test_assume_const_vcalls()) {
        for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
          CallSlots[{Name, VC.VFunc.Offset}]
              .ConstCSInfo[VC.Args]
              .addSummaryTypeTestAssumeUser(FS);
        }
      }
      for (const FunctionSummary::ConstVCall &VC :
           FS->type_checked_load_const_vcalls()) {
        for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
          CallSlots[{Name, VC.VFunc.Offset}]
              .ConstCSInfo[VC.Args]
              .addSummaryTypeCheckedLoadUser(FS);
        }
      }
    }
  }

  std::set<ValueInfo> DevirtTargets;
  // For each (type, offset) pair:
  for (auto &S : CallSlots) {
    // Search each of the members of the type identifier for the virtual
    // function implementation at offset S.first.ByteOffset, and add to
    // TargetsForSlot.
    std::vector<ValueInfo> TargetsForSlot;
    auto TidSummary =
        ExportSummary.getTypeIdCompatibleVtableSummary(S.first.TypeID);
    assert(TidSummary);
    // Create the type id summary resolution regardless of whether we can
    // devirtualize, so that lower type tests knows the type id is used on
    // a global and not Unsat.
    WholeProgramDevirtResolution *Res =
        &ExportSummary.getOrInsertTypeIdSummary(S.first.TypeID)
             .WPDRes[S.first.ByteOffset];
    if (tryFindVirtualCallTargets(TargetsForSlot, *TidSummary,
                                  S.first.ByteOffset)) {

      if (!trySingleImplDevirt(TargetsForSlot, S.first, S.second, Res,
                               DevirtTargets))
        continue;
    }
  }

  // Optionally have the thin link print a message for each devirtualized
  // function.
2276 if (PrintSummaryDevirt) 2277 for (const auto &DT : DevirtTargets) 2278 errs() << "Devirtualized call to " << DT << "\n"; 2279 } 2280