//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This specialises functions with constant parameters. Constant parameters
// like function pointers and constant globals are propagated to the callee by
// specializing the function. The main benefit of this pass at the moment is
// that indirect calls are transformed into direct calls, which provides inline
// opportunities that the inliner would not have been able to achieve. That's
// why function specialisation is run before the inliner in the optimisation
// pipeline; that is by design. Otherwise, we would only benefit from constant
// passing, which is a valid use-case too, but hasn't been explored much in
// terms of performance uplifts, cost-model and compile-time impact.
//
// Current limitations:
// - It does not yet handle integer ranges. We do support "literal constants",
//   but that's off by default under an option.
// - The cost-model could be further looked into (it mainly focuses on inlining
//   benefits),
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`.
Thus, increasing this value and the 34 // number of iterations, will linearly increase the number of times recursive 35 // functions get specialized, see also the discussion in 36 // https://reviews.llvm.org/D106426 for details. Perhaps there is a 37 // compile-time friendlier way to control/limit the number of specialisations 38 // for recursive functions. 39 // - Don't transform the function if function specialization does not trigger; 40 // the SCCPSolver may make IR changes. 41 // 42 // References: 43 // - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable 44 // it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q 45 // 46 //===----------------------------------------------------------------------===// 47 48 #include "llvm/Transforms/IPO/FunctionSpecialization.h" 49 #include "llvm/ADT/Statistic.h" 50 #include "llvm/Analysis/CodeMetrics.h" 51 #include "llvm/Analysis/InlineCost.h" 52 #include "llvm/Analysis/LoopInfo.h" 53 #include "llvm/Analysis/TargetTransformInfo.h" 54 #include "llvm/Analysis/ValueLattice.h" 55 #include "llvm/Analysis/ValueLatticeUtils.h" 56 #include "llvm/IR/IntrinsicInst.h" 57 #include "llvm/Transforms/Scalar/SCCP.h" 58 #include "llvm/Transforms/Utils/Cloning.h" 59 #include "llvm/Transforms/Utils/SCCPSolver.h" 60 #include "llvm/Transforms/Utils/SizeOpts.h" 61 #include <cmath> 62 63 using namespace llvm; 64 65 #define DEBUG_TYPE "function-specialization" 66 67 STATISTIC(NumFuncSpecialized, "Number of functions specialized"); 68 69 static cl::opt<bool> ForceFunctionSpecialization( 70 "force-function-specialization", cl::init(false), cl::Hidden, 71 cl::desc("Force function specialization for every call site with a " 72 "constant argument")); 73 74 static cl::opt<unsigned> MaxClonesThreshold( 75 "func-specialization-max-clones", cl::Hidden, 76 cl::desc("The maximum number of clones allowed for a single function " 77 "specialization"), 78 cl::init(3)); 79 80 static cl::opt<unsigned> SmallFunctionThreshold( 81 
"func-specialization-size-threshold", cl::Hidden, 82 cl::desc("Don't specialize functions that have less than this theshold " 83 "number of instructions"), 84 cl::init(100)); 85 86 static cl::opt<unsigned> 87 AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden, 88 cl::desc("Average loop iteration count cost"), 89 cl::init(10)); 90 91 static cl::opt<bool> SpecializeOnAddresses( 92 "func-specialization-on-address", cl::init(false), cl::Hidden, 93 cl::desc("Enable function specialization on the address of global values")); 94 95 // Disabled by default as it can significantly increase compilation times. 96 // 97 // https://llvm-compile-time-tracker.com 98 // https://github.com/nikic/llvm-compile-time-tracker 99 static cl::opt<bool> EnableSpecializationForLiteralConstant( 100 "function-specialization-for-literal-constant", cl::init(false), cl::Hidden, 101 cl::desc("Enable specialization of functions that take a literal constant " 102 "as an argument.")); 103 104 Constant *FunctionSpecializer::getPromotableAlloca(AllocaInst *Alloca, 105 CallInst *Call) { 106 Value *StoreValue = nullptr; 107 for (auto *User : Alloca->users()) { 108 // We can't use llvm::isAllocaPromotable() as that would fail because of 109 // the usage in the CallInst, which is what we check here. 110 if (User == Call) 111 continue; 112 if (auto *Bitcast = dyn_cast<BitCastInst>(User)) { 113 if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call) 114 return nullptr; 115 continue; 116 } 117 118 if (auto *Store = dyn_cast<StoreInst>(User)) { 119 // This is a duplicate store, bail out. 120 if (StoreValue || Store->isVolatile()) 121 return nullptr; 122 StoreValue = Store->getValueOperand(); 123 continue; 124 } 125 // Bail if there is any other unknown usage. 126 return nullptr; 127 } 128 return getCandidateConstant(StoreValue); 129 } 130 131 // A constant stack value is an AllocaInst that has a single constant 132 // value stored to it. 
Return this constant if such an alloca stack value 133 // is a function argument. 134 Constant *FunctionSpecializer::getConstantStackValue(CallInst *Call, 135 Value *Val) { 136 if (!Val) 137 return nullptr; 138 Val = Val->stripPointerCasts(); 139 if (auto *ConstVal = dyn_cast<ConstantInt>(Val)) 140 return ConstVal; 141 auto *Alloca = dyn_cast<AllocaInst>(Val); 142 if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy()) 143 return nullptr; 144 return getPromotableAlloca(Alloca, Call); 145 } 146 147 // To support specializing recursive functions, it is important to propagate 148 // constant arguments because after a first iteration of specialisation, a 149 // reduced example may look like this: 150 // 151 // define internal void @RecursiveFn(i32* arg1) { 152 // %temp = alloca i32, align 4 153 // store i32 2 i32* %temp, align 4 154 // call void @RecursiveFn.1(i32* nonnull %temp) 155 // ret void 156 // } 157 // 158 // Before a next iteration, we need to propagate the constant like so 159 // which allows further specialization in next iterations. 160 // 161 // @funcspec.arg = internal constant i32 2 162 // 163 // define internal void @someFunc(i32* arg1) { 164 // call void @otherFunc(i32* nonnull @funcspec.arg) 165 // ret void 166 // } 167 // 168 void FunctionSpecializer::promoteConstantStackValues() { 169 // Iterate over the argument tracked functions see if there 170 // are any new constant values for the call instruction via 171 // stack variables. 
172 for (Function &F : M) { 173 if (!Solver.isArgumentTrackedFunction(&F)) 174 continue; 175 176 for (auto *User : F.users()) { 177 178 auto *Call = dyn_cast<CallInst>(User); 179 if (!Call) 180 continue; 181 182 if (!Solver.isBlockExecutable(Call->getParent())) 183 continue; 184 185 bool Changed = false; 186 for (const Use &U : Call->args()) { 187 unsigned Idx = Call->getArgOperandNo(&U); 188 Value *ArgOp = Call->getArgOperand(Idx); 189 Type *ArgOpType = ArgOp->getType(); 190 191 if (!Call->onlyReadsMemory(Idx) || !ArgOpType->isPointerTy()) 192 continue; 193 194 auto *ConstVal = getConstantStackValue(Call, ArgOp); 195 if (!ConstVal) 196 continue; 197 198 Value *GV = new GlobalVariable(M, ConstVal->getType(), true, 199 GlobalValue::InternalLinkage, ConstVal, 200 "funcspec.arg"); 201 if (ArgOpType != ConstVal->getType()) 202 GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOpType); 203 204 Call->setArgOperand(Idx, GV); 205 Changed = true; 206 } 207 208 // Add the changed CallInst to Solver Worklist 209 if (Changed) 210 Solver.visitCall(*Call); 211 } 212 } 213 } 214 215 // ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics 216 // interfere with the promoteConstantStackValues() optimization. 217 static void removeSSACopy(Function &F) { 218 for (BasicBlock &BB : F) { 219 for (Instruction &Inst : llvm::make_early_inc_range(BB)) { 220 auto *II = dyn_cast<IntrinsicInst>(&Inst); 221 if (!II) 222 continue; 223 if (II->getIntrinsicID() != Intrinsic::ssa_copy) 224 continue; 225 Inst.replaceAllUsesWith(II->getOperand(0)); 226 Inst.eraseFromParent(); 227 } 228 } 229 } 230 231 /// Remove any ssa_copy intrinsics that may have been introduced. 
232 void FunctionSpecializer::cleanUpSSA() { 233 for (Function *F : SpecializedFuncs) 234 removeSSACopy(*F); 235 } 236 237 238 template <> struct llvm::DenseMapInfo<SpecSig> { 239 static inline SpecSig getEmptyKey() { return {~0U, {}}; } 240 241 static inline SpecSig getTombstoneKey() { return {~1U, {}}; } 242 243 static unsigned getHashValue(const SpecSig &S) { 244 return static_cast<unsigned>(hash_value(S)); 245 } 246 247 static bool isEqual(const SpecSig &LHS, const SpecSig &RHS) { 248 return LHS == RHS; 249 } 250 }; 251 252 /// Attempt to specialize functions in the module to enable constant 253 /// propagation across function boundaries. 254 /// 255 /// \returns true if at least one function is specialized. 256 bool FunctionSpecializer::run() { 257 // Find possible specializations for each function. 258 SpecMap SM; 259 SmallVector<Spec, 32> AllSpecs; 260 unsigned NumCandidates = 0; 261 for (Function &F : M) { 262 if (!isCandidateFunction(&F)) 263 continue; 264 265 auto Cost = getSpecializationCost(&F); 266 if (!Cost.isValid()) { 267 LLVM_DEBUG(dbgs() << "FnSpecialization: Invalid specialization cost for " 268 << F.getName() << "\n"); 269 continue; 270 } 271 272 LLVM_DEBUG(dbgs() << "FnSpecialization: Specialization cost for " 273 << F.getName() << " is " << Cost << "\n"); 274 275 if (!findSpecializations(&F, Cost, AllSpecs, SM)) { 276 LLVM_DEBUG( 277 dbgs() << "FnSpecialization: No possible specializations found for " 278 << F.getName() << "\n"); 279 continue; 280 } 281 282 ++NumCandidates; 283 } 284 285 if (!NumCandidates) { 286 LLVM_DEBUG( 287 dbgs() 288 << "FnSpecialization: No possible specializations found in module\n"); 289 return false; 290 } 291 292 // Choose the most profitable specialisations, which fit in the module 293 // specialization budget, which is derived from maximum number of 294 // specializations per specialization candidate function. 
295 auto CompareGain = [&AllSpecs](unsigned I, unsigned J) { 296 return AllSpecs[I].Gain > AllSpecs[J].Gain; 297 }; 298 const unsigned NSpecs = 299 std::min(NumCandidates * MaxClonesThreshold, unsigned(AllSpecs.size())); 300 SmallVector<unsigned> BestSpecs(NSpecs + 1); 301 std::iota(BestSpecs.begin(), BestSpecs.begin() + NSpecs, 0); 302 if (AllSpecs.size() > NSpecs) { 303 LLVM_DEBUG(dbgs() << "FnSpecialization: Number of candidates exceed " 304 << "the maximum number of clones threshold.\n" 305 << "FnSpecialization: Specializing the " 306 << NSpecs 307 << " most profitable candidates.\n"); 308 std::make_heap(BestSpecs.begin(), BestSpecs.begin() + NSpecs, CompareGain); 309 for (unsigned I = NSpecs, N = AllSpecs.size(); I < N; ++I) { 310 BestSpecs[NSpecs] = I; 311 std::push_heap(BestSpecs.begin(), BestSpecs.end(), CompareGain); 312 std::pop_heap(BestSpecs.begin(), BestSpecs.end(), CompareGain); 313 } 314 } 315 316 LLVM_DEBUG(dbgs() << "FnSpecialization: List of specializations \n"; 317 for (unsigned I = 0; I < NSpecs; ++I) { 318 const Spec &S = AllSpecs[BestSpecs[I]]; 319 dbgs() << "FnSpecialization: Function " << S.F->getName() 320 << " , gain " << S.Gain << "\n"; 321 for (const ArgInfo &Arg : S.Sig.Args) 322 dbgs() << "FnSpecialization: FormalArg = " 323 << Arg.Formal->getNameOrAsOperand() 324 << ", ActualArg = " << Arg.Actual->getNameOrAsOperand() 325 << "\n"; 326 }); 327 328 // Create the chosen specializations. 329 SmallPtrSet<Function *, 8> OriginalFuncs; 330 SmallVector<Function *> Clones; 331 for (unsigned I = 0; I < NSpecs; ++I) { 332 Spec &S = AllSpecs[BestSpecs[I]]; 333 S.Clone = createSpecialization(S.F, S.Sig); 334 335 // Update the known call sites to call the clone. 
336 for (CallBase *Call : S.CallSites) { 337 LLVM_DEBUG(dbgs() << "FnSpecialization: Redirecting " << *Call 338 << " to call " << S.Clone->getName() << "\n"); 339 Call->setCalledFunction(S.Clone); 340 } 341 342 Clones.push_back(S.Clone); 343 OriginalFuncs.insert(S.F); 344 } 345 346 Solver.solveWhileResolvedUndefsIn(Clones); 347 348 // Update the rest of the call sites - these are the recursive calls, calls 349 // to discarded specialisations and calls that may match a specialisation 350 // after the solver runs. 351 for (Function *F : OriginalFuncs) { 352 auto [Begin, End] = SM[F]; 353 updateCallSites(F, AllSpecs.begin() + Begin, AllSpecs.begin() + End); 354 } 355 356 promoteConstantStackValues(); 357 LLVM_DEBUG(if (NbFunctionsSpecialized) dbgs() 358 << "FnSpecialization: Specialized " << NbFunctionsSpecialized 359 << " functions in module " << M.getName() << "\n"); 360 361 NumFuncSpecialized += NbFunctionsSpecialized; 362 return true; 363 } 364 365 void FunctionSpecializer::removeDeadFunctions() { 366 for (Function *F : FullySpecialized) { 367 LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead function " 368 << F->getName() << "\n"); 369 if (FAM) 370 FAM->clear(*F, F->getName()); 371 F->eraseFromParent(); 372 } 373 FullySpecialized.clear(); 374 } 375 376 // Compute the code metrics for function \p F. 377 CodeMetrics &FunctionSpecializer::analyzeFunction(Function *F) { 378 auto I = FunctionMetrics.insert({F, CodeMetrics()}); 379 CodeMetrics &Metrics = I.first->second; 380 if (I.second) { 381 // The code metrics were not cached. 
382 SmallPtrSet<const Value *, 32> EphValues; 383 CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues); 384 for (BasicBlock &BB : *F) 385 Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues); 386 387 LLVM_DEBUG(dbgs() << "FnSpecialization: Code size of function " 388 << F->getName() << " is " << Metrics.NumInsts 389 << " instructions\n"); 390 } 391 return Metrics; 392 } 393 394 /// Clone the function \p F and remove the ssa_copy intrinsics added by 395 /// the SCCPSolver in the cloned version. 396 static Function *cloneCandidateFunction(Function *F) { 397 ValueToValueMapTy Mappings; 398 Function *Clone = CloneFunction(F, Mappings); 399 removeSSACopy(*Clone); 400 return Clone; 401 } 402 403 bool FunctionSpecializer::findSpecializations(Function *F, InstructionCost Cost, 404 SmallVectorImpl<Spec> &AllSpecs, 405 SpecMap &SM) { 406 // A mapping from a specialisation signature to the index of the respective 407 // entry in the all specialisation array. Used to ensure uniqueness of 408 // specialisations. 409 DenseMap<SpecSig, unsigned> UM; 410 411 // Get a list of interesting arguments. 412 SmallVector<Argument *> Args; 413 for (Argument &Arg : F->args()) 414 if (isArgumentInteresting(&Arg)) 415 Args.push_back(&Arg); 416 417 if (Args.empty()) 418 return false; 419 420 bool Found = false; 421 for (User *U : F->users()) { 422 if (!isa<CallInst>(U) && !isa<InvokeInst>(U)) 423 continue; 424 auto &CS = *cast<CallBase>(U); 425 426 // The user instruction does not call our function. 427 if (CS.getCalledFunction() != F) 428 continue; 429 430 // If the call site has attribute minsize set, that callsite won't be 431 // specialized. 432 if (CS.hasFnAttr(Attribute::MinSize)) 433 continue; 434 435 // If the parent of the call site will never be executed, we don't need 436 // to worry about the passed value. 
437 if (!Solver.isBlockExecutable(CS.getParent())) 438 continue; 439 440 // Examine arguments and create a specialisation candidate from the 441 // constant operands of this call site. 442 SpecSig S; 443 for (Argument *A : Args) { 444 Constant *C = getCandidateConstant(CS.getArgOperand(A->getArgNo())); 445 if (!C) 446 continue; 447 LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting argument " 448 << A->getName() << " : " << C->getNameOrAsOperand() 449 << "\n"); 450 S.Args.push_back({A, C}); 451 } 452 453 if (S.Args.empty()) 454 continue; 455 456 // Check if we have encountered the same specialisation already. 457 if (auto It = UM.find(S); It != UM.end()) { 458 // Existing specialisation. Add the call to the list to rewrite, unless 459 // it's a recursive call. A specialisation, generated because of a 460 // recursive call may end up as not the best specialisation for all 461 // the cloned instances of this call, which result from specialising 462 // functions. Hence we don't rewrite the call directly, but match it with 463 // the best specialisation once all specialisations are known. 464 if (CS.getFunction() == F) 465 continue; 466 const unsigned Index = It->second; 467 AllSpecs[Index].CallSites.push_back(&CS); 468 } else { 469 // Calculate the specialisation gain. 470 InstructionCost Gain = 0 - Cost; 471 for (ArgInfo &A : S.Args) 472 Gain += 473 getSpecializationBonus(A.Formal, A.Actual, Solver.getLoopInfo(*F)); 474 475 // Discard unprofitable specialisations. 476 if (!ForceFunctionSpecialization && Gain <= 0) 477 continue; 478 479 // Create a new specialisation entry. 
480 auto &Spec = AllSpecs.emplace_back(F, S, Gain); 481 if (CS.getFunction() != F) 482 Spec.CallSites.push_back(&CS); 483 const unsigned Index = AllSpecs.size() - 1; 484 UM[S] = Index; 485 if (auto [It, Inserted] = SM.try_emplace(F, Index, Index + 1); !Inserted) 486 It->second.second = Index + 1; 487 Found = true; 488 } 489 } 490 491 return Found; 492 } 493 494 bool FunctionSpecializer::isCandidateFunction(Function *F) { 495 if (F->isDeclaration()) 496 return false; 497 498 if (F->hasFnAttribute(Attribute::NoDuplicate)) 499 return false; 500 501 if (!Solver.isArgumentTrackedFunction(F)) 502 return false; 503 504 // Do not specialize the cloned function again. 505 if (SpecializedFuncs.contains(F)) 506 return false; 507 508 // If we're optimizing the function for size, we shouldn't specialize it. 509 if (F->hasOptSize() || 510 shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass)) 511 return false; 512 513 // Exit if the function is not executable. There's no point in specializing 514 // a dead function. 515 if (!Solver.isBlockExecutable(&F->getEntryBlock())) 516 return false; 517 518 // It wastes time to specialize a function which would get inlined finally. 519 if (F->hasFnAttribute(Attribute::AlwaysInline)) 520 return false; 521 522 LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName() 523 << "\n"); 524 return true; 525 } 526 527 Function *FunctionSpecializer::createSpecialization(Function *F, const SpecSig &S) { 528 Function *Clone = cloneCandidateFunction(F); 529 530 // Initialize the lattice state of the arguments of the function clone, 531 // marking the argument on which we specialized the function constant 532 // with the given value. 
533 Solver.markArgInFuncSpecialization(Clone, S.Args); 534 535 Solver.addArgumentTrackedFunction(Clone); 536 Solver.markBlockExecutable(&Clone->front()); 537 538 // Mark all the specialized functions 539 SpecializedFuncs.insert(Clone); 540 NbFunctionsSpecialized++; 541 542 return Clone; 543 } 544 545 /// Compute and return the cost of specializing function \p F. 546 InstructionCost FunctionSpecializer::getSpecializationCost(Function *F) { 547 CodeMetrics &Metrics = analyzeFunction(F); 548 // If the code metrics reveal that we shouldn't duplicate the function, we 549 // shouldn't specialize it. Set the specialization cost to Invalid. 550 // Or if the lines of codes implies that this function is easy to get 551 // inlined so that we shouldn't specialize it. 552 if (Metrics.notDuplicatable || !Metrics.NumInsts.isValid() || 553 (!ForceFunctionSpecialization && 554 !F->hasFnAttribute(Attribute::NoInline) && 555 Metrics.NumInsts < SmallFunctionThreshold)) 556 return InstructionCost::getInvalid(); 557 558 // Otherwise, set the specialization cost to be the cost of all the 559 // instructions in the function. 560 return Metrics.NumInsts * InlineConstants::getInstrCost(); 561 } 562 563 static InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI, 564 const LoopInfo &LI) { 565 auto *I = dyn_cast_or_null<Instruction>(U); 566 // If not an instruction we do not know how to evaluate. 567 // Keep minimum possible cost for now so that it doesnt affect 568 // specialization. 569 if (!I) 570 return std::numeric_limits<unsigned>::min(); 571 572 InstructionCost Cost = 573 TTI.getInstructionCost(U, TargetTransformInfo::TCK_SizeAndLatency); 574 575 // Increase the cost if it is inside the loop. 576 unsigned LoopDepth = LI.getLoopDepth(I->getParent()); 577 Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth); 578 579 // Traverse recursively if there are more uses. 580 // TODO: Any other instructions to be added here? 
581 if (I->mayReadFromMemory() || I->isCast()) 582 for (auto *User : I->users()) 583 Cost += getUserBonus(User, TTI, LI); 584 585 return Cost; 586 } 587 588 /// Compute a bonus for replacing argument \p A with constant \p C. 589 InstructionCost 590 FunctionSpecializer::getSpecializationBonus(Argument *A, Constant *C, 591 const LoopInfo &LI) { 592 Function *F = A->getParent(); 593 auto &TTI = (GetTTI)(*F); 594 LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for constant: " 595 << C->getNameOrAsOperand() << "\n"); 596 597 InstructionCost TotalCost = 0; 598 for (auto *U : A->users()) { 599 TotalCost += getUserBonus(U, TTI, LI); 600 LLVM_DEBUG(dbgs() << "FnSpecialization: User cost "; 601 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n"); 602 } 603 604 // The below heuristic is only concerned with exposing inlining 605 // opportunities via indirect call promotion. If the argument is not a 606 // (potentially casted) function pointer, give up. 607 Function *CalledFunction = dyn_cast<Function>(C->stripPointerCasts()); 608 if (!CalledFunction) 609 return TotalCost; 610 611 // Get TTI for the called function (used for the inline cost). 612 auto &CalleeTTI = (GetTTI)(*CalledFunction); 613 614 // Look at all the call sites whose called value is the argument. 615 // Specializing the function on the argument would allow these indirect 616 // calls to be promoted to direct calls. If the indirect call promotion 617 // would likely enable the called function to be inlined, specializing is a 618 // good idea. 619 int Bonus = 0; 620 for (User *U : A->users()) { 621 if (!isa<CallInst>(U) && !isa<InvokeInst>(U)) 622 continue; 623 auto *CS = cast<CallBase>(U); 624 if (CS->getCalledOperand() != A) 625 continue; 626 if (CS->getFunctionType() != CalledFunction->getFunctionType()) 627 continue; 628 629 // Get the cost of inlining the called function at this call site. Note 630 // that this is only an estimate. 
The called function may eventually 631 // change in a way that leads to it not being inlined here, even though 632 // inlining looks profitable now. For example, one of its called 633 // functions may be inlined into it, making the called function too large 634 // to be inlined into this call site. 635 // 636 // We apply a boost for performing indirect call promotion by increasing 637 // the default threshold by the threshold for indirect calls. 638 auto Params = getInlineParams(); 639 Params.DefaultThreshold += InlineConstants::IndirectCallThreshold; 640 InlineCost IC = 641 getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI); 642 643 // We clamp the bonus for this call to be between zero and the default 644 // threshold. 645 if (IC.isAlways()) 646 Bonus += Params.DefaultThreshold; 647 else if (IC.isVariable() && IC.getCostDelta() > 0) 648 Bonus += IC.getCostDelta(); 649 650 LLVM_DEBUG(dbgs() << "FnSpecialization: Inlining bonus " << Bonus 651 << " for user " << *U << "\n"); 652 } 653 654 return TotalCost + Bonus; 655 } 656 657 /// Determine if it is possible to specialise the function for constant values 658 /// of the formal parameter \p A. 659 bool FunctionSpecializer::isArgumentInteresting(Argument *A) { 660 // No point in specialization if the argument is unused. 661 if (A->user_empty()) 662 return false; 663 664 // For now, don't attempt to specialize functions based on the values of 665 // composite types. 666 Type *ArgTy = A->getType(); 667 if (!ArgTy->isSingleValueType()) 668 return false; 669 670 // Specialization of integer and floating point types needs to be explicitly 671 // enabled. 672 if (!EnableSpecializationForLiteralConstant && 673 (ArgTy->isIntegerTy() || ArgTy->isFloatingPointTy())) 674 return false; 675 676 // SCCP solver does not record an argument that will be constructed on 677 // stack. 
678 if (A->hasByValAttr() && !A->getParent()->onlyReadsMemory()) 679 return false; 680 681 // Check the lattice value and decide if we should attemt to specialize, 682 // based on this argument. No point in specialization, if the lattice value 683 // is already a constant. 684 const ValueLatticeElement &LV = Solver.getLatticeValueFor(A); 685 if (LV.isUnknownOrUndef() || LV.isConstant() || 686 (LV.isConstantRange() && LV.getConstantRange().isSingleElement())) { 687 LLVM_DEBUG(dbgs() << "FnSpecialization: Nothing to do, parameter " 688 << A->getNameOrAsOperand() << " is already constant\n"); 689 return false; 690 } 691 692 LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting parameter " 693 << A->getNameOrAsOperand() << "\n"); 694 695 return true; 696 } 697 698 /// Check if the valuy \p V (an actual argument) is a constant or can only 699 /// have a constant value. Return that constant. 700 Constant *FunctionSpecializer::getCandidateConstant(Value *V) { 701 if (isa<PoisonValue>(V)) 702 return nullptr; 703 704 // TrackValueOfGlobalVariable only tracks scalar global variables. 705 if (auto *GV = dyn_cast<GlobalVariable>(V)) { 706 // Check if we want to specialize on the address of non-constant 707 // global values. 708 if (!GV->isConstant() && !SpecializeOnAddresses) 709 return nullptr; 710 711 if (!GV->getValueType()->isSingleValueType()) 712 return nullptr; 713 } 714 715 // Select for possible specialisation values that are constants or 716 // are deduced to be constants or constant ranges with a single element. 
717 Constant *C = dyn_cast<Constant>(V); 718 if (!C) { 719 const ValueLatticeElement &LV = Solver.getLatticeValueFor(V); 720 if (LV.isConstant()) 721 C = LV.getConstant(); 722 else if (LV.isConstantRange() && LV.getConstantRange().isSingleElement()) { 723 assert(V->getType()->isIntegerTy() && "Non-integral constant range"); 724 C = Constant::getIntegerValue(V->getType(), 725 *LV.getConstantRange().getSingleElement()); 726 } else 727 return nullptr; 728 } 729 730 return C; 731 } 732 733 void FunctionSpecializer::updateCallSites(Function *F, const Spec *Begin, 734 const Spec *End) { 735 // Collect the call sites that need updating. 736 SmallVector<CallBase *> ToUpdate; 737 for (User *U : F->users()) 738 if (auto *CS = dyn_cast<CallBase>(U); 739 CS && CS->getCalledFunction() == F && 740 Solver.isBlockExecutable(CS->getParent())) 741 ToUpdate.push_back(CS); 742 743 unsigned NCallsLeft = ToUpdate.size(); 744 for (CallBase *CS : ToUpdate) { 745 bool ShouldDecrementCount = CS->getFunction() == F; 746 747 // Find the best matching specialisation. 748 const Spec *BestSpec = nullptr; 749 for (const Spec &S : make_range(Begin, End)) { 750 if (!S.Clone || (BestSpec && S.Gain <= BestSpec->Gain)) 751 continue; 752 753 if (any_of(S.Sig.Args, [CS, this](const ArgInfo &Arg) { 754 unsigned ArgNo = Arg.Formal->getArgNo(); 755 return getCandidateConstant(CS->getArgOperand(ArgNo)) != Arg.Actual; 756 })) 757 continue; 758 759 BestSpec = &S; 760 } 761 762 if (BestSpec) { 763 LLVM_DEBUG(dbgs() << "FnSpecialization: Redirecting " << *CS 764 << " to call " << BestSpec->Clone->getName() << "\n"); 765 CS->setCalledFunction(BestSpec->Clone); 766 ShouldDecrementCount = true; 767 } 768 769 if (ShouldDecrementCount) 770 --NCallsLeft; 771 } 772 773 // If the function has been completely specialized, the original function 774 // is no longer needed. Mark it unreachable. 775 if (NCallsLeft == 0) { 776 Solver.markFunctionUnreachable(F); 777 FullySpecialized.insert(F); 778 } 779 } 780