//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
// - Replacing globalized device memory with stack memory.
// - Replacing globalized device memory with shared memory.
// - Parallel region merging.
// - Transforming generic-mode device kernels to SPMD mode.
// - Specializing the state machine for generic-mode device kernels.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"

#include <algorithm>
#include <optional>
#include <string>

using namespace llvm;
using namespace omp;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::desc("Disable OpenMP specific optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> EnableParallelRegionMerging(
    "openmp-opt-enable-merging",
    cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    DisableInternalization("openmp-opt-disable-internalization",
                           cl::desc("Disable function internalization."),
                           cl::Hidden, cl::init(false));

static cl::opt<bool> DeduceICVValues("openmp-deduce-icv-values",
                                     cl::init(false), cl::Hidden);
static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
                                    cl::Hidden);
static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> HideMemoryTransferLatency(
    "openmp-hide-memory-transfer-latency",
    cl::desc("[WIP] Tries to hide the latency of host to device memory"
             " transfers"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> DisableOpenMPOptDeglobalization(
    "openmp-opt-disable-deglobalization",
    cl::desc("Disable OpenMP optimizations involving deglobalization."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptSPMDization(
    "openmp-opt-disable-spmdization",
    cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptFolding(
    "openmp-opt-disable-folding",
    cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
    "openmp-opt-disable-state-machine-rewrite",
    cl::desc("Disable OpenMP optimizations that replace the state machine."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptBarrierElimination(
    "openmp-opt-disable-barrier-elimination",
    cl::desc("Disable OpenMP optimizations that eliminate barriers."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleAfterOptimizations(
    "openmp-opt-print-module-after",
    cl::desc("Print the current module after OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleBeforeOptimizations(
    "openmp-opt-print-module-before",
    cl::desc("Print the current module before OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> AlwaysInlineDeviceFunctions(
    "openmp-opt-inline-device",
    cl::desc("Inline all applicable functions on the device."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    EnableVerboseRemarks("openmp-opt-verbose-remarks",
                         cl::desc("Enables more verbose remarks."), cl::Hidden,
                         cl::init(false));

static cl::opt<unsigned>
    SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of attributor iterations."),
                          cl::init(256));

static cl::opt<unsigned>
    SharedMemoryLimit("openmp-opt-shared-limit", cl::Hidden,
                      cl::desc("Maximum amount of shared memory to use."),
                      cl::init(std::numeric_limits<unsigned>::max()));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");
STATISTIC(NumOpenMPTargetRegionKernels,
          "Number of OpenMP target region entry points (=kernels) identified");
STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "SPMD-mode instead of generic-mode");
STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode without a state machine");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines with fallback");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines without fallback");
STATISTIC(
    NumOpenMPParallelRegionsReplacedInGPUStateMachine,
    "Number of OpenMP parallel regions replaced with ID in GPU state machines");
STATISTIC(NumOpenMPParallelRegionsMerged,
          "Number of OpenMP parallel regions merged");
STATISTIC(NumBytesMovedToSharedMemory,
          "Amount of memory pushed to shared memory");
STATISTIC(NumBarriersEliminated, "Number of redundant barriers eliminated");

#if !defined(NDEBUG)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

namespace {

struct AAHeapToShared;

struct AAICVTracker;

/// OpenMP specific information. For now, stores RFIs and ICVs also needed for
/// Attributor runs.
struct OMPInformationCache : public InformationCache {
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC,
                      bool OpenMPPostLink)
      : InformationCache(M, AG, Allocator, CGSCC), OMPBuilder(M),
        OpenMPPostLink(OpenMPPostLink) {

    OMPBuilder.initialize();
    initializeRuntimeFunctions(M);
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by the InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL function corresponding to the override clause of this ICV.
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function.
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector<Use *, 16>;

    /// Clear the UsesMap for this runtime function.
    void clearUsesMap() { UsesMap.clear(); }

    /// Boolean conversion that is true if the runtime function was found.
    operator bool() const { return Declaration; }

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      std::shared_ptr<UseVector> &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared<UseVector>();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
    void foreachUse(SmallVectorImpl<Function *> &SCC,
                    function_ref<bool(Use &, Function &)> CB) {
      for (Function *F : SCC)
        foreachUse(CB, F);
    }

    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
      SmallVector<unsigned, 8> ToBeDeleted;
      ToBeDeleted.clear();

      unsigned Idx = 0;
      UseVector &UV = getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }

  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;

  public:
    /// Iterators for the uses of this runtime function.
    decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
    decltype(UsesMap)::iterator end() { return UsesMap.end(); }
  };

  /// An OpenMP-IR-Builder instance.
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;

  /// Map from function declarations/definitions to their runtime enum type.
  DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray<InternalControlVarInfo, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue = ConstantInt::get(                                        \
          Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }
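  // For instance, OMPKinds.def contains an ICV entry along the lines of
  //   ICV_DATA_ENV(ICV_nthreads, "nthreads", "OMP_NUM_THREADS",
  //                ICV_IMPLEMENTATION_DEFINED)
  // (shown schematically), which the macros above turn into an ICVs[] entry
  // named "nthreads", associated with the OMP_NUM_THREADS environment
  // variable and a null InitValue since its initial value is implementation
  // defined.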
  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector<Type *, 8> &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    //       and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto *RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  // Helper to collect all uses of the declaration in the UsesMap.
  unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
    unsigned NumUses = 0;
    if (!RFI.Declaration)
      return NumUses;
    OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

    if (CollectStats) {
      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
    }

    // TODO: We directly convert uses into proper calls and unknown uses.
    for (Use &U : RFI.Declaration->uses()) {
      if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
        if (!CGSCC || CGSCC->empty() || CGSCC->contains(UserI->getFunction())) {
          RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
          ++NumUses;
        }
      } else {
        RFI.getOrCreateUseVector(nullptr).push_back(&U);
        ++NumUses;
      }
    }
    return NumUses;
  }

  // Helper function to recollect uses of a runtime function.
  void recollectUsesForFunction(RuntimeFunction RTF) {
    auto &RFI = RFIs[RTF];
    RFI.clearUsesMap();
    collectUses(RFI, /*CollectStats*/ false);
  }

  // Helper function to recollect uses of all runtime functions.
  void recollectUses() {
    for (int Idx = 0; Idx < RFIs.size(); ++Idx)
      recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
  }

  // Helper function to inherit the calling convention of the function callee.
  void setCallingConvention(FunctionCallee Callee, CallInst *CI) {
    if (Function *Fn = dyn_cast<Function>(Callee.getCallee()))
      CI->setCallingConv(Fn->getCallingConv());
  }

  // Helper function to determine if it's legal to create a call to the runtime
  // functions.
  bool runtimeFnsAvailable(ArrayRef<RuntimeFunction> Fns) {
    // We can always emit calls if we haven't yet linked in the runtime.
    if (!OpenMPPostLink)
      return true;

    // Once the runtime has been linked in we cannot emit calls to any
    // undefined functions.
    for (RuntimeFunction Fn : Fns) {
      RuntimeFunctionInfo &RFI = RFIs[Fn];

      if (RFI.Declaration && RFI.Declaration->isDeclaration())
        return false;
    }
    return true;
  }
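  // Typical use, as in the memory-transfer splitting further down: guard the
  // emission of new runtime calls on their availability, e.g.,
  //   if (runtimeFnsAvailable({OMPRTL___tgt_target_data_begin_mapper_issue,
  //                            OMPRTL___tgt_target_data_begin_mapper_wait}))
  //     ... emit the issue/wait pair ...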
  /// Helper to initialize all runtime function information for those defined
  /// in OpenMPKinds.def.
  void initializeRuntimeFunctions(Module &M) {

    // Helper macros for handling __VA_ARGS__ in OMP_RTL
#define OMP_TYPE(VarName, ...)                                                 \
  Type *VarName = OMPBuilder.VarName;                                          \
  (void)VarName;

#define OMP_ARRAY_TYPE(VarName, ...)                                           \
  ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
  (void)VarName##Ty;                                                           \
  PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
  (void)VarName##PtrTy;

#define OMP_FUNCTION_TYPE(VarName, ...)                                        \
  FunctionType *VarName = OMPBuilder.VarName;                                  \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_STRUCT_TYPE(VarName, ...)                                          \
  StructType *VarName = OMPBuilder.VarName;                                    \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
    Function *F = M.getFunction(_Name);                                        \
    RTLFunctions.insert(F);                                                    \
    if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
      RuntimeFunctionIDMap[F] = _Enum;                                         \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = OMPBuilder._ReturnType;                                 \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = collectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // Remove the `noinline` attribute from `__kmpc`, `ompx::` and `omp_`
    // functions, except if `optnone` is present.
    if (isOpenMPDevice(M)) {
      for (Function &F : M) {
        for (StringRef Prefix : {"__kmpc", "_ZN4ompx", "omp_"})
          if (F.hasFnAttribute(Attribute::NoInline) &&
              F.getName().startswith(Prefix) &&
              !F.hasFnAttribute(Attribute::OptimizeNone))
            F.removeFnAttr(Attribute::NoInline);
      }
    }

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }
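  // As an illustration, an OMPKinds.def entry of the rough form
  //   OMP_RTL(OMPRTL___kmpc_global_thread_num, "__kmpc_global_thread_num",
  //           false, Int32, IdentPtr)
  // makes the OMP_RTL macro above look up the declaration of
  // @__kmpc_global_thread_num, verify that it returns i32 and takes a single
  // ident_t pointer, and register it together with all its uses in RFIs.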
  /// Collection of known OpenMP runtime functions.
  DenseSet<const Function *> RTLFunctions;

  /// Indicates if we have already linked in the OpenMP device library.
  bool OpenMPPostLink = false;
};

template <typename Ty, bool InsertInvalidates = true>
struct BooleanStateWithSetVector : public BooleanState {
  bool contains(const Ty &Elem) const { return Set.contains(Elem); }
  bool insert(const Ty &Elem) {
    if (InsertInvalidates)
      BooleanState::indicatePessimisticFixpoint();
    return Set.insert(Elem);
  }

  const Ty &operator[](int Idx) const { return Set[Idx]; }
  bool operator==(const BooleanStateWithSetVector &RHS) const {
    return BooleanState::operator==(RHS) && Set == RHS.Set;
  }
  bool operator!=(const BooleanStateWithSetVector &RHS) const {
    return !(*this == RHS);
  }

  bool empty() const { return Set.empty(); }
  size_t size() const { return Set.size(); }

  /// "Clamp" this state with \p RHS.
  BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
    BooleanState::operator^=(RHS);
    Set.insert(RHS.Set.begin(), RHS.Set.end());
    return *this;
  }

private:
  /// A set to keep track of elements.
  SetVector<Ty> Set;

public:
  typename decltype(Set)::iterator begin() { return Set.begin(); }
  typename decltype(Set)::iterator end() { return Set.end(); }
  typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
  typename decltype(Set)::const_iterator end() const { return Set.end(); }
};

template <typename Ty, bool InsertInvalidates = true>
using BooleanStateWithPtrSetVector =
    BooleanStateWithSetVector<Ty *, InsertInvalidates>;

struct KernelInfoState : AbstractState {
  /// Flag to track if we reached a fixpoint.
  bool IsAtFixpoint = false;

  /// The parallel regions (identified by the outlined parallel functions) that
  /// can be reached from the associated function.
  BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
      ReachedKnownParallelRegions;

  /// State to track what parallel region we might reach.
  BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;

  /// State to track if we are in SPMD-mode, assumed or known, and why we
  /// decided we cannot be. If it is assumed, then RequiresFullRuntime should
  /// also be false.
  BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;

  /// The __kmpc_target_init call in this kernel, if any. If we find more than
  /// one we abort as the kernel is malformed.
  CallBase *KernelInitCB = nullptr;

  /// The __kmpc_target_deinit call in this kernel, if any. If we find more
  /// than one we abort as the kernel is malformed.
  CallBase *KernelDeinitCB = nullptr;

  /// Flag to indicate if the associated function is a kernel entry.
  bool IsKernelEntry = false;

  /// State to track what kernel entries can reach the associated function.
  BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;

  /// State to indicate if we can track the parallel level of the associated
  /// function. We will give up tracking if we encounter an unknown caller or
  /// the caller is __kmpc_parallel_51.
  BooleanStateWithSetVector<uint8_t> ParallelLevels;

  /// Flag that indicates if the kernel has nested parallelism.
  bool NestedParallelism = false;

  /// Abstract State interface
  ///{

  KernelInfoState() = default;
  KernelInfoState(bool BestState) {
    if (!BestState)
      indicatePessimisticFixpoint();
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return true; }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return IsAtFixpoint; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixpoint = true;
    ParallelLevels.indicatePessimisticFixpoint();
    ReachingKernelEntries.indicatePessimisticFixpoint();
    SPMDCompatibilityTracker.indicatePessimisticFixpoint();
    ReachedKnownParallelRegions.indicatePessimisticFixpoint();
    ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    IsAtFixpoint = true;
    ParallelLevels.indicateOptimisticFixpoint();
    ReachingKernelEntries.indicateOptimisticFixpoint();
    SPMDCompatibilityTracker.indicateOptimisticFixpoint();
    ReachedKnownParallelRegions.indicateOptimisticFixpoint();
    ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// Return the assumed state
  KernelInfoState &getAssumed() { return *this; }
  const KernelInfoState &getAssumed() const { return *this; }

  bool operator==(const KernelInfoState &RHS) const {
    if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
      return false;
    if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
      return false;
    if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
      return false;
    if (ReachingKernelEntries != RHS.ReachingKernelEntries)
      return false;
    if (ParallelLevels != RHS.ParallelLevels)
      return false;
    return true;
  }

  /// Returns true if this kernel contains any OpenMP parallel regions.
  bool mayContainParallelRegion() {
    return !ReachedKnownParallelRegions.empty() ||
           !ReachedUnknownParallelRegions.empty();
  }

  /// Return empty set as the best state of potential values.
  static KernelInfoState getBestState() { return KernelInfoState(true); }

  static KernelInfoState getBestState(KernelInfoState &KIS) {
    return getBestState();
  }

  /// Return full set as the worst state of potential values.
  static KernelInfoState getWorstState() { return KernelInfoState(false); }

  /// "Clamp" this state with \p KIS.
  KernelInfoState operator^=(const KernelInfoState &KIS) {
    // Do not merge two different _init and _deinit call sites.
    if (KIS.KernelInitCB) {
      if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
        llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
                         "assumptions.");
      KernelInitCB = KIS.KernelInitCB;
    }
    if (KIS.KernelDeinitCB) {
      if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
        llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
                         "assumptions.");
      KernelDeinitCB = KIS.KernelDeinitCB;
    }
    SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
    ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
    ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
    NestedParallelism |= KIS.NestedParallelism;
    return *this;
  }

  KernelInfoState operator&=(const KernelInfoState &KIS) {
    return (*this ^= KIS);
  }

  ///}
};
/// Used to map the values physically (in the IR) stored in an offload
/// array, to a vector in memory.
struct OffloadArray {
  /// Physical array (in the IR).
  AllocaInst *Array = nullptr;
  /// Mapped values.
  SmallVector<Value *, 8> StoredValues;
  /// Last stores made in the offload array.
  SmallVector<StoreInst *, 8> LastAccesses;

  OffloadArray() = default;

  /// Initializes the OffloadArray with the values stored in \p Array before
  /// instruction \p Before is reached. Returns false if the initialization
  /// fails.
  /// This MUST be used immediately after the construction of the object.
  bool initialize(AllocaInst &Array, Instruction &Before) {
    if (!Array.getAllocatedType()->isArrayTy())
      return false;

    if (!getValues(Array, Before))
      return false;

    this->Array = &Array;
    return true;
  }

  static const unsigned DeviceIDArgNum = 1;
  static const unsigned BasePtrsArgNum = 3;
  static const unsigned PtrsArgNum = 4;
  static const unsigned SizesArgNum = 5;

private:
  /// Traverses the BasicBlock where \p Array is, collecting the stores made to
  /// \p Array, leaving StoredValues with the values stored before the
  /// instruction \p Before is reached.
  bool getValues(AllocaInst &Array, Instruction &Before) {
    // Initialize container.
    const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
    StoredValues.assign(NumValues, nullptr);
    LastAccesses.assign(NumValues, nullptr);

    // TODO: This assumes the instruction \p Before is in the same
    //       BasicBlock as Array. Make it general, for any control flow graph.
    BasicBlock *BB = Array.getParent();
    if (BB != Before.getParent())
      return false;

    const DataLayout &DL = Array.getModule()->getDataLayout();
    const unsigned int PointerSize = DL.getPointerSize();

    for (Instruction &I : *BB) {
      if (&I == &Before)
        break;

      if (!isa<StoreInst>(&I))
        continue;

      auto *S = cast<StoreInst>(&I);
      int64_t Offset = -1;
      auto *Dst =
          GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
      if (Dst == &Array) {
        int64_t Idx = Offset / PointerSize;
        StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
        LastAccesses[Idx] = S;
      }
    }

    return isFilled();
  }

  /// Returns true if all values in StoredValues and
  /// LastAccesses are not nullptrs.
  bool isFilled() {
    const unsigned NumValues = StoredValues.size();
    for (unsigned I = 0; I < NumValues; ++I) {
      if (!StoredValues[I] || !LastAccesses[I])
        return false;
    }

    return true;
  }
};
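// Schematically, for IR of the form (argument positions as in the *ArgNum
// constants above)
//   %offload_sizes = alloca [2 x i64]
//   %gep0 = getelementptr inbounds [2 x i64], ptr %offload_sizes, i32 0, i32 0
//   store i64 4, ptr %gep0
//   %gep1 = getelementptr inbounds [2 x i64], ptr %offload_sizes, i32 0, i32 1
//   store i64 8, ptr %gep1
//   call void @__tgt_target_data_begin_mapper(...)
// initialize(%offload_sizes, <the call>) records {i64 4, i64 8} in
// StoredValues and the two stores in LastAccesses.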
struct OpenMPOpt {

  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;

  OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Check if any remarks are enabled for openmp-opt
  bool remarksEnabled() {
    auto &Ctx = M.getContext();
    return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
  }

  /// Run all OpenMP optimizations on the underlying SCC.
  bool run(bool IsModulePass) {
    if (SCC.empty())
      return false;

    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions\n");

    if (IsModulePass) {
      Changed |= runAttributor(IsModulePass);

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      // TODO: This should be folded into buildCustomStateMachine.
      Changed |= rewriteDeviceCodeStateMachine();

      if (remarksEnabled())
        analysisGlobalization();
    } else {
      if (PrintICVValues)
        printICVs();
      if (PrintOpenMPKernels)
        printKernels();

      Changed |= runAttributor(IsModulePass);

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      Changed |= deleteParallelRegions();

      if (HideMemoryTransferLatency)
        Changed |= hideMemTransfersLatency();
      Changed |= deduplicateRuntimeCalls();
      if (EnableParallelRegionMerging) {
        if (mergeParallelRegions()) {
          deduplicateRuntimeCalls();
          Changed = true;
        }
      }
    }

    return Changed;
  }
  /// Print initial ICV values for testing.
  /// FIXME: This should be done from the Attributor once it is added.
  void printICVs() const {
    InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
                                 ICV_proc_bind};

    for (Function *F : SCC) {
      for (auto ICV : ICVs) {
        auto ICVInfo = OMPInfoCache.ICVs[ICV];
        auto Remark = [&](OptimizationRemarkAnalysis ORA) {
          return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                     << " Value: "
                     << (ICVInfo.InitValue
                             ? toString(ICVInfo.InitValue->getValue(), 10, true)
                             : "IMPLEMENTATION_DEFINED");
        };

        emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark);
      }
    }
  }

  /// Print OpenMP GPU kernels for testing.
  void printKernels() const {
    for (Function *F : SCC) {
      if (!omp::isKernel(*F))
        continue;

      auto Remark = [&](OptimizationRemarkAnalysis ORA) {
        return ORA << "OpenMP GPU kernel "
                   << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
      };

      emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark);
    }
  }

  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given it has to be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI ||
         (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given it has to
  /// be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI ||
         (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
      return CI;
    return nullptr;
  }

private:
  /// Merge parallel regions when it is safe.
  bool mergeParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;
    const unsigned CallbackFirstArgOperand = 3;
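    // A __kmpc_fork_call site has (schematically) the shape
    //   call void @__kmpc_fork_call(ptr %ident, i32 <num args>,
    //                               ptr @outlined.fn, <shared args>...)
    // so operand 2 is the outlined callback and operand 3 is the first
    // argument forwarded to it, matching the two constants above.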
    using InsertPointTy = OpenMPIRBuilder::InsertPointTy;

    // Check if there are any __kmpc_fork_call calls to merge.
    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    // Unmergable calls that prevent merging a parallel region.
    OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
    };

    bool Changed = false;
    LoopInfo *LI = nullptr;
    DominatorTree *DT = nullptr;

    SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;

    BasicBlock *StartBB = nullptr, *EndBB = nullptr;
    auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
      BasicBlock *CGStartBB = CodeGenIP.getBlock();
      BasicBlock *CGEndBB =
          SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
      assert(StartBB != nullptr && "StartBB should not be null");
      CGStartBB->getTerminator()->setSuccessor(0, StartBB);
      assert(EndBB != nullptr && "EndBB should not be null");
      EndBB->getTerminator()->setSuccessor(0, CGEndBB);
    };

    auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
                      Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
      ReplacementValue = &Inner;
      return CodeGenIP;
    };

    auto FiniCB = [&](InsertPointTy CodeGenIP) {};

    /// Create a sequential execution region within a merged parallel region,
    /// encapsulated in a master construct with a barrier for synchronization.
    auto CreateSequentialRegion = [&](Function *OuterFn,
                                      BasicBlock *OuterPredBB,
                                      Instruction *SeqStartI,
                                      Instruction *SeqEndI) {
      // Isolate the instructions of the sequential region to a separate
      // block.
      BasicBlock *ParentBB = SeqStartI->getParent();
      BasicBlock *SeqEndBB =
          SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
      BasicBlock *SeqAfterBB =
          SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
      BasicBlock *SeqStartBB =
          SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");

      assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
             "Expected a different CFG");
      const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
      ParentBB->getTerminator()->eraseFromParent();

      auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
        BasicBlock *CGStartBB = CodeGenIP.getBlock();
        BasicBlock *CGEndBB =
            SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
        assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
        CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
        assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
        SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
      };
      auto FiniCB = [&](InsertPointTy CodeGenIP) {};

      // Find outputs from the sequential region to outside users and
      // broadcast their values to them.
      for (Instruction &I : *SeqStartBB) {
        SmallPtrSet<Instruction *, 4> OutsideUsers;
        for (User *Usr : I.users()) {
          Instruction &UsrI = *cast<Instruction>(Usr);
          // Ignore outputs to lifetime intrinsics, code extraction for the
          // merged parallel region will fix them.
          if (UsrI.isLifetimeStartOrEnd())
            continue;

          if (UsrI.getParent() != SeqStartBB)
            OutsideUsers.insert(&UsrI);
        }

        if (OutsideUsers.empty())
          continue;

        // Emit an alloca in the outer region to store the broadcasted
        // value.
        const DataLayout &DL = M.getDataLayout();
        AllocaInst *AllocaI = new AllocaInst(
            I.getType(), DL.getAllocaAddrSpace(), nullptr,
            I.getName() + ".seq.output.alloc", &OuterFn->front().front());

        // Emit a store instruction in the sequential BB to update the
        // value.
        new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());

        // Emit a load instruction and replace the use of the output value
        // with it.
        for (Instruction *UsrI : OutsideUsers) {
          LoadInst *LoadI = new LoadInst(
              I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
          UsrI->replaceUsesOfWith(&I, LoadI);
        }
      }

      OpenMPIRBuilder::LocationDescription Loc(
          InsertPointTy(ParentBB, ParentBB->end()), DL);
      InsertPointTy SeqAfterIP =
          OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);

      OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);

      BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());

      LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
                        << "\n");
    };

    // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
    // contained in BB and only separated by instructions that can be
    // redundantly executed in parallel. The block BB is split before the first
    // call (in MergableCIs) and after the last so the entire region we merge
    // into a single parallel region is contained in a single basic block
    // without any other instructions. We use the OpenMPIRBuilder to outline
    // that block and call the resulting function via __kmpc_fork_call.
    auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs,
                     BasicBlock *BB) {
      // TODO: Change the interface to allow single CIs expanded, e.g., to
      //       include an outer loop.
      assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");

      auto Remark = [&](OptimizationRemark OR) {
        OR << "Parallel region merged with parallel region"
           << (MergableCIs.size() > 2 ? "s" : "") << " at ";
        for (auto *CI : llvm::drop_begin(MergableCIs)) {
          OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
          if (CI != MergableCIs.back())
            OR << ", ";
        }
        return OR << ".";
      };

      emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark);

      Function *OriginalFn = BB->getParent();
      LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
                        << " parallel regions in " << OriginalFn->getName()
                        << "\n");

      // Isolate the calls to merge in a separate block.
      EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
      BasicBlock *AfterBB =
          SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
      StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
                           "omp.par.merged");

      assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
      const DebugLoc DL = BB->getTerminator()->getDebugLoc();
      BB->getTerminator()->eraseFromParent();

      // Create sequential regions for sequential instructions that are
      // in-between mergable parallel regions.
      for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
           It != End; ++It) {
        Instruction *ForkCI = *It;
        Instruction *NextForkCI = *(It + 1);

        // Continue if there are no in-between instructions.
        if (ForkCI->getNextNode() == NextForkCI)
          continue;

        CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
                               NextForkCI->getPrevNode());
      }

      OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
                                               DL);
      IRBuilder<>::InsertPoint AllocaIP(
          &OriginalFn->getEntryBlock(),
          OriginalFn->getEntryBlock().getFirstInsertionPt());
      // Create the merged parallel region with default proc binding, to
      // avoid overriding binding settings, and without explicit cancellation.
      InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
          Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
          OMP_PROC_BIND_default, /* IsCancellable */ false);
      BranchInst::Create(AfterBB, AfterIP.getBlock());

      // Perform the actual outlining.
      OMPInfoCache.OMPBuilder.finalize(OriginalFn);

      Function *OutlinedFn = MergableCIs.front()->getCaller();

      // Replace the __kmpc_fork_call calls with direct calls to the outlined
      // callbacks.
      SmallVector<Value *, 8> Args;
      for (auto *CI : MergableCIs) {
        Value *Callee = CI->getArgOperand(CallbackCalleeOperand);
        FunctionType *FT = OMPInfoCache.OMPBuilder.ParallelTask;
        Args.clear();
        Args.push_back(OutlinedFn->getArg(0));
        Args.push_back(OutlinedFn->getArg(1));
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          Args.push_back(CI->getArgOperand(U));

        CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
        if (CI->getDebugLoc())
          NewCI->setDebugLoc(CI->getDebugLoc());

        // Forward parameter attributes from the callback to the callee.
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
            NewCI->addParamAttr(
                U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);

        // Emit an explicit barrier to replace the implicit fork-join barrier.
        if (CI != MergableCIs.back()) {
          // TODO: Remove barrier if the merged parallel region includes the
          //       'nowait' clause.
          OMPInfoCache.OMPBuilder.createBarrier(
              InsertPointTy(NewCI->getParent(),
                            NewCI->getNextNode()->getIterator()),
              OMPD_parallel);
        }

        CI->eraseFromParent();
      }

      assert(OutlinedFn != OriginalFn && "Outlining failed");
      CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
      CGUpdater.reanalyzeFunction(*OriginalFn);

      NumOpenMPParallelRegionsMerged += MergableCIs.size();

      return true;
    };
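    // The net effect of Merge is, schematically, to turn
    //   __kmpc_fork_call(..., @outlined1, ...)
    //   <sequential code>
    //   __kmpc_fork_call(..., @outlined2, ...)
    // into a single fork of a freshly outlined region that calls @outlined1,
    // runs the sequential code under a master construct followed by a
    // barrier (see CreateSequentialRegion), and then calls @outlined2.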
    // Helper function that identifies sequences of
    // __kmpc_fork_call uses in a basic block.
    auto DetectPRsCB = [&](Use &U, Function &F) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      BB2PRMap[CI->getParent()].insert(CI);

      return false;
    };

    BB2PRMap.clear();
    RFI.foreachUse(SCC, DetectPRsCB);
    SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
    // Find mergable parallel regions within a basic block that are
    // safe to merge, that is any in-between instructions can safely
    // execute in parallel after merging.
    // TODO: support merging across basic-blocks.
    for (auto &It : BB2PRMap) {
      auto &CIs = It.getSecond();
      if (CIs.size() < 2)
        continue;

      BasicBlock *BB = It.getFirst();
      SmallVector<CallInst *, 4> MergableCIs;

      /// Returns true if the instruction is mergable, false otherwise.
      /// A terminator instruction is unmergable by definition since merging
      /// works within a BB. Instructions before the mergable region are
      /// mergable if they are not calls to OpenMP runtime functions that may
      /// set different execution parameters for subsequent parallel regions.
      /// Instructions in-between parallel regions are mergable if they are not
      /// calls to any non-intrinsic function since that may call a non-mergable
      /// OpenMP runtime function.
      auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
        // We do not merge across BBs, hence return false (unmergable) if the
        // instruction is a terminator.
        if (I.isTerminator())
          return false;

        if (!isa<CallInst>(&I))
          return true;

        CallInst *CI = cast<CallInst>(&I);
        if (IsBeforeMergableRegion) {
          Function *CalledFunction = CI->getCalledFunction();
          if (!CalledFunction)
            return false;
          // Return false (unmergable) if the call before the parallel
          // region calls an explicit affinity (proc_bind) or number of
          // threads (num_threads) compiler-generated function. Those settings
          // may be incompatible with following parallel regions.
          // TODO: ICV tracking to detect compatibility.
          for (const auto &RFI : UnmergableCallsInfo) {
            if (CalledFunction == RFI.Declaration)
              return false;
          }
        } else {
          // Return false (unmergable) if there is a call instruction
          // in-between parallel regions when it is not an intrinsic. It
          // may call an unmergable OpenMP runtime function in its callpath.
          // TODO: Keep track of possible OpenMP calls in the callpath.
          if (!isa<IntrinsicInst>(CI))
            return false;
        }

        return true;
      };
      // Find maximal number of parallel region CIs that are safe to merge.
      for (auto It = BB->begin(), End = BB->end(); It != End;) {
        Instruction &I = *It;
        ++It;

        if (CIs.count(&I)) {
          MergableCIs.push_back(cast<CallInst>(&I));
          continue;
        }

        // Continue expanding if the instruction is mergable.
        if (IsMergable(I, MergableCIs.empty()))
          continue;

        // Forward the instruction iterator to skip the next parallel region
        // since there is an unmergable instruction which can affect it.
        for (; It != End; ++It) {
          Instruction &SkipI = *It;
          if (CIs.count(&SkipI)) {
            LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
                              << " due to " << I << "\n");
            ++It;
            break;
          }
        }

        // Store mergable regions found.
        if (MergableCIs.size() > 1) {
          MergableCIsVector.push_back(MergableCIs);
          LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
                            << " parallel regions in block " << BB->getName()
                            << " of function " << BB->getParent()->getName()
                            << "\n";);
        }

        MergableCIs.clear();
      }

      if (!MergableCIsVector.empty()) {
        Changed = true;

        for (auto &MergableCIs : MergableCIsVector)
          Merge(MergableCIs, BB);
        MergableCIsVector.clear();
      }
    }

    if (Changed) {
      /// Re-collect use for fork calls, emitted barrier calls, and
      /// any emitted master/end_master calls.
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
    }

    return Changed;
  }

  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Removing parallel region with no side-effects.";
      };
      emitRemark<OptimizationRemark>(CI, "OMP160", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(SCC, DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // Global-tid is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |= deduplicateRuntimeCalls(
            *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  /// Tries to hide the latency of runtime calls that involve host to
  /// device memory transfers by splitting them into their "issue" and "wait"
  /// versions. The "issue" is moved upwards as much as possible. The "wait" is
  /// moved downwards as much as possible. The "issue" issues the memory
  /// transfer asynchronously, returning a handle. The "wait" waits in the
  /// returned handle for the memory transfer to finish.
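  ///
  /// Schematically, a call
  ///   call void @__tgt_target_data_begin_mapper(<args>)
  /// is rewritten (see splitTargetDataBeginRTC below) into
  ///   %handle = alloca %struct.__tgt_async_info
  ///   call void @__tgt_target_data_begin_mapper_issue(<args>, ptr %handle)
  ///   ... code that does not touch the mapped memory ...
  ///   call void @__tgt_target_data_begin_mapper_wait(i64 %device_id,
  ///                                                  ptr %handle)
  /// so the transfer overlaps with the independent code in-between.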
  bool hideMemTransfersLatency() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
    bool Changed = false;
    auto SplitMemTransfers = [&](Use &U, Function &Decl) {
      auto *RTCall = getCallIfRegularCall(U, &RFI);
      if (!RTCall)
        return false;

      OffloadArray OffloadArrays[3];
      if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
        return false;

      LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));

      // TODO: Check if the call can be moved upwards.
      bool WasSplit = false;
      Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
      if (WaitMovementPoint)
        WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);

      Changed |= WasSplit;
      return WasSplit;
    };
    if (OMPInfoCache.runtimeFnsAvailable(
            {OMPRTL___tgt_target_data_begin_mapper_issue,
             OMPRTL___tgt_target_data_begin_mapper_wait}))
      RFI.foreachUse(SCC, SplitMemTransfers);

    return Changed;
  }

  void analysisGlobalization() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];

    auto CheckGlobalization = [&](Use &U, Function &Decl) {
      if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
        auto Remark = [&](OptimizationRemarkMissed ORM) {
          return ORM
                 << "Found thread data sharing on the GPU. "
                 << "Expect degraded performance due to data globalization.";
        };
        emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark);
      }

      return false;
    };

    RFI.foreachUse(SCC, CheckGlobalization);
  }

  /// Maps the values stored in the offload arrays passed as arguments to
  /// \p RuntimeCall into the offload arrays in \p OAs.
  bool getValuesInOffloadArrays(CallInst &RuntimeCall,
                                MutableArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "Need space for three offload arrays!");

    // A runtime call that involves memory offloading looks something like:
    // call void @__tgt_target_data_begin_mapper(arg0, arg1,
    //    i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
    //    ...)
    // So, the idea is to access the allocas that allocate space for these
    // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
    // Therefore:
    // i8** %offload_baseptrs.
    Value *BasePtrsArg =
        RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
    // i8** %offload_ptrs.
    Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
    // i8** %offload_sizes.
    Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);

    // Get values stored in **offload_baseptrs.
    auto *V = getUnderlyingObject(BasePtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *BasePtrsArray = cast<AllocaInst>(V);
    if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_ptrs.
    V = getUnderlyingObject(PtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *PtrsArray = cast<AllocaInst>(V);
    if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_sizes.
    V = getUnderlyingObject(SizesArg);
    // If it's a [constant] global array don't analyze it.
    if (isa<GlobalValue>(V))
      return isa<Constant>(V);
    if (!isa<AllocaInst>(V))
      return false;

    auto *SizesArray = cast<AllocaInst>(V);
    if (!OAs[2].initialize(*SizesArray, RuntimeCall))
      return false;

    return true;
  }

  /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
  /// For now this is a way to test that the function getValuesInOffloadArrays
  /// is working properly.
  /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
  void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "There are three offload arrays to debug!");

    LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
    std::string ValuesStr;
    raw_string_ostream Printer(ValuesStr);
    std::string Separator = " --- ";

    for (auto *BP : OAs[0].StoredValues) {
      BP->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *P : OAs[1].StoredValues) {
      P->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *S : OAs[2].StoredValues) {
      S->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
  }

  /// Returns the instruction where the "wait" counterpart of \p RuntimeCall
  /// can be moved. Returns nullptr if the movement is not possible, or not
  /// worth it.
  Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
    // FIXME: This traverses only the BasicBlock where RuntimeCall is.
    //        Make it traverse the CFG.

    Instruction *CurrentI = &RuntimeCall;
    bool IsWorthIt = false;
    while ((CurrentI = CurrentI->getNextNode())) {

      // TODO: Once we detect the regions to be offloaded we should use the
      //       alias analysis manager to check if CurrentI may modify one of
      //       the offloaded regions.
      if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
        if (IsWorthIt)
          return CurrentI;

        return nullptr;
      }

      // FIXME: For now, if we move the "wait" over anything without side
      //        effects it is considered worth it.
      IsWorthIt = true;
    }

    // Return end of BasicBlock.
    return RuntimeCall.getParent()->getTerminator();
  }

  /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
  bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
                               Instruction &WaitMovementPoint) {
    // Create stack allocated handle (__tgt_async_info) at the beginning of
    // the function. Used for storing information of the async transfer,
    // allowing us to wait on it later.
    auto &IRBuilder = OMPInfoCache.OMPBuilder;
    Function *F = RuntimeCall.getCaller();
    BasicBlock &Entry = F->getEntryBlock();
    IRBuilder.Builder.SetInsertPoint(&Entry,
                                     Entry.getFirstNonPHIOrDbgOrAlloca());
    Value *Handle = IRBuilder.Builder.CreateAlloca(
        IRBuilder.AsyncInfo, /*ArraySize=*/nullptr, "handle");
    Handle =
        IRBuilder.Builder.CreateAddrSpaceCast(Handle, IRBuilder.AsyncInfoPtr);

    // Add "issue" runtime call declaration:
    // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
    //   i8**, i8**, i64*, i64*)
    FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_issue);

    // Change the RuntimeCall call site to its asynchronous version.
    SmallVector<Value *, 16> Args;
    for (auto &Arg : RuntimeCall.args())
      Args.push_back(Arg.get());
    Args.push_back(Handle);

    CallInst *IssueCallsite =
        CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
    OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite);
    RuntimeCall.eraseFromParent();

    // Add "wait" runtime call declaration:
    // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
    FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
        M, OMPRTL___tgt_target_data_begin_mapper_wait);

    Value *WaitParams[2] = {
        IssueCallsite->getArgOperand(
            OffloadArray::DeviceIDArgNum), // device_id.
        Handle                             // handle to wait on.
    };
    CallInst *WaitCallsite = CallInst::Create(
        WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
    OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite);

    return true;
  }

  static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
                                    bool GlobalOnly, bool &SingleChoice) {
    if (CurrentIdent == NextIdent)
      return CurrentIdent;

    // TODO: Figure out how to actually combine multiple debug locations. For
    //       now we just keep an existing one if there is a single choice.
    if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
      SingleChoice = !CurrentIdent;
      return NextIdent;
    }
    return nullptr;
  }

  /// Return a `struct ident_t*` value that represents the ones used in the
  /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
  /// return a local `struct ident_t*`. For now, if we cannot find a suitable
  /// return value we create one from scratch. We also do not yet combine
  /// information, e.g., the source locations, see combinedIdentStruct.
  Value *
  getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
                                 Function &F, bool GlobalOnly) {
    bool SingleChoice = true;
    Value *Ident = nullptr;
    auto CombineIdentStruct = [&](Use &U, Function &Caller) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      if (!CI || &F != &Caller)
        return false;
      Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
                                  /* GlobalOnly */ true, SingleChoice);
      return false;
    };
    RFI.foreachUse(SCC, CombineIdentStruct);

    if (!Ident || !SingleChoice) {
      // The IRBuilder uses the insertion block to get to the module, this is
      // unfortunate but we work around it for now.
1664       if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
1665         OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
1666             &F.getEntryBlock(), F.getEntryBlock().begin()));
1667       // Create a fallback location if none was found.
1668       // TODO: Use the debug locations of the calls instead.
1669       uint32_t SrcLocStrSize;
1670       Constant *Loc =
1671           OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
1672       Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize);
1673     }
1674     return Ident;
1675   }
1676
1677   /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
1678   /// \p ReplVal if given.
1679   bool deduplicateRuntimeCalls(Function &F,
1680                                OMPInformationCache::RuntimeFunctionInfo &RFI,
1681                                Value *ReplVal = nullptr) {
1682     auto *UV = RFI.getUseVector(F);
1683     if (!UV || UV->size() + (ReplVal != nullptr) < 2)
1684       return false;
1685
1686     LLVM_DEBUG(
1687         dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
1688                << (ReplVal ? " with an existing value" : "") << "\n");
1689
1690     assert((!ReplVal || (isa<Argument>(ReplVal) &&
1691                          cast<Argument>(ReplVal)->getParent() == &F)) &&
1692            "Unexpected replacement value!");
1693
1694     // TODO: Use dominance to find a good position instead.
1695     auto CanBeMoved = [this](CallBase &CB) {
1696       unsigned NumArgs = CB.arg_size();
1697       if (NumArgs == 0)
1698         return true;
1699       if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
1700         return false;
1701       for (unsigned U = 1; U < NumArgs; ++U)
1702         if (isa<Instruction>(CB.getArgOperand(U)))
1703           return false;
1704       return true;
1705     };
1706
1707     if (!ReplVal) {
1708       auto *DT =
1709           OMPInfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(F);
1710       if (!DT)
1711         return false;
1712       Instruction *IP = nullptr;
1713       for (Use *U : *UV) {
1714         if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
1715           if (IP)
1716             IP = DT->findNearestCommonDominator(IP, CI);
1717           else
1718             IP = CI;
1719           if (!CanBeMoved(*CI))
1720             continue;
1721           if (!ReplVal)
1722             ReplVal = CI;
1723         }
1724       }
1725       if (!ReplVal)
1726         return false;
1727       assert(IP && "Expected insertion point!");
1728       cast<Instruction>(ReplVal)->moveBefore(IP);
1729     }
1730
1731     // If we use a call as a replacement value we need to make sure the ident is
1732     // valid at the new location. For now we just pick a global one, either
1733     // existing and used by one of the calls, or created from scratch.
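    // E.g. (sketch, names invented): if the surviving call is
    //   %tid = call i32 @__kmpc_global_thread_num(ptr @my_loc)
    // and it was moved to a dominating block, its first operand is rewritten
    // to a global `ident_t` so that the operand is guaranteed to be valid at
    // the new call site.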
1734     if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
1735       if (!CI->arg_empty() &&
1736           CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
1737         Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
1738                                                       /* GlobalOnly */ true);
1739         CI->setArgOperand(0, Ident);
1740       }
1741     }
1742
1743     bool Changed = false;
1744     auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1745       CallInst *CI = getCallIfRegularCall(U, &RFI);
1746       if (!CI || CI == ReplVal || &F != &Caller)
1747         return false;
1748       assert(CI->getCaller() == &F && "Unexpected call!");
1749
1750       auto Remark = [&](OptimizationRemark OR) {
1751         return OR << "OpenMP runtime call "
1752                   << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated.";
1753       };
1754       if (CI->getDebugLoc())
1755         emitRemark<OptimizationRemark>(CI, "OMP170", Remark);
1756       else
1757         emitRemark<OptimizationRemark>(&F, "OMP170", Remark);
1758
1759       CGUpdater.removeCallSite(*CI);
1760       CI->replaceAllUsesWith(ReplVal);
1761       CI->eraseFromParent();
1762       ++NumOpenMPRuntimeCallsDeduplicated;
1763       Changed = true;
1764       return true;
1765     };
1766     RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1767
1768     return Changed;
1769   }
1770
1771   /// Collect arguments that represent the global thread id in \p GTIdArgs.
1772   void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1773     // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1774     //       initialization. We could define an AbstractAttribute instead and
1775     //       run the Attributor here once it can be run as an SCC pass.
1776
1777     // Helper to check the argument \p ArgNo at all call sites of \p F for
1778     // a GTId.
1779     auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1780       if (!F.hasLocalLinkage())
1781         return false;
1782       for (Use &U : F.uses()) {
1783         if (CallInst *CI = getCallIfRegularCall(U)) {
1784           Value *ArgOp = CI->getArgOperand(ArgNo);
1785           if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1786               getCallIfRegularCall(
1787                   *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1788             continue;
1789         }
1790         return false;
1791       }
1792       return true;
1793     };
1794
1795     // Helper to identify uses of a GTId as GTId arguments.
1796     auto AddUserArgs = [&](Value &GTId) {
1797       for (Use &U : GTId.uses())
1798         if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1799           if (CI->isArgOperand(&U))
1800             if (Function *Callee = CI->getCalledFunction())
1801               if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1802                 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1803     };
1804
1805     // The argument users of __kmpc_global_thread_num calls are GTIds.
1806     OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1807         OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1808
1809     GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1810       if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1811         AddUserArgs(*CI);
1812       return false;
1813     });
1814
1815     // Transitively search for more arguments by looking at the users of the
1816     // ones we know already. During the search the GTIdArgs vector is extended
1817     // so we cannot cache the size nor can we use a range based for.
1818     for (unsigned U = 0; U < GTIdArgs.size(); ++U)
1819       AddUserArgs(*GTIdArgs[U]);
1820   }
1821
1822   /// Kernel (=GPU) optimizations and utility functions
1823   ///
1824   ///{{
1825
1826   /// Cache to remember the unique kernel for a function.
1827   DenseMap<Function *, std::optional<Kernel>> UniqueKernelMap;
1828
1829   /// Find the unique kernel that will execute \p F, if any.
1830 Kernel getUniqueKernelFor(Function &F); 1831 1832 /// Find the unique kernel that will execute \p I, if any. 1833 Kernel getUniqueKernelFor(Instruction &I) { 1834 return getUniqueKernelFor(*I.getFunction()); 1835 } 1836 1837 /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in 1838 /// the cases we can avoid taking the address of a function. 1839 bool rewriteDeviceCodeStateMachine(); 1840 1841 /// 1842 ///}} 1843 1844 /// Emit a remark generically 1845 /// 1846 /// This template function can be used to generically emit a remark. The 1847 /// RemarkKind should be one of the following: 1848 /// - OptimizationRemark to indicate a successful optimization attempt 1849 /// - OptimizationRemarkMissed to report a failed optimization attempt 1850 /// - OptimizationRemarkAnalysis to provide additional information about an 1851 /// optimization attempt 1852 /// 1853 /// The remark is built using a callback function provided by the caller that 1854 /// takes a RemarkKind as input and returns a RemarkKind. 1855 template <typename RemarkKind, typename RemarkCallBack> 1856 void emitRemark(Instruction *I, StringRef RemarkName, 1857 RemarkCallBack &&RemarkCB) const { 1858 Function *F = I->getParent()->getParent(); 1859 auto &ORE = OREGetter(F); 1860 1861 if (RemarkName.startswith("OMP")) 1862 ORE.emit([&]() { 1863 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)) 1864 << " [" << RemarkName << "]"; 1865 }); 1866 else 1867 ORE.emit( 1868 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); }); 1869 } 1870 1871 /// Emit a remark on a function. 1872 template <typename RemarkKind, typename RemarkCallBack> 1873 void emitRemark(Function *F, StringRef RemarkName, 1874 RemarkCallBack &&RemarkCB) const { 1875 auto &ORE = OREGetter(F); 1876 1877 if (RemarkName.startswith("OMP")) 1878 ORE.emit([&]() { 1879 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)) 1880 << " [" << RemarkName << "]"; 1881 }); 1882 else 1883 ORE.emit( 1884 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); }); 1885 } 1886 1887 /// The underlying module. 1888 Module &M; 1889 1890 /// The SCC we are operating on. 1891 SmallVectorImpl<Function *> &SCC; 1892 1893 /// Callback to update the call graph, the first argument is a removed call, 1894 /// the second an optional replacement call. 1895 CallGraphUpdater &CGUpdater; 1896 1897 /// Callback to get an OptimizationRemarkEmitter from a Function * 1898 OptimizationRemarkGetter OREGetter; 1899 1900 /// OpenMP-specific information cache. Also Used for Attributor runs. 1901 OMPInformationCache &OMPInfoCache; 1902 1903 /// Attributor instance. 1904 Attributor &A; 1905 1906 /// Helper function to run Attributor on SCC. 1907 bool runAttributor(bool IsModulePass) { 1908 if (SCC.empty()) 1909 return false; 1910 1911 registerAAs(IsModulePass); 1912 1913 ChangeStatus Changed = A.run(); 1914 1915 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size() 1916 << " functions, result: " << Changed << ".\n"); 1917 1918 return Changed == ChangeStatus::CHANGED; 1919 } 1920 1921 void registerFoldRuntimeCall(RuntimeFunction RF); 1922 1923 /// Populate the Attributor with abstract attribute opportunities in the 1924 /// functions. 1925 void registerAAs(bool IsModulePass); 1926 1927 public: 1928 /// Callback to register AAs for live functions, including internal functions 1929 /// marked live during the traversal. 
1930 static void registerAAsForFunction(Attributor &A, const Function &F); 1931 }; 1932 1933 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) { 1934 if (OMPInfoCache.CGSCC && !OMPInfoCache.CGSCC->empty() && 1935 !OMPInfoCache.CGSCC->contains(&F)) 1936 return nullptr; 1937 1938 // Use a scope to keep the lifetime of the CachedKernel short. 1939 { 1940 std::optional<Kernel> &CachedKernel = UniqueKernelMap[&F]; 1941 if (CachedKernel) 1942 return *CachedKernel; 1943 1944 // TODO: We should use an AA to create an (optimistic and callback 1945 // call-aware) call graph. For now we stick to simple patterns that 1946 // are less powerful, basically the worst fixpoint. 1947 if (isKernel(F)) { 1948 CachedKernel = Kernel(&F); 1949 return *CachedKernel; 1950 } 1951 1952 CachedKernel = nullptr; 1953 if (!F.hasLocalLinkage()) { 1954 1955 // See https://openmp.llvm.org/remarks/OptimizationRemarks.html 1956 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 1957 return ORA << "Potentially unknown OpenMP target region caller."; 1958 }; 1959 emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark); 1960 1961 return nullptr; 1962 } 1963 } 1964 1965 auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel { 1966 if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 1967 // Allow use in equality comparisons. 1968 if (Cmp->isEquality()) 1969 return getUniqueKernelFor(*Cmp); 1970 return nullptr; 1971 } 1972 if (auto *CB = dyn_cast<CallBase>(U.getUser())) { 1973 // Allow direct calls. 1974 if (CB->isCallee(&U)) 1975 return getUniqueKernelFor(*CB); 1976 1977 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 1978 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 1979 // Allow the use in __kmpc_parallel_51 calls. 1980 if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI)) 1981 return getUniqueKernelFor(*CB); 1982 return nullptr; 1983 } 1984 // Disallow every other use. 1985 return nullptr; 1986 }; 1987 1988 // TODO: In the future we want to track more than just a unique kernel. 1989 SmallPtrSet<Kernel, 2> PotentialKernels; 1990 OMPInformationCache::foreachUse(F, [&](const Use &U) { 1991 PotentialKernels.insert(GetUniqueKernelForUse(U)); 1992 }); 1993 1994 Kernel K = nullptr; 1995 if (PotentialKernels.size() == 1) 1996 K = *PotentialKernels.begin(); 1997 1998 // Cache the result. 1999 UniqueKernelMap[&F] = K; 2000 2001 return K; 2002 } 2003 2004 bool OpenMPOpt::rewriteDeviceCodeStateMachine() { 2005 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 2006 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 2007 2008 bool Changed = false; 2009 if (!KernelParallelRFI) 2010 return Changed; 2011 2012 // If we have disabled state machine changes, exit 2013 if (DisableOpenMPOptStateMachineRewrite) 2014 return Changed; 2015 2016 for (Function *F : SCC) { 2017 2018 // Check if the function is a use in a __kmpc_parallel_51 call at 2019 // all. 2020 bool UnknownUse = false; 2021 bool KernelParallelUse = false; 2022 unsigned NumDirectCalls = 0; 2023 2024 SmallVector<Use *, 2> ToBeReplacedStateMachineUses; 2025 OMPInformationCache::foreachUse(*F, [&](Use &U) { 2026 if (auto *CB = dyn_cast<CallBase>(U.getUser())) 2027 if (CB->isCallee(&U)) { 2028 ++NumDirectCalls; 2029 return; 2030 } 2031 2032 if (isa<ICmpInst>(U.getUser())) { 2033 ToBeReplacedStateMachineUses.push_back(&U); 2034 return; 2035 } 2036 2037 // Find wrapper functions that represent parallel kernels. 
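      // A sketch of the use we are looking for (trailing arguments
      // abbreviated); the wrapper is passed as argument number 6 of the
      // parallel runtime call:
      //   call void @__kmpc_parallel_51(ptr %ident, i32 %tid, i32 %if_expr,
      //                                 i32 %num_threads, i32 %proc_bind,
      //                                 ptr @par_body_fn, ptr @par_wrapper_fn, ...)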
2038       CallInst *CI =
2039           OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI);
2040       const unsigned int WrapperFunctionArgNo = 6;
2041       if (!KernelParallelUse && CI &&
2042           CI->getArgOperandNo(&U) == WrapperFunctionArgNo) {
2043         KernelParallelUse = true;
2044         ToBeReplacedStateMachineUses.push_back(&U);
2045         return;
2046       }
2047       UnknownUse = true;
2048     });
2049
2050     // Do not emit a remark if we haven't seen a __kmpc_parallel_51
2051     // use.
2052     if (!KernelParallelUse)
2053       continue;
2054
2055     // If this ever hits, we should investigate.
2056     // TODO: Checking the number of uses is not a necessary restriction and
2057     //       should be lifted.
2058     if (UnknownUse || NumDirectCalls != 1 ||
2059         ToBeReplacedStateMachineUses.size() > 2) {
2060       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2061         return ORA << "Parallel region is used in "
2062                    << (UnknownUse ? "unknown" : "unexpected")
2063                    << " ways. Will not attempt to rewrite the state machine.";
2064       };
2065       emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark);
2066       continue;
2067     }
2068
2069     // Even if we have __kmpc_parallel_51 calls, we (for now) give
2070     // up if the function is not called from a unique kernel.
2071     Kernel K = getUniqueKernelFor(*F);
2072     if (!K) {
2073       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2074         return ORA << "Parallel region is not called from a unique kernel. "
2075                       "Will not attempt to rewrite the state machine.";
2076       };
2077       emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark);
2078       continue;
2079     }
2080
2081     // We now know F is a parallel body function called only from the kernel K.
2082     // We also identified the state machine uses in which we replace the
2083     // function pointer by a new global symbol for identification purposes.
2084     // This ensures only direct calls to the function are left.
2085
2086     Module &M = *F->getParent();
2087     Type *Int8Ty = Type::getInt8Ty(M.getContext());
2088
2089     auto *ID = new GlobalVariable(
2090         M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
2091         UndefValue::get(Int8Ty), F->getName() + ".ID");
2092
2093     for (Use *U : ToBeReplacedStateMachineUses)
2094       U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2095           ID, U->get()->getType()));
2096
2097     ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
2098
2099     Changed = true;
2100   }
2101
2102   return Changed;
2103 }
2104
2105 /// Abstract Attribute for tracking ICV values.
2106 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
2107   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2108   AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2109
2110   /// Returns true if value is assumed to be tracked.
2111   bool isAssumedTracked() const { return getAssumed(); }
2112
2113   /// Returns true if value is known to be tracked.
2114   bool isKnownTracked() const { return getAssumed(); }
2115
2116   /// Create an abstract attribute view for the position \p IRP.
2117   static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
2118
2119   /// Return the value with which \p I can be replaced for specific \p ICV.
2120   virtual std::optional<Value *> getReplacementValue(InternalControlVar ICV,
2121                                                      const Instruction *I,
2122                                                      Attributor &A) const {
2123     return std::nullopt;
2124   }
2125
2126   /// Return an assumed unique ICV value if a single candidate is found. If
2127   /// there cannot be one, return nullptr. If it is not clear yet, return
2128   /// std::nullopt.
2129   virtual std::optional<Value *>
2130   getUniqueReplacementValue(InternalControlVar ICV) const = 0;
2131
2132   // Currently only nthreads is being tracked.
2133   // This array will only grow with time.
2134   InternalControlVar TrackableICVs[1] = {ICV_nthreads};
2135
2136   /// See AbstractAttribute::getName()
2137   const std::string getName() const override { return "AAICVTracker"; }
2138
2139   /// See AbstractAttribute::getIdAddr()
2140   const char *getIdAddr() const override { return &ID; }
2141
2142   /// This function should return true if the type of the \p AA is AAICVTracker
2143   static bool classof(const AbstractAttribute *AA) {
2144     return (AA->getIdAddr() == &ID);
2145   }
2146
2147   static const char ID;
2148 };
2149
2150 struct AAICVTrackerFunction : public AAICVTracker {
2151   AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
2152       : AAICVTracker(IRP, A) {}
2153
2154   // FIXME: come up with better string.
2155   const std::string getAsStr(Attributor *) const override {
2156     return "ICVTrackerFunction";
2157   }
2158
2159   // FIXME: come up with some stats.
2160   void trackStatistics() const override {}
2161
2162   /// We don't manifest anything for this AA.
2163   ChangeStatus manifest(Attributor &A) override {
2164     return ChangeStatus::UNCHANGED;
2165   }
2166
2167   // Map of ICV to their values at specific program point.
2168   EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
2169                   InternalControlVar::ICV___last>
2170       ICVReplacementValuesMap;
2171
2172   ChangeStatus updateImpl(Attributor &A) override {
2173     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2174
2175     Function *F = getAnchorScope();
2176
2177     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2178
2179     for (InternalControlVar ICV : TrackableICVs) {
2180       auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2181
2182       auto &ValuesMap = ICVReplacementValuesMap[ICV];
2183       auto TrackValues = [&](Use &U, Function &) {
2184         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
2185         if (!CI)
2186           return false;
2187
2188         // FIXME: handle setters with more than one argument.
2189         /// Track new value.
2190         if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
2191           HasChanged = ChangeStatus::CHANGED;
2192
2193         return false;
2194       };
2195
2196       auto CallCheck = [&](Instruction &I) {
2197         std::optional<Value *> ReplVal = getValueForCall(A, I, ICV);
2198         if (ReplVal && ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
2199           HasChanged = ChangeStatus::CHANGED;
2200
2201         return true;
2202       };
2203
2204       // Track all changes of an ICV.
2205       SetterRFI.foreachUse(TrackValues, F);
2206
2207       bool UsedAssumedInformation = false;
2208       A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
2209                                 UsedAssumedInformation,
2210                                 /* CheckBBLivenessOnly */ true);
2211
2212       /// TODO: Figure out a way to avoid adding an entry in
2213       ///       ICVReplacementValuesMap
2214       Instruction *Entry = &F->getEntryBlock().front();
2215       if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
2216         ValuesMap.insert(std::make_pair(Entry, nullptr));
2217     }
2218
2219     return HasChanged;
2220   }
2221
2222   /// Helper to check if \p I is a call and get the value for it if it is
2223   /// unique.
2224 std::optional<Value *> getValueForCall(Attributor &A, const Instruction &I, 2225 InternalControlVar &ICV) const { 2226 2227 const auto *CB = dyn_cast<CallBase>(&I); 2228 if (!CB || CB->hasFnAttr("no_openmp") || 2229 CB->hasFnAttr("no_openmp_routines")) 2230 return std::nullopt; 2231 2232 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2233 auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter]; 2234 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 2235 Function *CalledFunction = CB->getCalledFunction(); 2236 2237 // Indirect call, assume ICV changes. 2238 if (CalledFunction == nullptr) 2239 return nullptr; 2240 if (CalledFunction == GetterRFI.Declaration) 2241 return std::nullopt; 2242 if (CalledFunction == SetterRFI.Declaration) { 2243 if (ICVReplacementValuesMap[ICV].count(&I)) 2244 return ICVReplacementValuesMap[ICV].lookup(&I); 2245 2246 return nullptr; 2247 } 2248 2249 // Since we don't know, assume it changes the ICV. 2250 if (CalledFunction->isDeclaration()) 2251 return nullptr; 2252 2253 const auto *ICVTrackingAA = A.getAAFor<AAICVTracker>( 2254 *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED); 2255 2256 if (ICVTrackingAA->isAssumedTracked()) { 2257 std::optional<Value *> URV = 2258 ICVTrackingAA->getUniqueReplacementValue(ICV); 2259 if (!URV || (*URV && AA::isValidAtPosition(AA::ValueAndContext(**URV, I), 2260 OMPInfoCache))) 2261 return URV; 2262 } 2263 2264 // If we don't know, assume it changes. 2265 return nullptr; 2266 } 2267 2268 // We don't check unique value for a function, so return std::nullopt. 2269 std::optional<Value *> 2270 getUniqueReplacementValue(InternalControlVar ICV) const override { 2271 return std::nullopt; 2272 } 2273 2274 /// Return the value with which \p I can be replaced for specific \p ICV. 2275 std::optional<Value *> getReplacementValue(InternalControlVar ICV, 2276 const Instruction *I, 2277 Attributor &A) const override { 2278 const auto &ValuesMap = ICVReplacementValuesMap[ICV]; 2279 if (ValuesMap.count(I)) 2280 return ValuesMap.lookup(I); 2281 2282 SmallVector<const Instruction *, 16> Worklist; 2283 SmallPtrSet<const Instruction *, 16> Visited; 2284 Worklist.push_back(I); 2285 2286 std::optional<Value *> ReplVal; 2287 2288 while (!Worklist.empty()) { 2289 const Instruction *CurrInst = Worklist.pop_back_val(); 2290 if (!Visited.insert(CurrInst).second) 2291 continue; 2292 2293 const BasicBlock *CurrBB = CurrInst->getParent(); 2294 2295 // Go up and look for all potential setters/calls that might change the 2296 // ICV. 2297 while ((CurrInst = CurrInst->getPrevNode())) { 2298 if (ValuesMap.count(CurrInst)) { 2299 std::optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst); 2300 // Unknown value, track new. 2301 if (!ReplVal) { 2302 ReplVal = NewReplVal; 2303 break; 2304 } 2305 2306 // If we found a new value, we can't know the icv value anymore. 2307 if (NewReplVal) 2308 if (ReplVal != NewReplVal) 2309 return nullptr; 2310 2311 break; 2312 } 2313 2314 std::optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV); 2315 if (!NewReplVal) 2316 continue; 2317 2318 // Unknown value, track new. 2319 if (!ReplVal) { 2320 ReplVal = NewReplVal; 2321 break; 2322 } 2323 2324 // if (NewReplVal.hasValue()) 2325 // We found a new value, we can't know the icv value anymore. 2326 if (ReplVal != NewReplVal) 2327 return nullptr; 2328 } 2329 2330 // If we are in the same BB and we have a value, we are done. 
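      // E.g. (illustrative pseudo IR, single block):
      //   call void @omp_set_num_threads(i32 4)  ; ValuesMap entry: 4
      //   %n = call i32 @omp_get_max_threads()   ; query starts here, walks up
      // The backward walk stops at the setter and 4 is the unique replacement
      // value for the query.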
2331 if (CurrBB == I->getParent() && ReplVal) 2332 return ReplVal; 2333 2334 // Go through all predecessors and add terminators for analysis. 2335 for (const BasicBlock *Pred : predecessors(CurrBB)) 2336 if (const Instruction *Terminator = Pred->getTerminator()) 2337 Worklist.push_back(Terminator); 2338 } 2339 2340 return ReplVal; 2341 } 2342 }; 2343 2344 struct AAICVTrackerFunctionReturned : AAICVTracker { 2345 AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A) 2346 : AAICVTracker(IRP, A) {} 2347 2348 // FIXME: come up with better string. 2349 const std::string getAsStr(Attributor *) const override { 2350 return "ICVTrackerFunctionReturned"; 2351 } 2352 2353 // FIXME: come up with some stats. 2354 void trackStatistics() const override {} 2355 2356 /// We don't manifest anything for this AA. 2357 ChangeStatus manifest(Attributor &A) override { 2358 return ChangeStatus::UNCHANGED; 2359 } 2360 2361 // Map of ICV to their values at specific program point. 2362 EnumeratedArray<std::optional<Value *>, InternalControlVar, 2363 InternalControlVar::ICV___last> 2364 ICVReplacementValuesMap; 2365 2366 /// Return the value with which \p I can be replaced for specific \p ICV. 2367 std::optional<Value *> 2368 getUniqueReplacementValue(InternalControlVar ICV) const override { 2369 return ICVReplacementValuesMap[ICV]; 2370 } 2371 2372 ChangeStatus updateImpl(Attributor &A) override { 2373 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2374 const auto *ICVTrackingAA = A.getAAFor<AAICVTracker>( 2375 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2376 2377 if (!ICVTrackingAA->isAssumedTracked()) 2378 return indicatePessimisticFixpoint(); 2379 2380 for (InternalControlVar ICV : TrackableICVs) { 2381 std::optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2382 std::optional<Value *> UniqueICVValue; 2383 2384 auto CheckReturnInst = [&](Instruction &I) { 2385 std::optional<Value *> NewReplVal = 2386 ICVTrackingAA->getReplacementValue(ICV, &I, A); 2387 2388 // If we found a second ICV value there is no unique returned value. 2389 if (UniqueICVValue && UniqueICVValue != NewReplVal) 2390 return false; 2391 2392 UniqueICVValue = NewReplVal; 2393 2394 return true; 2395 }; 2396 2397 bool UsedAssumedInformation = false; 2398 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, 2399 UsedAssumedInformation, 2400 /* CheckBBLivenessOnly */ true)) 2401 UniqueICVValue = nullptr; 2402 2403 if (UniqueICVValue == ReplVal) 2404 continue; 2405 2406 ReplVal = UniqueICVValue; 2407 Changed = ChangeStatus::CHANGED; 2408 } 2409 2410 return Changed; 2411 } 2412 }; 2413 2414 struct AAICVTrackerCallSite : AAICVTracker { 2415 AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) 2416 : AAICVTracker(IRP, A) {} 2417 2418 void initialize(Attributor &A) override { 2419 assert(getAnchorScope() && "Expected anchor function"); 2420 2421 // We only initialize this AA for getters, so we need to know which ICV it 2422 // gets. 2423 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2424 for (InternalControlVar ICV : TrackableICVs) { 2425 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 2426 auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; 2427 if (Getter.Declaration == getAssociatedFunction()) { 2428 AssociatedICV = ICVInfo.Kind; 2429 return; 2430 } 2431 } 2432 2433 /// Unknown ICV. 
2434 indicatePessimisticFixpoint(); 2435 } 2436 2437 ChangeStatus manifest(Attributor &A) override { 2438 if (!ReplVal || !*ReplVal) 2439 return ChangeStatus::UNCHANGED; 2440 2441 A.changeAfterManifest(IRPosition::inst(*getCtxI()), **ReplVal); 2442 A.deleteAfterManifest(*getCtxI()); 2443 2444 return ChangeStatus::CHANGED; 2445 } 2446 2447 // FIXME: come up with better string. 2448 const std::string getAsStr(Attributor *) const override { 2449 return "ICVTrackerCallSite"; 2450 } 2451 2452 // FIXME: come up with some stats. 2453 void trackStatistics() const override {} 2454 2455 InternalControlVar AssociatedICV; 2456 std::optional<Value *> ReplVal; 2457 2458 ChangeStatus updateImpl(Attributor &A) override { 2459 const auto *ICVTrackingAA = A.getAAFor<AAICVTracker>( 2460 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2461 2462 // We don't have any information, so we assume it changes the ICV. 2463 if (!ICVTrackingAA->isAssumedTracked()) 2464 return indicatePessimisticFixpoint(); 2465 2466 std::optional<Value *> NewReplVal = 2467 ICVTrackingAA->getReplacementValue(AssociatedICV, getCtxI(), A); 2468 2469 if (ReplVal == NewReplVal) 2470 return ChangeStatus::UNCHANGED; 2471 2472 ReplVal = NewReplVal; 2473 return ChangeStatus::CHANGED; 2474 } 2475 2476 // Return the value with which associated value can be replaced for specific 2477 // \p ICV. 2478 std::optional<Value *> 2479 getUniqueReplacementValue(InternalControlVar ICV) const override { 2480 return ReplVal; 2481 } 2482 }; 2483 2484 struct AAICVTrackerCallSiteReturned : AAICVTracker { 2485 AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) 2486 : AAICVTracker(IRP, A) {} 2487 2488 // FIXME: come up with better string. 2489 const std::string getAsStr(Attributor *) const override { 2490 return "ICVTrackerCallSiteReturned"; 2491 } 2492 2493 // FIXME: come up with some stats. 2494 void trackStatistics() const override {} 2495 2496 /// We don't manifest anything for this AA. 2497 ChangeStatus manifest(Attributor &A) override { 2498 return ChangeStatus::UNCHANGED; 2499 } 2500 2501 // Map of ICV to their values at specific program point. 2502 EnumeratedArray<std::optional<Value *>, InternalControlVar, 2503 InternalControlVar::ICV___last> 2504 ICVReplacementValuesMap; 2505 2506 /// Return the value with which associated value can be replaced for specific 2507 /// \p ICV. 2508 std::optional<Value *> 2509 getUniqueReplacementValue(InternalControlVar ICV) const override { 2510 return ICVReplacementValuesMap[ICV]; 2511 } 2512 2513 ChangeStatus updateImpl(Attributor &A) override { 2514 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2515 const auto *ICVTrackingAA = A.getAAFor<AAICVTracker>( 2516 *this, IRPosition::returned(*getAssociatedFunction()), 2517 DepClassTy::REQUIRED); 2518 2519 // We don't have any information, so we assume it changes the ICV. 
2520 if (!ICVTrackingAA->isAssumedTracked()) 2521 return indicatePessimisticFixpoint(); 2522 2523 for (InternalControlVar ICV : TrackableICVs) { 2524 std::optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2525 std::optional<Value *> NewReplVal = 2526 ICVTrackingAA->getUniqueReplacementValue(ICV); 2527 2528 if (ReplVal == NewReplVal) 2529 continue; 2530 2531 ReplVal = NewReplVal; 2532 Changed = ChangeStatus::CHANGED; 2533 } 2534 return Changed; 2535 } 2536 }; 2537 2538 struct AAExecutionDomainFunction : public AAExecutionDomain { 2539 AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A) 2540 : AAExecutionDomain(IRP, A) {} 2541 2542 ~AAExecutionDomainFunction() { delete RPOT; } 2543 2544 void initialize(Attributor &A) override { 2545 Function *F = getAnchorScope(); 2546 assert(F && "Expected anchor function"); 2547 RPOT = new ReversePostOrderTraversal<Function *>(F); 2548 } 2549 2550 const std::string getAsStr(Attributor *) const override { 2551 unsigned TotalBlocks = 0, InitialThreadBlocks = 0, AlignedBlocks = 0; 2552 for (auto &It : BEDMap) { 2553 if (!It.getFirst()) 2554 continue; 2555 TotalBlocks++; 2556 InitialThreadBlocks += It.getSecond().IsExecutedByInitialThreadOnly; 2557 AlignedBlocks += It.getSecond().IsReachedFromAlignedBarrierOnly && 2558 It.getSecond().IsReachingAlignedBarrierOnly; 2559 } 2560 return "[AAExecutionDomain] " + std::to_string(InitialThreadBlocks) + "/" + 2561 std::to_string(AlignedBlocks) + " of " + 2562 std::to_string(TotalBlocks) + 2563 " executed by initial thread / aligned"; 2564 } 2565 2566 /// See AbstractAttribute::trackStatistics(). 2567 void trackStatistics() const override {} 2568 2569 ChangeStatus manifest(Attributor &A) override { 2570 LLVM_DEBUG({ 2571 for (const BasicBlock &BB : *getAnchorScope()) { 2572 if (!isExecutedByInitialThreadOnly(BB)) 2573 continue; 2574 dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " " 2575 << BB.getName() << " is executed by a single thread.\n"; 2576 } 2577 }); 2578 2579 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2580 2581 if (DisableOpenMPOptBarrierElimination) 2582 return Changed; 2583 2584 SmallPtrSet<CallBase *, 16> DeletedBarriers; 2585 auto HandleAlignedBarrier = [&](CallBase *CB) { 2586 const ExecutionDomainTy &ED = CB ? CEDMap[{CB, PRE}] : BEDMap[nullptr]; 2587 if (!ED.IsReachedFromAlignedBarrierOnly || 2588 ED.EncounteredNonLocalSideEffect) 2589 return; 2590 2591 // We can remove this barrier, if it is one, or all aligned barriers 2592 // reaching the kernel end. In the latter case we can transitively work 2593 // our way back until we find a barrier that guards a side-effect if we 2594 // are dealing with the kernel end here. 2595 if (CB) { 2596 DeletedBarriers.insert(CB); 2597 A.deleteAfterManifest(*CB); 2598 ++NumBarriersEliminated; 2599 Changed = ChangeStatus::CHANGED; 2600 } else if (!ED.AlignedBarriers.empty()) { 2601 NumBarriersEliminated += ED.AlignedBarriers.size(); 2602 Changed = ChangeStatus::CHANGED; 2603 SmallVector<CallBase *> Worklist(ED.AlignedBarriers.begin(), 2604 ED.AlignedBarriers.end()); 2605 SmallSetVector<CallBase *, 16> Visited; 2606 while (!Worklist.empty()) { 2607 CallBase *LastCB = Worklist.pop_back_val(); 2608 if (!Visited.insert(LastCB)) 2609 continue; 2610 if (LastCB->getFunction() != getAnchorScope()) 2611 continue; 2612 if (!DeletedBarriers.count(LastCB)) { 2613 A.deleteAfterManifest(*LastCB); 2614 continue; 2615 } 2616 // The final aligned barrier (LastCB) reaching the kernel end was 2617 // removed already. 
This means we can go one step further and remove
2618           // the barriers encountered last before (LastCB).
2619           const ExecutionDomainTy &LastED = CEDMap[{LastCB, PRE}];
2620           Worklist.append(LastED.AlignedBarriers.begin(),
2621                           LastED.AlignedBarriers.end());
2622         }
2623       }
2624
2625       // If we actually eliminated a barrier we need to eliminate the associated
2626       // llvm.assumes as well to avoid creating UB.
2627       if (!ED.EncounteredAssumes.empty() && (CB || !ED.AlignedBarriers.empty()))
2628         for (auto *AssumeCB : ED.EncounteredAssumes)
2629           A.deleteAfterManifest(*AssumeCB);
2630     };
2631
2632     for (auto *CB : AlignedBarriers)
2633       HandleAlignedBarrier(CB);
2634
2635     // Handle the "kernel end barrier" for kernels too.
2636     if (omp::isKernel(*getAnchorScope()))
2637       HandleAlignedBarrier(nullptr);
2638
2639     return Changed;
2640   }
2641
2642   bool isNoOpFence(const FenceInst &FI) const override {
2643     return getState().isValidState() && !NonNoOpFences.count(&FI);
2644   }
2645
2646   /// Merge barrier and assumption information from \p PredED into the successor
2647   /// \p ED.
2648   void
2649   mergeInPredecessorBarriersAndAssumptions(Attributor &A, ExecutionDomainTy &ED,
2650                                            const ExecutionDomainTy &PredED);
2651
2652   /// Merge all information from \p PredED into the successor \p ED. If
2653   /// \p InitialEdgeOnly is set, only the initial edge will enter the block
2654   /// represented by \p ED from this predecessor.
2655   bool mergeInPredecessor(Attributor &A, ExecutionDomainTy &ED,
2656                           const ExecutionDomainTy &PredED,
2657                           bool InitialEdgeOnly = false);
2658
2659   /// Accumulate information for the entry block in \p EntryBBED.
2660   bool handleCallees(Attributor &A, ExecutionDomainTy &EntryBBED);
2661
2662   /// See AbstractAttribute::updateImpl.
2663   ChangeStatus updateImpl(Attributor &A) override;
2664
2665   /// Query interface, see AAExecutionDomain
2666   ///{
2667   bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override {
2668     if (!isValidState())
2669       return false;
2670     assert(BB.getParent() == getAnchorScope() && "Block is out of scope!");
2671     return BEDMap.lookup(&BB).IsExecutedByInitialThreadOnly;
2672   }
2673
2674   bool isExecutedInAlignedRegion(Attributor &A,
2675                                  const Instruction &I) const override {
2676     assert(I.getFunction() == getAnchorScope() &&
2677            "Instruction is out of scope!");
2678     if (!isValidState())
2679       return false;
2680
2681     bool ForwardIsOk = true;
2682     const Instruction *CurI;
2683
2684     // Check forward until a call or the block end is reached.
2685     CurI = &I;
2686     do {
2687       auto *CB = dyn_cast<CallBase>(CurI);
2688       if (!CB)
2689         continue;
2690       if (CB != &I && AlignedBarriers.contains(const_cast<CallBase *>(CB)))
2691         return true;
2692       const auto &It = CEDMap.find({CB, PRE});
2693       if (It == CEDMap.end())
2694         continue;
2695       if (!It->getSecond().IsReachingAlignedBarrierOnly)
2696         ForwardIsOk = false;
2697       break;
2698     } while ((CurI = CurI->getNextNonDebugInstruction()));
2699
2700     if (!CurI && !BEDMap.lookup(I.getParent()).IsReachingAlignedBarrierOnly)
2701       ForwardIsOk = false;
2702
2703     // Check backward until a call or the block beginning is reached.
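    // E.g. (sketch) for an instruction sandwiched between two aligned
    // barriers,
    //   call void @__kmpc_barrier_simple_spmd(ptr @ident, i32 %tid)
    //   %v = load i32, ptr %p                  ; the queried instruction
    //   call void @__kmpc_barrier_simple_spmd(ptr @ident, i32 %tid)
    // the forward walk above and the backward walk below both hit an aligned
    // barrier, so %v is considered to execute in an aligned region.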
2704     CurI = &I;
2705     do {
2706       auto *CB = dyn_cast<CallBase>(CurI);
2707       if (!CB)
2708         continue;
2709       if (CB != &I && AlignedBarriers.contains(const_cast<CallBase *>(CB)))
2710         return true;
2711       const auto &It = CEDMap.find({CB, POST});
2712       if (It == CEDMap.end())
2713         continue;
2714       if (It->getSecond().IsReachedFromAlignedBarrierOnly)
2715         break;
2716       return false;
2717     } while ((CurI = CurI->getPrevNonDebugInstruction()));
2718
2719     // Delayed decision on the forward pass to allow aligned barrier detection
2720     // in the backwards traversal.
2721     if (!ForwardIsOk)
2722       return false;
2723
2724     if (!CurI) {
2725       const BasicBlock *BB = I.getParent();
2726       if (BB == &BB->getParent()->getEntryBlock())
2727         return BEDMap.lookup(nullptr).IsReachedFromAlignedBarrierOnly;
2728       if (!llvm::all_of(predecessors(BB), [&](const BasicBlock *PredBB) {
2729             return BEDMap.lookup(PredBB).IsReachedFromAlignedBarrierOnly;
2730           })) {
2731         return false;
2732       }
2733     }
2734
2735     // On neither traversal did we find anything but aligned barriers.
2736     return true;
2737   }
2738
2739   ExecutionDomainTy getExecutionDomain(const BasicBlock &BB) const override {
2740     assert(isValidState() &&
2741            "No request should be made against an invalid state!");
2742     return BEDMap.lookup(&BB);
2743   }
2744   std::pair<ExecutionDomainTy, ExecutionDomainTy>
2745   getExecutionDomain(const CallBase &CB) const override {
2746     assert(isValidState() &&
2747            "No request should be made against an invalid state!");
2748     return {CEDMap.lookup({&CB, PRE}), CEDMap.lookup({&CB, POST})};
2749   }
2750   ExecutionDomainTy getFunctionExecutionDomain() const override {
2751     assert(isValidState() &&
2752            "No request should be made against an invalid state!");
2753     return InterProceduralED;
2754   }
2755   ///}
2756
2757   // Check if the edge into the successor block contains a condition that only
2758   // lets the main thread execute it.
2759   static bool isInitialThreadOnlyEdge(Attributor &A, BranchInst *Edge,
2760                                       BasicBlock &SuccessorBB) {
2761     if (!Edge || !Edge->isConditional())
2762       return false;
2763     if (Edge->getSuccessor(0) != &SuccessorBB)
2764       return false;
2765
2766     auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition());
2767     if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality())
2768       return false;
2769
2770     ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1));
2771     if (!C)
2772       return false;
2773
2774     // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!)
2775     if (C->isAllOnesValue()) {
2776       auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0));
2777       auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2778       auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
2779       CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr;
2780       if (!CB)
2781         return false;
2782       const int InitModeArgNo = 1;
2783       auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo));
2784       return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC);
2785     }
2786
2787     if (C->isZero()) {
2788       // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x()
2789       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2790         if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x)
2791           return true;
2792
2793       // Match: 0 == llvm.amdgcn.workitem.id.x()
2794       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2795         if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x)
2796           return true;
2797     }
2798
2799     return false;
2800   };
2801
2802   /// Mapping containing information about the function for other AAs.
2803   ExecutionDomainTy InterProceduralED;
2804
2805   enum Direction { PRE = 0, POST = 1 };
2806   /// Mapping containing information per block.
2807   DenseMap<const BasicBlock *, ExecutionDomainTy> BEDMap;
2808   DenseMap<PointerIntPair<const CallBase *, 1, Direction>, ExecutionDomainTy>
2809       CEDMap;
2810   SmallSetVector<CallBase *, 16> AlignedBarriers;
2811
2812   ReversePostOrderTraversal<Function *> *RPOT = nullptr;
2813
2814   /// Set \p R to \p V and report true if that changed \p R.
2815   static bool setAndRecord(bool &R, bool V) {
2816     bool Eq = (R == V);
2817     R = V;
2818     return !Eq;
2819   }
2820
2821   /// Collection of fences known to be non-no-ops. All fences not in this set
2822   /// can be assumed to be no-ops.
2823   SmallPtrSet<const FenceInst *, 8> NonNoOpFences;
2824 };
2825
2826 void AAExecutionDomainFunction::mergeInPredecessorBarriersAndAssumptions(
2827     Attributor &A, ExecutionDomainTy &ED, const ExecutionDomainTy &PredED) {
2828   for (auto *EA : PredED.EncounteredAssumes)
2829     ED.addAssumeInst(A, *EA);
2830
2831   for (auto *AB : PredED.AlignedBarriers)
2832     ED.addAlignedBarrier(A, *AB);
2833 }
2834
2835 bool AAExecutionDomainFunction::mergeInPredecessor(
2836     Attributor &A, ExecutionDomainTy &ED, const ExecutionDomainTy &PredED,
2837     bool InitialEdgeOnly) {
2838
2839   bool Changed = false;
2840   Changed |=
2841       setAndRecord(ED.IsExecutedByInitialThreadOnly,
2842                    InitialEdgeOnly || (PredED.IsExecutedByInitialThreadOnly &&
2843                                        ED.IsExecutedByInitialThreadOnly));
2844
2845   Changed |= setAndRecord(ED.IsReachedFromAlignedBarrierOnly,
2846                           ED.IsReachedFromAlignedBarrierOnly &&
2847                               PredED.IsReachedFromAlignedBarrierOnly);
2848   Changed |= setAndRecord(ED.EncounteredNonLocalSideEffect,
2849                           ED.EncounteredNonLocalSideEffect |
2850                               PredED.EncounteredNonLocalSideEffect);
2851   // Do not track assumptions and barriers as part of Changed.
2852   if (ED.IsReachedFromAlignedBarrierOnly)
2853     mergeInPredecessorBarriersAndAssumptions(A, ED, PredED);
2854   else
2855     ED.clearAssumeInstAndAlignedBarriers();
2856   return Changed;
2857 }
2858
2859 bool AAExecutionDomainFunction::handleCallees(Attributor &A,
2860                                               ExecutionDomainTy &EntryBBED) {
2861   SmallVector<std::pair<ExecutionDomainTy, ExecutionDomainTy>, 4> CallSiteEDs;
2862   auto PredForCallSite = [&](AbstractCallSite ACS) {
2863     const auto *EDAA = A.getAAFor<AAExecutionDomain>(
2864         *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2865         DepClassTy::OPTIONAL);
2866     if (!EDAA || !EDAA->getState().isValidState())
2867       return false;
2868     CallSiteEDs.emplace_back(
2869         EDAA->getExecutionDomain(*cast<CallBase>(ACS.getInstruction())));
2870     return true;
2871   };
2872
2873   ExecutionDomainTy ExitED;
2874   bool AllCallSitesKnown;
2875   if (A.checkForAllCallSites(PredForCallSite, *this,
2876                              /* RequiresAllCallSites */ true,
2877                              AllCallSitesKnown)) {
2878     for (const auto &[CSInED, CSOutED] : CallSiteEDs) {
2879       mergeInPredecessor(A, EntryBBED, CSInED);
2880       ExitED.IsReachingAlignedBarrierOnly &=
2881           CSOutED.IsReachingAlignedBarrierOnly;
2882     }
2883
2884   } else {
2885     // We could not find all predecessors, so this is either a kernel or a
2886     // function with external linkage (or with some other weird uses).
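    // E.g. (sketch): for a kernel entry such as
    //   define void @__omp_offloading_xy_kernel(...) { entry: ... }
    // all threads start the kernel together, which acts like an implicit
    // aligned barrier at the entry; for an unknown external caller no such
    // assumption is safe and we have to be pessimistic below.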
2887 if (omp::isKernel(*getAnchorScope())) { 2888 EntryBBED.IsExecutedByInitialThreadOnly = false; 2889 EntryBBED.IsReachedFromAlignedBarrierOnly = true; 2890 EntryBBED.EncounteredNonLocalSideEffect = false; 2891 ExitED.IsReachingAlignedBarrierOnly = true; 2892 } else { 2893 EntryBBED.IsExecutedByInitialThreadOnly = false; 2894 EntryBBED.IsReachedFromAlignedBarrierOnly = false; 2895 EntryBBED.EncounteredNonLocalSideEffect = true; 2896 ExitED.IsReachingAlignedBarrierOnly = false; 2897 } 2898 } 2899 2900 bool Changed = false; 2901 auto &FnED = BEDMap[nullptr]; 2902 Changed |= setAndRecord(FnED.IsReachedFromAlignedBarrierOnly, 2903 FnED.IsReachedFromAlignedBarrierOnly & 2904 EntryBBED.IsReachedFromAlignedBarrierOnly); 2905 Changed |= setAndRecord(FnED.IsReachingAlignedBarrierOnly, 2906 FnED.IsReachingAlignedBarrierOnly & 2907 ExitED.IsReachingAlignedBarrierOnly); 2908 Changed |= setAndRecord(FnED.IsExecutedByInitialThreadOnly, 2909 EntryBBED.IsExecutedByInitialThreadOnly); 2910 return Changed; 2911 } 2912 2913 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) { 2914 2915 bool Changed = false; 2916 2917 // Helper to deal with an aligned barrier encountered during the forward 2918 // traversal. \p CB is the aligned barrier, \p ED is the execution domain when 2919 // it was encountered. 2920 auto HandleAlignedBarrier = [&](CallBase &CB, ExecutionDomainTy &ED) { 2921 Changed |= AlignedBarriers.insert(&CB); 2922 // First, update the barrier ED kept in the separate CEDMap. 2923 auto &CallInED = CEDMap[{&CB, PRE}]; 2924 Changed |= mergeInPredecessor(A, CallInED, ED); 2925 CallInED.IsReachingAlignedBarrierOnly = true; 2926 // Next adjust the ED we use for the traversal. 2927 ED.EncounteredNonLocalSideEffect = false; 2928 ED.IsReachedFromAlignedBarrierOnly = true; 2929 // Aligned barrier collection has to come last. 2930 ED.clearAssumeInstAndAlignedBarriers(); 2931 ED.addAlignedBarrier(A, CB); 2932 auto &CallOutED = CEDMap[{&CB, POST}]; 2933 Changed |= mergeInPredecessor(A, CallOutED, ED); 2934 }; 2935 2936 auto *LivenessAA = 2937 A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL); 2938 2939 Function *F = getAnchorScope(); 2940 BasicBlock &EntryBB = F->getEntryBlock(); 2941 bool IsKernel = omp::isKernel(*F); 2942 2943 SmallVector<Instruction *> SyncInstWorklist; 2944 for (auto &RIt : *RPOT) { 2945 BasicBlock &BB = *RIt; 2946 2947 bool IsEntryBB = &BB == &EntryBB; 2948 // TODO: We use local reasoning since we don't have a divergence analysis 2949 // running as well. We could basically allow uniform branches here. 2950 bool AlignedBarrierLastInBlock = IsEntryBB && IsKernel; 2951 bool IsExplicitlyAligned = IsEntryBB && IsKernel; 2952 ExecutionDomainTy ED; 2953 // Propagate "incoming edges" into information about this block. 2954 if (IsEntryBB) { 2955 Changed |= handleCallees(A, ED); 2956 } else { 2957 // For live non-entry blocks we only propagate 2958 // information via live edges. 2959 if (LivenessAA && LivenessAA->isAssumedDead(&BB)) 2960 continue; 2961 2962 for (auto *PredBB : predecessors(&BB)) { 2963 if (LivenessAA && LivenessAA->isEdgeDead(PredBB, &BB)) 2964 continue; 2965 bool InitialEdgeOnly = isInitialThreadOnlyEdge( 2966 A, dyn_cast<BranchInst>(PredBB->getTerminator()), BB); 2967 mergeInPredecessor(A, ED, BEDMap[PredBB], InitialEdgeOnly); 2968 } 2969 } 2970 2971 // Now we traverse the block, accumulate effects in ED and attach 2972 // information to calls. 
2973     for (Instruction &I : BB) {
2974       bool UsedAssumedInformation;
2975       if (A.isAssumedDead(I, *this, LivenessAA, UsedAssumedInformation,
2976                           /* CheckBBLivenessOnly */ false, DepClassTy::OPTIONAL,
2977                           /* CheckForDeadStore */ true))
2978         continue;
2979
2980       // Assumes and "assume-like" (dbg, lifetime, ...) are handled first; the
2981       // former are collected, the latter are ignored.
2982       if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
2983         if (auto *AI = dyn_cast_or_null<AssumeInst>(II)) {
2984           ED.addAssumeInst(A, *AI);
2985           continue;
2986         }
2987         // TODO: Should we also collect and delete lifetime markers?
2988         if (II->isAssumeLikeIntrinsic())
2989           continue;
2990       }
2991
2992       if (auto *FI = dyn_cast<FenceInst>(&I)) {
2993         if (!ED.EncounteredNonLocalSideEffect) {
2994           // An aligned fence without non-local side-effects is a no-op.
2995           if (ED.IsReachedFromAlignedBarrierOnly)
2996             continue;
2997           // A non-aligned fence without non-local side-effects is a no-op
2998           // if the ordering only publishes non-local side-effects (or less).
2999           switch (FI->getOrdering()) {
3000           case AtomicOrdering::NotAtomic:
3001             continue;
3002           case AtomicOrdering::Unordered:
3003             continue;
3004           case AtomicOrdering::Monotonic:
3005             continue;
3006           case AtomicOrdering::Acquire:
3007             break;
3008           case AtomicOrdering::Release:
3009             continue;
3010           case AtomicOrdering::AcquireRelease:
3011             break;
3012           case AtomicOrdering::SequentiallyConsistent:
3013             break;
3014           };
3015         }
3016         NonNoOpFences.insert(FI);
3017       }
3018
3019       auto *CB = dyn_cast<CallBase>(&I);
3020       bool IsNoSync = AA::isNoSyncInst(A, I, *this);
3021       bool IsAlignedBarrier =
3022           !IsNoSync && CB &&
3023           AANoSync::isAlignedBarrier(*CB, AlignedBarrierLastInBlock);
3024
3025       AlignedBarrierLastInBlock &= IsNoSync;
3026       IsExplicitlyAligned &= IsNoSync;
3027
3028       // Next we check for calls. Aligned barriers are handled
3029       // explicitly; everything else is kept for the backward traversal and will
3030       // also affect our state.
3031       if (CB) {
3032         if (IsAlignedBarrier) {
3033           HandleAlignedBarrier(*CB, ED);
3034           AlignedBarrierLastInBlock = true;
3035           IsExplicitlyAligned = true;
3036           continue;
3037         }
3038
3039         // Check the pointer(s) of a memory intrinsic explicitly.
3040         if (isa<MemIntrinsic>(&I)) {
3041           if (!ED.EncounteredNonLocalSideEffect &&
3042               AA::isPotentiallyAffectedByBarrier(A, I, *this))
3043             ED.EncounteredNonLocalSideEffect = true;
3044           if (!IsNoSync) {
3045             ED.IsReachedFromAlignedBarrierOnly = false;
3046             SyncInstWorklist.push_back(&I);
3047           }
3048           continue;
3049         }
3050
3051         // Record how we entered the call, then accumulate the effect of the
3052         // call in ED for potential use by the callee.
3053         auto &CallInED = CEDMap[{CB, PRE}];
3054         Changed |= mergeInPredecessor(A, CallInED, ED);
3055
3056         // If we have a sync-definition we can check if it starts/ends in an
3057         // aligned barrier. If we are unsure we assume any sync breaks
3058         // alignment.
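        // E.g. (sketch): a callee whose body is just
        //   { __kmpc_barrier_simple_spmd(Ident, TId); }
        // starts and ends in an aligned barrier, so the aligned state survives
        // the call site; a callee with trailing unguarded side effects does
        // not preserve it and the call is queued for the backward traversal.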
3059 Function *Callee = CB->getCalledFunction(); 3060 if (!IsNoSync && Callee && !Callee->isDeclaration()) { 3061 const auto *EDAA = A.getAAFor<AAExecutionDomain>( 3062 *this, IRPosition::function(*Callee), DepClassTy::OPTIONAL); 3063 if (EDAA && EDAA->getState().isValidState()) { 3064 const auto &CalleeED = EDAA->getFunctionExecutionDomain(); 3065 ED.IsReachedFromAlignedBarrierOnly = 3066 CalleeED.IsReachedFromAlignedBarrierOnly; 3067 AlignedBarrierLastInBlock = ED.IsReachedFromAlignedBarrierOnly; 3068 if (IsNoSync || !CalleeED.IsReachedFromAlignedBarrierOnly) 3069 ED.EncounteredNonLocalSideEffect |= 3070 CalleeED.EncounteredNonLocalSideEffect; 3071 else 3072 ED.EncounteredNonLocalSideEffect = 3073 CalleeED.EncounteredNonLocalSideEffect; 3074 if (!CalleeED.IsReachingAlignedBarrierOnly) { 3075 Changed |= 3076 setAndRecord(CallInED.IsReachingAlignedBarrierOnly, false); 3077 SyncInstWorklist.push_back(&I); 3078 } 3079 if (CalleeED.IsReachedFromAlignedBarrierOnly) 3080 mergeInPredecessorBarriersAndAssumptions(A, ED, CalleeED); 3081 auto &CallOutED = CEDMap[{CB, POST}]; 3082 Changed |= mergeInPredecessor(A, CallOutED, ED); 3083 continue; 3084 } 3085 } 3086 if (!IsNoSync) { 3087 ED.IsReachedFromAlignedBarrierOnly = false; 3088 Changed |= setAndRecord(CallInED.IsReachingAlignedBarrierOnly, false); 3089 SyncInstWorklist.push_back(&I); 3090 } 3091 AlignedBarrierLastInBlock &= ED.IsReachedFromAlignedBarrierOnly; 3092 ED.EncounteredNonLocalSideEffect |= !CB->doesNotAccessMemory(); 3093 auto &CallOutED = CEDMap[{CB, POST}]; 3094 Changed |= mergeInPredecessor(A, CallOutED, ED); 3095 } 3096 3097 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory()) 3098 continue; 3099 3100 // If we have a callee we try to use fine-grained information to 3101 // determine local side-effects. 
3102 if (CB) { 3103 const auto *MemAA = A.getAAFor<AAMemoryLocation>( 3104 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3105 3106 auto AccessPred = [&](const Instruction *I, const Value *Ptr, 3107 AAMemoryLocation::AccessKind, 3108 AAMemoryLocation::MemoryLocationsKind) { 3109 return !AA::isPotentiallyAffectedByBarrier(A, {Ptr}, *this, I); 3110 }; 3111 if (MemAA && MemAA->getState().isValidState() && 3112 MemAA->checkForAllAccessesToMemoryKind( 3113 AccessPred, AAMemoryLocation::ALL_LOCATIONS)) 3114 continue; 3115 } 3116 3117 auto &InfoCache = A.getInfoCache(); 3118 if (!I.mayHaveSideEffects() && InfoCache.isOnlyUsedByAssume(I)) 3119 continue; 3120 3121 if (auto *LI = dyn_cast<LoadInst>(&I)) 3122 if (LI->hasMetadata(LLVMContext::MD_invariant_load)) 3123 continue; 3124 3125 if (!ED.EncounteredNonLocalSideEffect && 3126 AA::isPotentiallyAffectedByBarrier(A, I, *this)) 3127 ED.EncounteredNonLocalSideEffect = true; 3128 } 3129 3130 bool IsEndAndNotReachingAlignedBarriersOnly = false; 3131 if (!isa<UnreachableInst>(BB.getTerminator()) && 3132 !BB.getTerminator()->getNumSuccessors()) { 3133 3134 Changed |= mergeInPredecessor(A, InterProceduralED, ED); 3135 3136 auto &FnED = BEDMap[nullptr]; 3137 if (IsKernel && !IsExplicitlyAligned) 3138 FnED.IsReachingAlignedBarrierOnly = false; 3139 Changed |= mergeInPredecessor(A, FnED, ED); 3140 3141 if (!FnED.IsReachingAlignedBarrierOnly) { 3142 IsEndAndNotReachingAlignedBarriersOnly = true; 3143 SyncInstWorklist.push_back(BB.getTerminator()); 3144 auto &BBED = BEDMap[&BB]; 3145 Changed |= setAndRecord(BBED.IsReachingAlignedBarrierOnly, false); 3146 } 3147 } 3148 3149 ExecutionDomainTy &StoredED = BEDMap[&BB]; 3150 ED.IsReachingAlignedBarrierOnly = StoredED.IsReachingAlignedBarrierOnly & 3151 !IsEndAndNotReachingAlignedBarriersOnly; 3152 3153 // Check if we computed anything different as part of the forward 3154 // traversal. We do not take assumptions and aligned barriers into account 3155 // as they do not influence the state we iterate. Backward traversal values 3156 // are handled later on. 3157 if (ED.IsExecutedByInitialThreadOnly != 3158 StoredED.IsExecutedByInitialThreadOnly || 3159 ED.IsReachedFromAlignedBarrierOnly != 3160 StoredED.IsReachedFromAlignedBarrierOnly || 3161 ED.EncounteredNonLocalSideEffect != 3162 StoredED.EncounteredNonLocalSideEffect) 3163 Changed = true; 3164 3165 // Update the state with the new value. 3166 StoredED = std::move(ED); 3167 } 3168 3169 // Propagate (non-aligned) sync instruction effects backwards until the 3170 // entry is hit or an aligned barrier. 
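  // Sketch of one backward step: given
  //   %bb:
  //     call void @fn()                    ; cleared below
  //     call void @some_unknown_sync()     ; worklist item
  // the calls above the sync lose IsReachingAlignedBarrierOnly, and once the
  // block entry is crossed the predecessors' terminators are queued so their
  // blocks are revisited.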
3171   SmallSetVector<BasicBlock *, 16> Visited;
3172   while (!SyncInstWorklist.empty()) {
3173     Instruction *SyncInst = SyncInstWorklist.pop_back_val();
3174     Instruction *CurInst = SyncInst;
3175     bool HitAlignedBarrierOrKnownEnd = false;
3176     while ((CurInst = CurInst->getPrevNode())) {
3177       auto *CB = dyn_cast<CallBase>(CurInst);
3178       if (!CB)
3179         continue;
3180       auto &CallOutED = CEDMap[{CB, POST}];
3181       Changed |= setAndRecord(CallOutED.IsReachingAlignedBarrierOnly, false);
3182       auto &CallInED = CEDMap[{CB, PRE}];
3183       HitAlignedBarrierOrKnownEnd =
3184           AlignedBarriers.count(CB) || !CallInED.IsReachingAlignedBarrierOnly;
3185       if (HitAlignedBarrierOrKnownEnd)
3186         break;
3187       Changed |= setAndRecord(CallInED.IsReachingAlignedBarrierOnly, false);
3188     }
3189     if (HitAlignedBarrierOrKnownEnd)
3190       continue;
3191     BasicBlock *SyncBB = SyncInst->getParent();
3192     for (auto *PredBB : predecessors(SyncBB)) {
3193       if (LivenessAA && LivenessAA->isEdgeDead(PredBB, SyncBB))
3194         continue;
3195       if (!Visited.insert(PredBB))
3196         continue;
3197       auto &PredED = BEDMap[PredBB];
3198       if (setAndRecord(PredED.IsReachingAlignedBarrierOnly, false)) {
3199         Changed = true;
3200         SyncInstWorklist.push_back(PredBB->getTerminator());
3201       }
3202     }
3203     if (SyncBB != &EntryBB)
3204       continue;
3205     Changed |=
3206         setAndRecord(InterProceduralED.IsReachingAlignedBarrierOnly, false);
3207   }
3208
3209   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3210 }
3211
3212 /// Try to replace memory allocation calls called by a single thread with a
3213 /// static buffer of shared memory.
3214 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> {
3215   using Base = StateWrapper<BooleanState, AbstractAttribute>;
3216   AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
3217
3218   /// Create an abstract attribute view for the position \p IRP.
3219   static AAHeapToShared &createForPosition(const IRPosition &IRP,
3220                                            Attributor &A);
3221
3222   /// Returns true if HeapToShared conversion is assumed to be possible.
3223   virtual bool isAssumedHeapToShared(CallBase &CB) const = 0;
3224
3225   /// Returns true if HeapToShared conversion is assumed and the CB is a
3226   /// callsite to a free operation to be removed.
3227   virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0;
3228
3229   /// See AbstractAttribute::getName().
3230   const std::string getName() const override { return "AAHeapToShared"; }
3231
3232   /// See AbstractAttribute::getIdAddr().
3233   const char *getIdAddr() const override { return &ID; }
3234
3235   /// This function should return true if the type of the \p AA is
3236   /// AAHeapToShared.
3237   static bool classof(const AbstractAttribute *AA) {
3238     return (AA->getIdAddr() == &ID);
3239   }
3240
3241   /// Unique ID (due to the unique address)
3242   static const char ID;
3243 };
3244
3245 struct AAHeapToSharedFunction : public AAHeapToShared {
3246   AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A)
3247       : AAHeapToShared(IRP, A) {}
3248
3249   const std::string getAsStr(Attributor *) const override {
3250     return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) +
3251            " malloc calls eligible.";
3252   }
3253
3254   /// See AbstractAttribute::trackStatistics().
3255   void trackStatistics() const override {}
3256
3257   /// This function finds free calls that will be removed by the
3258   /// HeapToShared transformation.
3259 void findPotentialRemovedFreeCalls(Attributor &A) { 3260 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3261 auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; 3262 3263 PotentialRemovedFreeCalls.clear(); 3264 // Update free call users of found malloc calls. 3265 for (CallBase *CB : MallocCalls) { 3266 SmallVector<CallBase *, 4> FreeCalls; 3267 for (auto *U : CB->users()) { 3268 CallBase *C = dyn_cast<CallBase>(U); 3269 if (C && C->getCalledFunction() == FreeRFI.Declaration) 3270 FreeCalls.push_back(C); 3271 } 3272 3273 if (FreeCalls.size() != 1) 3274 continue; 3275 3276 PotentialRemovedFreeCalls.insert(FreeCalls.front()); 3277 } 3278 } 3279 3280 void initialize(Attributor &A) override { 3281 if (DisableOpenMPOptDeglobalization) { 3282 indicatePessimisticFixpoint(); 3283 return; 3284 } 3285 3286 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3287 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3288 if (!RFI.Declaration) 3289 return; 3290 3291 Attributor::SimplifictionCallbackTy SCB = 3292 [](const IRPosition &, const AbstractAttribute *, 3293 bool &) -> std::optional<Value *> { return nullptr; }; 3294 3295 Function *F = getAnchorScope(); 3296 for (User *U : RFI.Declaration->users()) 3297 if (CallBase *CB = dyn_cast<CallBase>(U)) { 3298 if (CB->getFunction() != F) 3299 continue; 3300 MallocCalls.insert(CB); 3301 A.registerSimplificationCallback(IRPosition::callsite_returned(*CB), 3302 SCB); 3303 } 3304 3305 findPotentialRemovedFreeCalls(A); 3306 } 3307 3308 bool isAssumedHeapToShared(CallBase &CB) const override { 3309 return isValidState() && MallocCalls.count(&CB); 3310 } 3311 3312 bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override { 3313 return isValidState() && PotentialRemovedFreeCalls.count(&CB); 3314 } 3315 3316 ChangeStatus manifest(Attributor &A) override { 3317 if (MallocCalls.empty()) 3318 return ChangeStatus::UNCHANGED; 3319 3320 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3321 auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; 3322 3323 Function *F = getAnchorScope(); 3324 auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this, 3325 DepClassTy::OPTIONAL); 3326 3327 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3328 for (CallBase *CB : MallocCalls) { 3329 // Skip replacing this if HeapToStack has already claimed it. 3330 if (HS && HS->isAssumedHeapToStack(*CB)) 3331 continue; 3332 3333 // Find the unique free call to remove it. 3334 SmallVector<CallBase *, 4> FreeCalls; 3335 for (auto *U : CB->users()) { 3336 CallBase *C = dyn_cast<CallBase>(U); 3337 if (C && C->getCalledFunction() == FreeCall.Declaration) 3338 FreeCalls.push_back(C); 3339 } 3340 if (FreeCalls.size() != 1) 3341 continue; 3342 3343 auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0)); 3344 3345 if (AllocSize->getZExtValue() + SharedMemoryUsed > SharedMemoryLimit) { 3346 LLVM_DEBUG(dbgs() << TAG << "Cannot replace call " << *CB 3347 << " with shared memory." 3348 << " Shared memory usage is limited to " 3349 << SharedMemoryLimit << " bytes\n"); 3350 continue; 3351 } 3352 3353 LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB 3354 << " with " << AllocSize->getZExtValue() 3355 << " bytes of shared memory\n"); 3356 3357 // Create a new shared memory buffer of the same size as the allocation 3358 // and replace all the uses of the original allocation with it. 
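      // Sketch of the rewrite (types and names illustrative):
      //   %p = call ptr @__kmpc_alloc_shared(i64 16)
      //   ...
      //   call void @__kmpc_free_shared(ptr %p, i64 16)
      // becomes
      //   @x_shared = internal addrspace(3) global [16 x i8] poison
      // with all uses of %p rewired to (a cast of) @x_shared and both runtime
      // calls deleted.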
      Module *M = CB->getModule();
      Type *Int8Ty = Type::getInt8Ty(M->getContext());
      Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue());
      auto *SharedMem = new GlobalVariable(
          *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage,
          PoisonValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr,
          GlobalValue::NotThreadLocal,
          static_cast<unsigned>(AddressSpace::Shared));
      auto *NewBuffer =
          ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo());

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Replaced globalized variable with "
                  << ore::NV("SharedMemory", AllocSize->getZExtValue())
                  << (AllocSize->isOne() ? " byte " : " bytes ")
                  << "of shared memory.";
      };
      A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark);

      MaybeAlign Alignment = CB->getRetAlign();
      assert(Alignment &&
             "HeapToShared on allocation without alignment attribute");
      SharedMem->setAlignment(*Alignment);

      A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewBuffer);
      A.deleteAfterManifest(*CB);
      A.deleteAfterManifest(*FreeCalls.front());

      SharedMemoryUsed += AllocSize->getZExtValue();
      NumBytesMovedToSharedMemory = SharedMemoryUsed;
      Changed = ChangeStatus::CHANGED;
    }

    return Changed;
  }

  ChangeStatus updateImpl(Attributor &A) override {
    if (MallocCalls.empty())
      return indicatePessimisticFixpoint();
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
    if (!RFI.Declaration)
      return ChangeStatus::UNCHANGED;

    Function *F = getAnchorScope();

    auto NumMallocCalls = MallocCalls.size();

    // Only consider malloc calls executed by a single thread with a constant
    // allocation size.
    for (User *U : RFI.Declaration->users()) {
      if (CallBase *CB = dyn_cast<CallBase>(U)) {
        if (CB->getCaller() != F)
          continue;
        if (!MallocCalls.count(CB))
          continue;
        if (!isa<ConstantInt>(CB->getArgOperand(0))) {
          MallocCalls.remove(CB);
          continue;
        }
        const auto *ED = A.getAAFor<AAExecutionDomain>(
            *this, IRPosition::function(*F), DepClassTy::REQUIRED);
        if (!ED || !ED->isExecutedByInitialThreadOnly(*CB))
          MallocCalls.remove(CB);
      }
    }

    findPotentialRemovedFreeCalls(A);

    if (NumMallocCalls != MallocCalls.size())
      return ChangeStatus::CHANGED;

    return ChangeStatus::UNCHANGED;
  }

  /// Collection of all malloc calls in a function.
  SmallSetVector<CallBase *, 4> MallocCalls;
  /// Collection of potentially removed free calls in a function.
  SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls;
  /// The total amount of shared memory that has been used for HeapToShared.
  unsigned SharedMemoryUsed = 0;
};

struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> {
  using Base = StateWrapper<KernelInfoState, AbstractAttribute>;
  AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Statistics are tracked as part of manifest for now.
  void trackStatistics() const override {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *) const override {
    if (!isValidState())
      return "<invalid>";
    return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD"
                                                            : "generic") +
           std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]"
                                                               : "") +
           std::string(" #PRs: ") +
           (ReachedKnownParallelRegions.isValidState()
                ? std::to_string(ReachedKnownParallelRegions.size())
                : "<invalid>") +
           ", #Unknown PRs: " +
           (ReachedUnknownParallelRegions.isValidState()
                ? std::to_string(ReachedUnknownParallelRegions.size())
                : "<invalid>") +
           ", #Reaching Kernels: " +
           (ReachingKernelEntries.isValidState()
                ? std::to_string(ReachingKernelEntries.size())
                : "<invalid>") +
           ", #ParLevels: " +
           (ParallelLevels.isValidState()
                ? std::to_string(ParallelLevels.size())
                : "<invalid>");
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAKernelInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAKernelInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  static const char ID;
};

/// The function kernel info abstract attribute, basically, what can we say
/// about a function with regards to the KernelInfoState.
struct AAKernelInfoFunction : AAKernelInfo {
  AAKernelInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAKernelInfo(IRP, A) {}

  SmallPtrSet<Instruction *, 4> GuardedInstructions;

  SmallPtrSetImpl<Instruction *> &getGuardedInstructions() {
    return GuardedInstructions;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // This is a high-level transform that might change the constant arguments
    // of the init and deinit calls. We need to tell the Attributor about this
    // to avoid other parts using the current constant value for
    // simplification.
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    Function *Fn = getAnchorScope();

    OMPInformationCache::RuntimeFunctionInfo &InitRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
    OMPInformationCache::RuntimeFunctionInfo &DeinitRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit];

    // For kernels we perform more initialization work, first we find the init
    // and deinit calls.
    auto StoreCallBase = [](Use &U,
                            OMPInformationCache::RuntimeFunctionInfo &RFI,
                            CallBase *&Storage) {
      CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI);
      assert(CB &&
             "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!");
      assert(!Storage &&
             "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!");
      Storage = CB;
      return false;
    };
    InitRFI.foreachUse(
        [&](Use &U, Function &) {
          StoreCallBase(U, InitRFI, KernelInitCB);
          return false;
        },
        Fn);
    DeinitRFI.foreachUse(
        [&](Use &U, Function &) {
          StoreCallBase(U, DeinitRFI, KernelDeinitCB);
          return false;
        },
        Fn);

    // Ignore kernels without initializers such as global constructors.
    if (!KernelInitCB || !KernelDeinitCB)
      return;

    // Add this kernel to its own reaching-kernel set and set IsKernelEntry.
3548 ReachingKernelEntries.insert(Fn); 3549 IsKernelEntry = true; 3550 3551 // For kernels we might need to initialize/finalize the IsSPMD state and 3552 // we need to register a simplification callback so that the Attributor 3553 // knows the constant arguments to __kmpc_target_init and 3554 // __kmpc_target_deinit might actually change. 3555 3556 Attributor::SimplifictionCallbackTy StateMachineSimplifyCB = 3557 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3558 bool &UsedAssumedInformation) -> std::optional<Value *> { 3559 return nullptr; 3560 }; 3561 3562 Attributor::SimplifictionCallbackTy ModeSimplifyCB = 3563 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3564 bool &UsedAssumedInformation) -> std::optional<Value *> { 3565 // IRP represents the "SPMDCompatibilityTracker" argument of an 3566 // __kmpc_target_init or 3567 // __kmpc_target_deinit call. We will answer this one with the internal 3568 // state. 3569 if (!SPMDCompatibilityTracker.isValidState()) 3570 return nullptr; 3571 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 3572 if (AA) 3573 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3574 UsedAssumedInformation = true; 3575 } else { 3576 UsedAssumedInformation = false; 3577 } 3578 auto *Val = ConstantInt::getSigned( 3579 IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()), 3580 SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD 3581 : OMP_TGT_EXEC_MODE_GENERIC); 3582 return Val; 3583 }; 3584 3585 constexpr const int InitModeArgNo = 1; 3586 constexpr const int DeinitModeArgNo = 1; 3587 constexpr const int InitUseStateMachineArgNo = 2; 3588 A.registerSimplificationCallback( 3589 IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo), 3590 StateMachineSimplifyCB); 3591 A.registerSimplificationCallback( 3592 IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo), 3593 ModeSimplifyCB); 3594 A.registerSimplificationCallback( 3595 IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo), 3596 ModeSimplifyCB); 3597 3598 // Check if we know we are in SPMD-mode already. 3599 ConstantInt *ModeArg = 3600 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 3601 if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 3602 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 3603 // This is a generic region but SPMDization is disabled so stop tracking. 3604 else if (DisableOpenMPOptSPMDization) 3605 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3606 3607 // Register virtual uses of functions we might need to preserve. 3608 auto RegisterVirtualUse = [&](RuntimeFunction RFKind, 3609 Attributor::VirtualUseCallbackTy &CB) { 3610 if (!OMPInfoCache.RFIs[RFKind].Declaration) 3611 return; 3612 A.registerVirtualUseCallback(*OMPInfoCache.RFIs[RFKind].Declaration, CB); 3613 }; 3614 3615 // Add a dependence to ensure updates if the state changes. 3616 auto AddDependence = [](Attributor &A, const AAKernelInfo *KI, 3617 const AbstractAttribute *QueryingAA) { 3618 if (QueryingAA) { 3619 A.recordDependence(*KI, *QueryingAA, DepClassTy::OPTIONAL); 3620 } 3621 return true; 3622 }; 3623 3624 Attributor::VirtualUseCallbackTy CustomStateMachineUseCB = 3625 [&](Attributor &A, const AbstractAttribute *QueryingAA) { 3626 // Whenever we create a custom state machine we will insert calls to 3627 // __kmpc_get_hardware_num_threads_in_block, 3628 // __kmpc_get_warp_size, 3629 // __kmpc_barrier_simple_generic, 3630 // __kmpc_kernel_parallel, and 3631 // __kmpc_kernel_end_parallel. 
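          // Registering these as virtual uses keeps the declarations alive
          // until we know whether the custom state machine will be built.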
          // Not needed if we are on track for SPMDzation.
          if (SPMDCompatibilityTracker.isValidState())
            return AddDependence(A, this, QueryingAA);
          // Not needed if we can't rewrite due to an invalid state.
          if (!ReachedKnownParallelRegions.isValidState())
            return AddDependence(A, this, QueryingAA);
          return false;
        };

    // Not needed if we are pre-runtime merge.
    if (!KernelInitCB->getCalledFunction()->isDeclaration()) {
      RegisterVirtualUse(OMPRTL___kmpc_get_hardware_num_threads_in_block,
                         CustomStateMachineUseCB);
      RegisterVirtualUse(OMPRTL___kmpc_get_warp_size, CustomStateMachineUseCB);
      RegisterVirtualUse(OMPRTL___kmpc_barrier_simple_generic,
                         CustomStateMachineUseCB);
      RegisterVirtualUse(OMPRTL___kmpc_kernel_parallel,
                         CustomStateMachineUseCB);
      RegisterVirtualUse(OMPRTL___kmpc_kernel_end_parallel,
                         CustomStateMachineUseCB);
    }

    // If we do not perform SPMDzation we do not need the virtual uses below.
    if (SPMDCompatibilityTracker.isAtFixpoint())
      return;

    Attributor::VirtualUseCallbackTy HWThreadIdUseCB =
        [&](Attributor &A, const AbstractAttribute *QueryingAA) {
          // Whenever we perform SPMDzation we will insert
          // __kmpc_get_hardware_thread_id_in_block calls.
          if (!SPMDCompatibilityTracker.isValidState())
            return AddDependence(A, this, QueryingAA);
          return false;
        };
    RegisterVirtualUse(OMPRTL___kmpc_get_hardware_thread_id_in_block,
                       HWThreadIdUseCB);

    Attributor::VirtualUseCallbackTy SPMDBarrierUseCB =
        [&](Attributor &A, const AbstractAttribute *QueryingAA) {
          // Whenever we perform SPMDzation with guarding we will insert
          // __kmpc_barrier_simple_spmd calls. If SPMDzation failed, there is
          // nothing to guard, or there are no parallel regions, we don't need
          // the calls.
          if (!SPMDCompatibilityTracker.isValidState())
            return AddDependence(A, this, QueryingAA);
          if (SPMDCompatibilityTracker.empty())
            return AddDependence(A, this, QueryingAA);
          if (!mayContainParallelRegion())
            return AddDependence(A, this, QueryingAA);
          return false;
        };
    RegisterVirtualUse(OMPRTL___kmpc_barrier_simple_spmd, SPMDBarrierUseCB);
  }

  /// Sanitize the string \p S such that it is a suitable global symbol name.
  static std::string sanitizeForGlobalName(std::string S) {
    std::replace_if(
        S.begin(), S.end(),
        [](const char C) {
          return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
                   (C >= '0' && C <= '9') || C == '_');
        },
        '.');
    return S;
  }

  /// Modify the IR based on the KernelInfoState as the fixpoint iteration is
  /// finished now.
  ChangeStatus manifest(Attributor &A) override {
    // If we are not looking at a kernel with __kmpc_target_init and
    // __kmpc_target_deinit call we cannot actually manifest the information.
    if (!KernelInitCB || !KernelDeinitCB)
      return ChangeStatus::UNCHANGED;

    // Insert a nested parallelism global variable.
    Function *Kernel = getAnchorScope();
    Module &M = *Kernel->getParent();
    Type *Int8Ty = Type::getInt8Ty(M.getContext());
    auto *GV = new GlobalVariable(
        M, Int8Ty, /* isConstant */ true, GlobalValue::WeakAnyLinkage,
        ConstantInt::get(Int8Ty, NestedParallelism ? 1 : 0),
        Kernel->getName() + "_nested_parallelism");
    GV->setVisibility(GlobalValue::HiddenVisibility);

    // If we can, we change the execution mode to SPMD-mode, otherwise we
    // build a custom state machine.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (!changeToSPMDMode(A, Changed)) {
      if (!KernelInitCB->getCalledFunction()->isDeclaration())
        return buildCustomStateMachine(A);
    }

    return Changed;
  }

  void insertInstructionGuardsHelper(Attributor &A) {
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    auto CreateGuardedRegion = [&](Instruction *RegionStartI,
                                   Instruction *RegionEndI) {
      LoopInfo *LI = nullptr;
      DominatorTree *DT = nullptr;
      MemorySSAUpdater *MSU = nullptr;
      using InsertPointTy = OpenMPIRBuilder::InsertPointTy;

      BasicBlock *ParentBB = RegionStartI->getParent();
      Function *Fn = ParentBB->getParent();
      Module &M = *Fn->getParent();

      // Create all the blocks and logic.
      // ParentBB:
      //    goto RegionCheckTidBB
      // RegionCheckTidBB:
      //    Tid = __kmpc_get_hardware_thread_id_in_block()
      //    if (Tid != 0)
      //        goto RegionBarrierBB
      // RegionStartBB:
      //    <execute instructions guarded>
      //    goto RegionEndBB
      // RegionEndBB:
      //    <store escaping values to shared mem>
      //    goto RegionBarrierBB
      // RegionBarrierBB:
      //    __kmpc_barrier_simple_spmd()
      //    // second barrier is omitted if lacking escaping values.
      //    <load escaping values from shared mem>
      //    __kmpc_barrier_simple_spmd()
      //    goto RegionExitBB
      // RegionExitBB:
      //    <execute rest of instructions>

      BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(),
                                           DT, LI, MSU, "region.guarded.end");
      BasicBlock *RegionBarrierBB =
          SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI,
                     MSU, "region.barrier");
      BasicBlock *RegionExitBB =
          SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(),
                     DT, LI, MSU, "region.exit");
      BasicBlock *RegionStartBB =
          SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded");

      assert(ParentBB->getUniqueSuccessor() == RegionStartBB &&
             "Expected a different CFG");

      BasicBlock *RegionCheckTidBB = SplitBlock(
          ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid");

      // Register basic blocks with the Attributor.
      A.registerManifestAddedBasicBlock(*RegionEndBB);
      A.registerManifestAddedBasicBlock(*RegionBarrierBB);
      A.registerManifestAddedBasicBlock(*RegionExitBB);
      A.registerManifestAddedBasicBlock(*RegionStartBB);
      A.registerManifestAddedBasicBlock(*RegionCheckTidBB);

      bool HasBroadcastValues = false;
      // Find escaping outputs from the guarded region to outside users and
      // broadcast their values to them.
      for (Instruction &I : *RegionStartBB) {
        SmallPtrSet<Instruction *, 4> OutsideUsers;
        for (User *Usr : I.users()) {
          Instruction &UsrI = *cast<Instruction>(Usr);
          if (UsrI.getParent() != RegionStartBB)
            OutsideUsers.insert(&UsrI);
        }

        if (OutsideUsers.empty())
          continue;

        HasBroadcastValues = true;

        // Emit a global variable in shared memory to store the broadcasted
        // value.
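        // One such global is created per escaping value, e.g. (a sketch):
        //   @x.guarded.output.alloc = internal addrspace(3) global i32 undef
        // for an escaping i32 value %x.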
3805 auto *SharedMem = new GlobalVariable( 3806 M, I.getType(), /* IsConstant */ false, 3807 GlobalValue::InternalLinkage, UndefValue::get(I.getType()), 3808 sanitizeForGlobalName( 3809 (I.getName() + ".guarded.output.alloc").str()), 3810 nullptr, GlobalValue::NotThreadLocal, 3811 static_cast<unsigned>(AddressSpace::Shared)); 3812 3813 // Emit a store instruction to update the value. 3814 new StoreInst(&I, SharedMem, RegionEndBB->getTerminator()); 3815 3816 LoadInst *LoadI = new LoadInst(I.getType(), SharedMem, 3817 I.getName() + ".guarded.output.load", 3818 RegionBarrierBB->getTerminator()); 3819 3820 // Emit a load instruction and replace uses of the output value. 3821 for (Instruction *UsrI : OutsideUsers) 3822 UsrI->replaceUsesOfWith(&I, LoadI); 3823 } 3824 3825 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3826 3827 // Go to tid check BB in ParentBB. 3828 const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc(); 3829 ParentBB->getTerminator()->eraseFromParent(); 3830 OpenMPIRBuilder::LocationDescription Loc( 3831 InsertPointTy(ParentBB, ParentBB->end()), DL); 3832 OMPInfoCache.OMPBuilder.updateToLocation(Loc); 3833 uint32_t SrcLocStrSize; 3834 auto *SrcLocStr = 3835 OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize); 3836 Value *Ident = 3837 OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize); 3838 BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL); 3839 3840 // Add check for Tid in RegionCheckTidBB 3841 RegionCheckTidBB->getTerminator()->eraseFromParent(); 3842 OpenMPIRBuilder::LocationDescription LocRegionCheckTid( 3843 InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL); 3844 OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid); 3845 FunctionCallee HardwareTidFn = 3846 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3847 M, OMPRTL___kmpc_get_hardware_thread_id_in_block); 3848 CallInst *Tid = 3849 OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {}); 3850 Tid->setDebugLoc(DL); 3851 OMPInfoCache.setCallingConvention(HardwareTidFn, Tid); 3852 Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid); 3853 OMPInfoCache.OMPBuilder.Builder 3854 .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB) 3855 ->setDebugLoc(DL); 3856 3857 // First barrier for synchronization, ensures main thread has updated 3858 // values. 3859 FunctionCallee BarrierFn = 3860 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3861 M, OMPRTL___kmpc_barrier_simple_spmd); 3862 OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy( 3863 RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt())); 3864 CallInst *Barrier = 3865 OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid}); 3866 Barrier->setDebugLoc(DL); 3867 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3868 3869 // Second barrier ensures workers have read broadcast values. 
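      // The resulting hand-shake, assuming thread 0 executed the region:
      // thread 0 stores the escaping values, all threads meet at the first
      // barrier, all threads load the values, and the second barrier keeps
      // thread 0 from racing ahead of those loads.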
3870 if (HasBroadcastValues) { 3871 CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "", 3872 RegionBarrierBB->getTerminator()); 3873 Barrier->setDebugLoc(DL); 3874 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3875 } 3876 }; 3877 3878 auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3879 SmallPtrSet<BasicBlock *, 8> Visited; 3880 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3881 BasicBlock *BB = GuardedI->getParent(); 3882 if (!Visited.insert(BB).second) 3883 continue; 3884 3885 SmallVector<std::pair<Instruction *, Instruction *>> Reorders; 3886 Instruction *LastEffect = nullptr; 3887 BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend(); 3888 while (++IP != IPEnd) { 3889 if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory()) 3890 continue; 3891 Instruction *I = &*IP; 3892 if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI)) 3893 continue; 3894 if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) { 3895 LastEffect = nullptr; 3896 continue; 3897 } 3898 if (LastEffect) 3899 Reorders.push_back({I, LastEffect}); 3900 LastEffect = &*IP; 3901 } 3902 for (auto &Reorder : Reorders) 3903 Reorder.first->moveBefore(Reorder.second); 3904 } 3905 3906 SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions; 3907 3908 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3909 BasicBlock *BB = GuardedI->getParent(); 3910 auto *CalleeAA = A.lookupAAFor<AAKernelInfo>( 3911 IRPosition::function(*GuardedI->getFunction()), nullptr, 3912 DepClassTy::NONE); 3913 assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo"); 3914 auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA); 3915 // Continue if instruction is already guarded. 3916 if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI)) 3917 continue; 3918 3919 Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr; 3920 for (Instruction &I : *BB) { 3921 // If instruction I needs to be guarded update the guarded region 3922 // bounds. 3923 if (SPMDCompatibilityTracker.contains(&I)) { 3924 CalleeAAFunction.getGuardedInstructions().insert(&I); 3925 if (GuardedRegionStart) 3926 GuardedRegionEnd = &I; 3927 else 3928 GuardedRegionStart = GuardedRegionEnd = &I; 3929 3930 continue; 3931 } 3932 3933 // Instruction I does not need guarding, store 3934 // any region found and reset bounds. 3935 if (GuardedRegionStart) { 3936 GuardedRegions.push_back( 3937 std::make_pair(GuardedRegionStart, GuardedRegionEnd)); 3938 GuardedRegionStart = nullptr; 3939 GuardedRegionEnd = nullptr; 3940 } 3941 } 3942 } 3943 3944 for (auto &GR : GuardedRegions) 3945 CreateGuardedRegion(GR.first, GR.second); 3946 } 3947 3948 void forceSingleThreadPerWorkgroupHelper(Attributor &A) { 3949 // Only allow 1 thread per workgroup to continue executing the user code. 3950 // 3951 // InitCB = __kmpc_target_init(...) 3952 // ThreadIdInBlock = __kmpc_get_hardware_thread_id_in_block(); 3953 // if (ThreadIdInBlock != 0) return; 3954 // UserCode: 3955 // // user code 3956 // 3957 auto &Ctx = getAnchorValue().getContext(); 3958 Function *Kernel = getAssociatedFunction(); 3959 assert(Kernel && "Expected an associated function!"); 3960 3961 // Create block for user code to branch to from initial block. 
    BasicBlock *InitBB = KernelInitCB->getParent();
    BasicBlock *UserCodeBB = InitBB->splitBasicBlock(
        KernelInitCB->getNextNode(), "main.thread.user_code");
    BasicBlock *ReturnBB =
        BasicBlock::Create(Ctx, "exit.threads", Kernel, UserCodeBB);

    // Register blocks with attributor:
    A.registerManifestAddedBasicBlock(*InitBB);
    A.registerManifestAddedBasicBlock(*UserCodeBB);
    A.registerManifestAddedBasicBlock(*ReturnBB);

    // Debug location:
    const DebugLoc &DLoc = KernelInitCB->getDebugLoc();
    ReturnInst::Create(Ctx, ReturnBB)->setDebugLoc(DLoc);
    InitBB->getTerminator()->eraseFromParent();

    // Prepare call to OMPRTL___kmpc_get_hardware_thread_id_in_block.
    Module &M = *Kernel->getParent();
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    FunctionCallee ThreadIdInBlockFn =
        OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
            M, OMPRTL___kmpc_get_hardware_thread_id_in_block);

    // Get thread ID in block.
    CallInst *ThreadIdInBlock =
        CallInst::Create(ThreadIdInBlockFn, "thread_id.in.block", InitBB);
    OMPInfoCache.setCallingConvention(ThreadIdInBlockFn, ThreadIdInBlock);
    ThreadIdInBlock->setDebugLoc(DLoc);

    // Eliminate all threads in the block with ID not equal to 0:
    Instruction *IsMainThread =
        ICmpInst::Create(ICmpInst::ICmp, CmpInst::ICMP_NE, ThreadIdInBlock,
                         ConstantInt::get(ThreadIdInBlock->getType(), 0),
                         "thread.is_main", InitBB);
    IsMainThread->setDebugLoc(DLoc);
    BranchInst::Create(ReturnBB, UserCodeBB, IsMainThread, InitBB);
  }

  bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());

    // We cannot change to SPMD mode if the runtime functions aren't
    // available.
    if (!OMPInfoCache.runtimeFnsAvailable(
            {OMPRTL___kmpc_get_hardware_thread_id_in_block,
             OMPRTL___kmpc_barrier_simple_spmd}))
      return false;

    if (!SPMDCompatibilityTracker.isAssumed()) {
      for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
        if (!NonCompatibleI)
          continue;

        // Skip diagnostics on calls to known OpenMP runtime functions for now.
        if (auto *CB = dyn_cast<CallBase>(NonCompatibleI))
          if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction()))
            continue;

        auto Remark = [&](OptimizationRemarkAnalysis ORA) {
          ORA << "Value has potential side effects preventing SPMD-mode "
                 "execution";
          if (isa<CallBase>(NonCompatibleI)) {
            ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to "
                   "the called function to override";
          }
          return ORA << ".";
        };
        A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121",
                                                 Remark);

        LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: "
                          << *NonCompatibleI << "\n");
      }

      return false;
    }

    // Get the actual kernel, could be the caller of the anchor scope if we
    // have a debug wrapper.
    Function *Kernel = getAnchorScope();
    if (Kernel->hasLocalLinkage()) {
      assert(Kernel->hasOneUse() && "Unexpected use of debug kernel wrapper.");
      auto *CB = cast<CallBase>(Kernel->user_back());
      Kernel = CB->getCaller();
    }
    assert(omp::isKernel(*Kernel) && "Expected kernel function!");

    // Check if the kernel is already in SPMD mode, if so, return success.
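    // The mode is recorded in a per-kernel integer global, conventionally an
    // i8 named "<kernel name>_exec_mode" holding OMP_TGT_EXEC_MODE_* values.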
    GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable(
        (Kernel->getName() + "_exec_mode").str());
    assert(ExecMode && "Kernel without exec mode?");
    assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!");

    // Read the current exec mode flag; only initially generic kernels are
    // transformed.
    assert(isa<ConstantInt>(ExecMode->getInitializer()) &&
           "ExecMode is not an integer!");
    const int8_t ExecModeVal =
        cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue();
    if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC)
      return true;

    // We will now unconditionally modify the IR, indicate a change.
    Changed = ChangeStatus::CHANGED;

    // Do not use instruction guards when no parallel region is present inside
    // the target region.
    if (mayContainParallelRegion())
      insertInstructionGuardsHelper(A);
    else
      forceSingleThreadPerWorkgroupHelper(A);

    // Adjust the global exec mode flag that tells the runtime what mode this
    // kernel is executed in.
    assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC &&
           "Initially non-SPMD kernel has SPMD exec mode!");
    ExecMode->setInitializer(
        ConstantInt::get(ExecMode->getInitializer()->getType(),
                         ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD));

    // Next rewrite the init and deinit calls to indicate we use SPMD-mode now.
    const int InitModeArgNo = 1;
    const int DeinitModeArgNo = 1;
    const int InitUseStateMachineArgNo = 2;

    auto &Ctx = getAnchorValue().getContext();
    A.changeUseAfterManifest(
        KernelInitCB->getArgOperandUse(InitModeArgNo),
        *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
                                OMP_TGT_EXEC_MODE_SPMD));
    A.changeUseAfterManifest(
        KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo),
        *ConstantInt::getBool(Ctx, false));
    A.changeUseAfterManifest(
        KernelDeinitCB->getArgOperandUse(DeinitModeArgNo),
        *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
                                OMP_TGT_EXEC_MODE_SPMD));

    ++NumOpenMPTargetRegionKernelsSPMD;

    auto Remark = [&](OptimizationRemark OR) {
      return OR << "Transformed generic-mode kernel to SPMD-mode.";
    };
    A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark);
    return true;
  }

  ChangeStatus buildCustomStateMachine(Attributor &A) {
    // If we have disabled state machine rewrites, don't make a custom one.
    if (DisableOpenMPOptStateMachineRewrite)
      return ChangeStatus::UNCHANGED;

    // Don't rewrite the state machine if we are not in a valid state.
    if (!ReachedKnownParallelRegions.isValidState())
      return ChangeStatus::UNCHANGED;

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    if (!OMPInfoCache.runtimeFnsAvailable(
            {OMPRTL___kmpc_get_hardware_num_threads_in_block,
             OMPRTL___kmpc_get_warp_size, OMPRTL___kmpc_barrier_simple_generic,
             OMPRTL___kmpc_kernel_parallel, OMPRTL___kmpc_kernel_end_parallel}))
      return ChangeStatus::UNCHANGED;

    const int InitModeArgNo = 1;
    const int InitUseStateMachineArgNo = 2;

    // Check if the current configuration is non-SPMD and generic state
    // machine. If we already have SPMD mode or a custom state machine we do
    // not need to go any further. If it is anything but a constant something
    // is weird and we give up.
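    // The expected call shape is, roughly (a sketch of the relevant operands):
    //   %r = call i32 @__kmpc_target_init(ptr @ident, i8 <Mode>,
    //                                     i1 <UseGenericStateMachine>)
    // where <Mode> and <UseGenericStateMachine> must be constants.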
4130 ConstantInt *UseStateMachine = dyn_cast<ConstantInt>( 4131 KernelInitCB->getArgOperand(InitUseStateMachineArgNo)); 4132 ConstantInt *Mode = 4133 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 4134 4135 // If we are stuck with generic mode, try to create a custom device (=GPU) 4136 // state machine which is specialized for the parallel regions that are 4137 // reachable by the kernel. 4138 if (!UseStateMachine || UseStateMachine->isZero() || !Mode || 4139 (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 4140 return ChangeStatus::UNCHANGED; 4141 4142 // If not SPMD mode, indicate we use a custom state machine now. 4143 auto &Ctx = getAnchorValue().getContext(); 4144 auto *FalseVal = ConstantInt::getBool(Ctx, false); 4145 A.changeUseAfterManifest( 4146 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal); 4147 4148 // If we don't actually need a state machine we are done here. This can 4149 // happen if there simply are no parallel regions. In the resulting kernel 4150 // all worker threads will simply exit right away, leaving the main thread 4151 // to do the work alone. 4152 if (!mayContainParallelRegion()) { 4153 ++NumOpenMPTargetRegionKernelsWithoutStateMachine; 4154 4155 auto Remark = [&](OptimizationRemark OR) { 4156 return OR << "Removing unused state machine from generic-mode kernel."; 4157 }; 4158 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark); 4159 4160 return ChangeStatus::CHANGED; 4161 } 4162 4163 // Keep track in the statistics of our new shiny custom state machine. 4164 if (ReachedUnknownParallelRegions.empty()) { 4165 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback; 4166 4167 auto Remark = [&](OptimizationRemark OR) { 4168 return OR << "Rewriting generic-mode kernel with a customized state " 4169 "machine."; 4170 }; 4171 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark); 4172 } else { 4173 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback; 4174 4175 auto Remark = [&](OptimizationRemarkAnalysis OR) { 4176 return OR << "Generic-mode kernel is executed with a customized state " 4177 "machine that requires a fallback."; 4178 }; 4179 A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark); 4180 4181 // Tell the user why we ended up with a fallback. 4182 for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) { 4183 if (!UnknownParallelRegionCB) 4184 continue; 4185 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 4186 return ORA << "Call may contain unknown parallel regions. Use " 4187 << "`__attribute__((assume(\"omp_no_parallelism\")))` to " 4188 "override."; 4189 }; 4190 A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB, 4191 "OMP133", Remark); 4192 } 4193 } 4194 4195 // Create all the blocks: 4196 // 4197 // InitCB = __kmpc_target_init(...) 4198 // BlockHwSize = 4199 // __kmpc_get_hardware_num_threads_in_block(); 4200 // WarpSize = __kmpc_get_warp_size(); 4201 // BlockSize = BlockHwSize - WarpSize; 4202 // IsWorkerCheckBB: bool IsWorker = InitCB != -1; 4203 // if (IsWorker) { 4204 // if (InitCB >= BlockSize) return; 4205 // SMBeginBB: __kmpc_barrier_simple_generic(...); 4206 // void *WorkFn; 4207 // bool Active = __kmpc_kernel_parallel(&WorkFn); 4208 // if (!WorkFn) return; 4209 // SMIsActiveCheckBB: if (Active) { 4210 // SMIfCascadeCurrentBB: if (WorkFn == <ParFn0>) 4211 // ParFn0(...); 4212 // SMIfCascadeCurrentBB: else if (WorkFn == <ParFn1>) 4213 // ParFn1(...); 4214 // ... 
4215 // SMIfCascadeCurrentBB: else 4216 // ((WorkFnTy*)WorkFn)(...); 4217 // SMEndParallelBB: __kmpc_kernel_end_parallel(...); 4218 // } 4219 // SMDoneBB: __kmpc_barrier_simple_generic(...); 4220 // goto SMBeginBB; 4221 // } 4222 // UserCodeEntryBB: // user code 4223 // __kmpc_target_deinit(...) 4224 // 4225 Function *Kernel = getAssociatedFunction(); 4226 assert(Kernel && "Expected an associated function!"); 4227 4228 BasicBlock *InitBB = KernelInitCB->getParent(); 4229 BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock( 4230 KernelInitCB->getNextNode(), "thread.user_code.check"); 4231 BasicBlock *IsWorkerCheckBB = 4232 BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB); 4233 BasicBlock *StateMachineBeginBB = BasicBlock::Create( 4234 Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB); 4235 BasicBlock *StateMachineFinishedBB = BasicBlock::Create( 4236 Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB); 4237 BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create( 4238 Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB); 4239 BasicBlock *StateMachineIfCascadeCurrentBB = 4240 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 4241 Kernel, UserCodeEntryBB); 4242 BasicBlock *StateMachineEndParallelBB = 4243 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end", 4244 Kernel, UserCodeEntryBB); 4245 BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create( 4246 Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB); 4247 A.registerManifestAddedBasicBlock(*InitBB); 4248 A.registerManifestAddedBasicBlock(*UserCodeEntryBB); 4249 A.registerManifestAddedBasicBlock(*IsWorkerCheckBB); 4250 A.registerManifestAddedBasicBlock(*StateMachineBeginBB); 4251 A.registerManifestAddedBasicBlock(*StateMachineFinishedBB); 4252 A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB); 4253 A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB); 4254 A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB); 4255 A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB); 4256 4257 const DebugLoc &DLoc = KernelInitCB->getDebugLoc(); 4258 ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc); 4259 InitBB->getTerminator()->eraseFromParent(); 4260 4261 Instruction *IsWorker = 4262 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB, 4263 ConstantInt::get(KernelInitCB->getType(), -1), 4264 "thread.is_worker", InitBB); 4265 IsWorker->setDebugLoc(DLoc); 4266 BranchInst::Create(IsWorkerCheckBB, UserCodeEntryBB, IsWorker, InitBB); 4267 4268 Module &M = *Kernel->getParent(); 4269 FunctionCallee BlockHwSizeFn = 4270 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4271 M, OMPRTL___kmpc_get_hardware_num_threads_in_block); 4272 FunctionCallee WarpSizeFn = 4273 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4274 M, OMPRTL___kmpc_get_warp_size); 4275 CallInst *BlockHwSize = 4276 CallInst::Create(BlockHwSizeFn, "block.hw_size", IsWorkerCheckBB); 4277 OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize); 4278 BlockHwSize->setDebugLoc(DLoc); 4279 CallInst *WarpSize = 4280 CallInst::Create(WarpSizeFn, "warp.size", IsWorkerCheckBB); 4281 OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize); 4282 WarpSize->setDebugLoc(DLoc); 4283 Instruction *BlockSize = BinaryOperator::CreateSub( 4284 BlockHwSize, WarpSize, "block.size", IsWorkerCheckBB); 4285 BlockSize->setDebugLoc(DLoc); 4286 Instruction *IsMainOrWorker = ICmpInst::Create( 4287 ICmpInst::ICmp, 
llvm::CmpInst::ICMP_SLT, KernelInitCB, BlockSize, 4288 "thread.is_main_or_worker", IsWorkerCheckBB); 4289 IsMainOrWorker->setDebugLoc(DLoc); 4290 BranchInst::Create(StateMachineBeginBB, StateMachineFinishedBB, 4291 IsMainOrWorker, IsWorkerCheckBB); 4292 4293 // Create local storage for the work function pointer. 4294 const DataLayout &DL = M.getDataLayout(); 4295 Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); 4296 Instruction *WorkFnAI = 4297 new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr, 4298 "worker.work_fn.addr", &Kernel->getEntryBlock().front()); 4299 WorkFnAI->setDebugLoc(DLoc); 4300 4301 OMPInfoCache.OMPBuilder.updateToLocation( 4302 OpenMPIRBuilder::LocationDescription( 4303 IRBuilder<>::InsertPoint(StateMachineBeginBB, 4304 StateMachineBeginBB->end()), 4305 DLoc)); 4306 4307 Value *Ident = KernelInitCB->getArgOperand(0); 4308 Value *GTid = KernelInitCB; 4309 4310 FunctionCallee BarrierFn = 4311 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4312 M, OMPRTL___kmpc_barrier_simple_generic); 4313 CallInst *Barrier = 4314 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB); 4315 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 4316 Barrier->setDebugLoc(DLoc); 4317 4318 if (WorkFnAI->getType()->getPointerAddressSpace() != 4319 (unsigned int)AddressSpace::Generic) { 4320 WorkFnAI = new AddrSpaceCastInst( 4321 WorkFnAI, PointerType::get(Ctx, (unsigned int)AddressSpace::Generic), 4322 WorkFnAI->getName() + ".generic", StateMachineBeginBB); 4323 WorkFnAI->setDebugLoc(DLoc); 4324 } 4325 4326 FunctionCallee KernelParallelFn = 4327 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4328 M, OMPRTL___kmpc_kernel_parallel); 4329 CallInst *IsActiveWorker = CallInst::Create( 4330 KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB); 4331 OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker); 4332 IsActiveWorker->setDebugLoc(DLoc); 4333 Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn", 4334 StateMachineBeginBB); 4335 WorkFn->setDebugLoc(DLoc); 4336 4337 FunctionType *ParallelRegionFnTy = FunctionType::get( 4338 Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)}, 4339 false); 4340 Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 4341 WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast", 4342 StateMachineBeginBB); 4343 4344 Instruction *IsDone = 4345 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn, 4346 Constant::getNullValue(VoidPtrTy), "worker.is_done", 4347 StateMachineBeginBB); 4348 IsDone->setDebugLoc(DLoc); 4349 BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB, 4350 IsDone, StateMachineBeginBB) 4351 ->setDebugLoc(DLoc); 4352 4353 BranchInst::Create(StateMachineIfCascadeCurrentBB, 4354 StateMachineDoneBarrierBB, IsActiveWorker, 4355 StateMachineIsActiveCheckBB) 4356 ->setDebugLoc(DLoc); 4357 4358 Value *ZeroArg = 4359 Constant::getNullValue(ParallelRegionFnTy->getParamType(0)); 4360 4361 // Now that we have most of the CFG skeleton it is time for the if-cascade 4362 // that checks the function pointer we got from the runtime against the 4363 // parallel regions we expect, if there are any. 
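    // With two known parallel regions and a potential unknown one, the
    // cascade built below looks roughly like (a sketch, wrapper names
    // illustrative):
    //   if      (WorkFnCast == @__omp_outlined__0_wrapper) -> call it
    //   else if (WorkFnCast == @__omp_outlined__1_wrapper) -> call it
    //   else                                               -> indirect call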
4364 for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) { 4365 auto *ParallelRegion = ReachedKnownParallelRegions[I]; 4366 BasicBlock *PRExecuteBB = BasicBlock::Create( 4367 Ctx, "worker_state_machine.parallel_region.execute", Kernel, 4368 StateMachineEndParallelBB); 4369 CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB) 4370 ->setDebugLoc(DLoc); 4371 BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB) 4372 ->setDebugLoc(DLoc); 4373 4374 BasicBlock *PRNextBB = 4375 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 4376 Kernel, StateMachineEndParallelBB); 4377 4378 // Check if we need to compare the pointer at all or if we can just 4379 // call the parallel region function. 4380 Value *IsPR; 4381 if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) { 4382 Instruction *CmpI = ICmpInst::Create( 4383 ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion, 4384 "worker.check_parallel_region", StateMachineIfCascadeCurrentBB); 4385 CmpI->setDebugLoc(DLoc); 4386 IsPR = CmpI; 4387 } else { 4388 IsPR = ConstantInt::getTrue(Ctx); 4389 } 4390 4391 BranchInst::Create(PRExecuteBB, PRNextBB, IsPR, 4392 StateMachineIfCascadeCurrentBB) 4393 ->setDebugLoc(DLoc); 4394 StateMachineIfCascadeCurrentBB = PRNextBB; 4395 } 4396 4397 // At the end of the if-cascade we place the indirect function pointer call 4398 // in case we might need it, that is if there can be parallel regions we 4399 // have not handled in the if-cascade above. 4400 if (!ReachedUnknownParallelRegions.empty()) { 4401 StateMachineIfCascadeCurrentBB->setName( 4402 "worker_state_machine.parallel_region.fallback.execute"); 4403 CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "", 4404 StateMachineIfCascadeCurrentBB) 4405 ->setDebugLoc(DLoc); 4406 } 4407 BranchInst::Create(StateMachineEndParallelBB, 4408 StateMachineIfCascadeCurrentBB) 4409 ->setDebugLoc(DLoc); 4410 4411 FunctionCallee EndParallelFn = 4412 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4413 M, OMPRTL___kmpc_kernel_end_parallel); 4414 CallInst *EndParallel = 4415 CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB); 4416 OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel); 4417 EndParallel->setDebugLoc(DLoc); 4418 BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB) 4419 ->setDebugLoc(DLoc); 4420 4421 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB) 4422 ->setDebugLoc(DLoc); 4423 BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB) 4424 ->setDebugLoc(DLoc); 4425 4426 return ChangeStatus::CHANGED; 4427 } 4428 4429 /// Fixpoint iteration update function. Will be called every time a dependence 4430 /// changed its state (and in the beginning). 4431 ChangeStatus updateImpl(Attributor &A) override { 4432 KernelInfoState StateBefore = getState(); 4433 4434 // Callback to check a read/write instruction. 4435 auto CheckRWInst = [&](Instruction &I) { 4436 // We handle calls later. 4437 if (isa<CallBase>(I)) 4438 return true; 4439 // We only care about write effects. 
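      // Reads are benign for SPMD-ness; only (potentially racy) writes may
      // require guarding.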
      if (!I.mayWriteToMemory())
        return true;
      if (auto *SI = dyn_cast<StoreInst>(&I)) {
        const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
            *this, IRPosition::value(*SI->getPointerOperand()),
            DepClassTy::OPTIONAL);
        auto *HS = A.getAAFor<AAHeapToStack>(
            *this, IRPosition::function(*I.getFunction()),
            DepClassTy::OPTIONAL);
        if (UnderlyingObjsAA &&
            UnderlyingObjsAA->forallUnderlyingObjects([&](Value &Obj) {
              if (AA::isAssumedThreadLocalObject(A, Obj, *this))
                return true;
              // Check for AAHeapToStack moved objects which must not be
              // guarded.
              auto *CB = dyn_cast<CallBase>(&Obj);
              return CB && HS && HS->isAssumedHeapToStack(*CB);
            }))
          return true;
      }

      // Insert instruction that needs guarding.
      SPMDCompatibilityTracker.insert(&I);
      return true;
    };

    bool UsedAssumedInformationInCheckRWInst = false;
    if (!SPMDCompatibilityTracker.isAtFixpoint())
      if (!A.checkForAllReadWriteInstructions(
              CheckRWInst, *this, UsedAssumedInformationInCheckRWInst))
        SPMDCompatibilityTracker.indicatePessimisticFixpoint();

    bool UsedAssumedInformationFromReachingKernels = false;
    if (!IsKernelEntry) {
      updateParallelLevels(A);

      bool AllReachingKernelsKnown = true;
      updateReachingKernelEntries(A, AllReachingKernelsKnown);
      UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown;

      if (!SPMDCompatibilityTracker.empty()) {
        if (!ParallelLevels.isValidState())
          SPMDCompatibilityTracker.indicatePessimisticFixpoint();
        else if (!ReachingKernelEntries.isValidState())
          SPMDCompatibilityTracker.indicatePessimisticFixpoint();
        else {
          // Check if all reaching kernels agree on the mode as we can
          // otherwise not guard instructions. We might not be sure about the
          // mode so we cannot fix the internal spmd-zation state either.
          int SPMD = 0, Generic = 0;
          for (auto *Kernel : ReachingKernelEntries) {
            auto *CBAA = A.getAAFor<AAKernelInfo>(
                *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL);
            if (CBAA && CBAA->SPMDCompatibilityTracker.isValidState() &&
                CBAA->SPMDCompatibilityTracker.isAssumed())
              ++SPMD;
            else
              ++Generic;
            if (!CBAA || !CBAA->SPMDCompatibilityTracker.isAtFixpoint())
              UsedAssumedInformationFromReachingKernels = true;
          }
          if (SPMD != 0 && Generic != 0)
            SPMDCompatibilityTracker.indicatePessimisticFixpoint();
        }
      }
    }

    // Callback to check a call instruction.
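    // It merges each callee's KernelInfoState into ours and tracks whether
    // all contributing states were already at a fixpoint.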
4508 bool AllParallelRegionStatesWereFixed = true; 4509 bool AllSPMDStatesWereFixed = true; 4510 auto CheckCallInst = [&](Instruction &I) { 4511 auto &CB = cast<CallBase>(I); 4512 auto *CBAA = A.getAAFor<AAKernelInfo>( 4513 *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL); 4514 if (!CBAA) 4515 return false; 4516 getState() ^= CBAA->getState(); 4517 AllSPMDStatesWereFixed &= CBAA->SPMDCompatibilityTracker.isAtFixpoint(); 4518 AllParallelRegionStatesWereFixed &= 4519 CBAA->ReachedKnownParallelRegions.isAtFixpoint(); 4520 AllParallelRegionStatesWereFixed &= 4521 CBAA->ReachedUnknownParallelRegions.isAtFixpoint(); 4522 return true; 4523 }; 4524 4525 bool UsedAssumedInformationInCheckCallInst = false; 4526 if (!A.checkForAllCallLikeInstructions( 4527 CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) { 4528 LLVM_DEBUG(dbgs() << TAG 4529 << "Failed to visit all call-like instructions!\n";); 4530 return indicatePessimisticFixpoint(); 4531 } 4532 4533 // If we haven't used any assumed information for the reached parallel 4534 // region states we can fix it. 4535 if (!UsedAssumedInformationInCheckCallInst && 4536 AllParallelRegionStatesWereFixed) { 4537 ReachedKnownParallelRegions.indicateOptimisticFixpoint(); 4538 ReachedUnknownParallelRegions.indicateOptimisticFixpoint(); 4539 } 4540 4541 // If we haven't used any assumed information for the SPMD state we can fix 4542 // it. 4543 if (!UsedAssumedInformationInCheckRWInst && 4544 !UsedAssumedInformationInCheckCallInst && 4545 !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed) 4546 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 4547 4548 return StateBefore == getState() ? ChangeStatus::UNCHANGED 4549 : ChangeStatus::CHANGED; 4550 } 4551 4552 private: 4553 /// Update info regarding reaching kernels. 4554 void updateReachingKernelEntries(Attributor &A, 4555 bool &AllReachingKernelsKnown) { 4556 auto PredCallSite = [&](AbstractCallSite ACS) { 4557 Function *Caller = ACS.getInstruction()->getFunction(); 4558 4559 assert(Caller && "Caller is nullptr"); 4560 4561 auto *CAA = A.getOrCreateAAFor<AAKernelInfo>( 4562 IRPosition::function(*Caller), this, DepClassTy::REQUIRED); 4563 if (CAA && CAA->ReachingKernelEntries.isValidState()) { 4564 ReachingKernelEntries ^= CAA->ReachingKernelEntries; 4565 return true; 4566 } 4567 4568 // We lost track of the caller of the associated function, any kernel 4569 // could reach now. 4570 ReachingKernelEntries.indicatePessimisticFixpoint(); 4571 4572 return true; 4573 }; 4574 4575 if (!A.checkForAllCallSites(PredCallSite, *this, 4576 true /* RequireAllCallSites */, 4577 AllReachingKernelsKnown)) 4578 ReachingKernelEntries.indicatePessimisticFixpoint(); 4579 } 4580 4581 /// Update info regarding parallel levels. 4582 void updateParallelLevels(Attributor &A) { 4583 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4584 OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI = 4585 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 4586 4587 auto PredCallSite = [&](AbstractCallSite ACS) { 4588 Function *Caller = ACS.getInstruction()->getFunction(); 4589 4590 assert(Caller && "Caller is nullptr"); 4591 4592 auto *CAA = 4593 A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller)); 4594 if (CAA && CAA->ParallelLevels.isValidState()) { 4595 // Any function that is called by `__kmpc_parallel_51` will not be 4596 // folded as the parallel level in the function is updated. 
        // To get this right, the analysis would have to depend on the runtime
        // implementation, and any future change to that implementation could
        // silently invalidate the result. As a consequence, we are simply
        // conservative here.
        if (Caller == Parallel51RFI.Declaration) {
          ParallelLevels.indicatePessimisticFixpoint();
          return true;
        }

        ParallelLevels ^= CAA->ParallelLevels;

        return true;
      }

      // We lost track of the caller of the associated function, any kernel
      // could reach now.
      ParallelLevels.indicatePessimisticFixpoint();

      return true;
    };

    bool AllCallSitesKnown = true;
    if (!A.checkForAllCallSites(PredCallSite, *this,
                                true /* RequireAllCallSites */,
                                AllCallSitesKnown))
      ParallelLevels.indicatePessimisticFixpoint();
  }
};

/// The call site kernel info abstract attribute, basically, what can we say
/// about a call site with regards to the KernelInfoState. For now this simply
/// forwards the information from the callee.
struct AAKernelInfoCallSite : AAKernelInfo {
  AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAKernelInfo(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAKernelInfo::initialize(A);

    CallBase &CB = cast<CallBase>(getAssociatedValue());
    Function *Callee = getAssociatedFunction();

    auto *AssumptionAA = A.getAAFor<AAAssumptionInfo>(
        *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);

    // Check for SPMD-mode assumptions.
    if (AssumptionAA && AssumptionAA->hasAssumption("ompx_spmd_amenable")) {
      SPMDCompatibilityTracker.indicateOptimisticFixpoint();
      indicateOptimisticFixpoint();
    }

    // First weed out calls we do not care about, that is readonly/readnone
    // calls, intrinsics, and "no_openmp" calls. None of these can reach a
    // parallel region or anything else we are looking for.
    if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) {
      indicateOptimisticFixpoint();
      return;
    }

    // Next we check if we know the callee. If it is a known OpenMP function
    // we will handle it explicitly in the switch below. If it is not, we will
    // use an AAKernelInfo object on the callee to gather information and
    // merge that into the current state. The latter happens in updateImpl.
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
    if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
      // Unknown callees or declarations are not analyzable, we give up.
      if (!Callee || !A.isFunctionIPOAmendable(*Callee)) {

        // Unknown callees might contain parallel regions, except if they have
        // an appropriate assumption attached.
        if (!AssumptionAA ||
            !(AssumptionAA->hasAssumption("omp_no_openmp") ||
              AssumptionAA->hasAssumption("omp_no_parallelism")))
          ReachedUnknownParallelRegions.insert(&CB);

        // If SPMDCompatibilityTracker is not fixed, we need to give up on the
        // idea we can run something unknown in SPMD-mode.
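        // (Unless the callee carried an assumption such as
        // "ompx_spmd_amenable", in which case the tracker was already fixed
        // above.)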
4675 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 4676 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4677 SPMDCompatibilityTracker.insert(&CB); 4678 } 4679 4680 // We have updated the state for this unknown call properly, there won't 4681 // be any change so we indicate a fixpoint. 4682 indicateOptimisticFixpoint(); 4683 } 4684 // If the callee is known and can be used in IPO, we will update the state 4685 // based on the callee state in updateImpl. 4686 return; 4687 } 4688 4689 const unsigned int WrapperFunctionArgNo = 6; 4690 RuntimeFunction RF = It->getSecond(); 4691 switch (RF) { 4692 // All the functions we know are compatible with SPMD mode. 4693 case OMPRTL___kmpc_is_spmd_exec_mode: 4694 case OMPRTL___kmpc_distribute_static_fini: 4695 case OMPRTL___kmpc_for_static_fini: 4696 case OMPRTL___kmpc_global_thread_num: 4697 case OMPRTL___kmpc_get_hardware_num_threads_in_block: 4698 case OMPRTL___kmpc_get_hardware_num_blocks: 4699 case OMPRTL___kmpc_single: 4700 case OMPRTL___kmpc_end_single: 4701 case OMPRTL___kmpc_master: 4702 case OMPRTL___kmpc_end_master: 4703 case OMPRTL___kmpc_barrier: 4704 case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2: 4705 case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2: 4706 case OMPRTL___kmpc_nvptx_end_reduce_nowait: 4707 break; 4708 case OMPRTL___kmpc_distribute_static_init_4: 4709 case OMPRTL___kmpc_distribute_static_init_4u: 4710 case OMPRTL___kmpc_distribute_static_init_8: 4711 case OMPRTL___kmpc_distribute_static_init_8u: 4712 case OMPRTL___kmpc_for_static_init_4: 4713 case OMPRTL___kmpc_for_static_init_4u: 4714 case OMPRTL___kmpc_for_static_init_8: 4715 case OMPRTL___kmpc_for_static_init_8u: { 4716 // Check the schedule and allow static schedule in SPMD mode. 4717 unsigned ScheduleArgOpNo = 2; 4718 auto *ScheduleTypeCI = 4719 dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo)); 4720 unsigned ScheduleTypeVal = 4721 ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0; 4722 switch (OMPScheduleType(ScheduleTypeVal)) { 4723 case OMPScheduleType::UnorderedStatic: 4724 case OMPScheduleType::UnorderedStaticChunked: 4725 case OMPScheduleType::OrderedDistribute: 4726 case OMPScheduleType::OrderedDistributeChunked: 4727 break; 4728 default: 4729 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4730 SPMDCompatibilityTracker.insert(&CB); 4731 break; 4732 }; 4733 } break; 4734 case OMPRTL___kmpc_target_init: 4735 KernelInitCB = &CB; 4736 break; 4737 case OMPRTL___kmpc_target_deinit: 4738 KernelDeinitCB = &CB; 4739 break; 4740 case OMPRTL___kmpc_parallel_51: 4741 if (auto *ParallelRegion = dyn_cast<Function>( 4742 CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) { 4743 ReachedKnownParallelRegions.insert(ParallelRegion); 4744 /// Check nested parallelism 4745 auto *FnAA = A.getAAFor<AAKernelInfo>( 4746 *this, IRPosition::function(*ParallelRegion), DepClassTy::OPTIONAL); 4747 NestedParallelism |= !FnAA || !FnAA->getState().isValidState() || 4748 !FnAA->ReachedKnownParallelRegions.empty() || 4749 !FnAA->ReachedUnknownParallelRegions.empty(); 4750 break; 4751 } 4752 // The condition above should usually get the parallel region function 4753 // pointer and record it. In the off chance it doesn't we assume the 4754 // worst. 4755 ReachedUnknownParallelRegions.insert(&CB); 4756 break; 4757 case OMPRTL___kmpc_omp_task: 4758 // We do not look into tasks right now, just give up. 
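      // A task may execute on any thread at any time, so we treat it like an
      // unknown parallel region.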
4759 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4760 SPMDCompatibilityTracker.insert(&CB); 4761 ReachedUnknownParallelRegions.insert(&CB); 4762 break; 4763 case OMPRTL___kmpc_alloc_shared: 4764 case OMPRTL___kmpc_free_shared: 4765 // Return without setting a fixpoint, to be resolved in updateImpl. 4766 return; 4767 default: 4768 // Unknown OpenMP runtime calls cannot be executed in SPMD-mode, 4769 // generally. However, they do not hide parallel regions. 4770 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4771 SPMDCompatibilityTracker.insert(&CB); 4772 break; 4773 } 4774 // All other OpenMP runtime calls will not reach parallel regions so they 4775 // can be safely ignored for now. Since it is a known OpenMP runtime call we 4776 // have now modeled all effects and there is no need for any update. 4777 indicateOptimisticFixpoint(); 4778 } 4779 4780 ChangeStatus updateImpl(Attributor &A) override { 4781 // TODO: Once we have call site specific value information we can provide 4782 // call site specific liveness information and then it makes 4783 // sense to specialize attributes for call sites arguments instead of 4784 // redirecting requests to the callee argument. 4785 Function *F = getAssociatedFunction(); 4786 4787 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4788 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F); 4789 4790 // If F is not a runtime function, propagate the AAKernelInfo of the callee. 4791 if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) { 4792 const IRPosition &FnPos = IRPosition::function(*F); 4793 auto *FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED); 4794 if (!FnAA) 4795 return indicatePessimisticFixpoint(); 4796 if (getState() == FnAA->getState()) 4797 return ChangeStatus::UNCHANGED; 4798 getState() = FnAA->getState(); 4799 return ChangeStatus::CHANGED; 4800 } 4801 4802 // F is a runtime function that allocates or frees memory, check 4803 // AAHeapToStack and AAHeapToShared. 4804 KernelInfoState StateBefore = getState(); 4805 assert((It->getSecond() == OMPRTL___kmpc_alloc_shared || 4806 It->getSecond() == OMPRTL___kmpc_free_shared) && 4807 "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call"); 4808 4809 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4810 4811 auto *HeapToStackAA = A.getAAFor<AAHeapToStack>( 4812 *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL); 4813 auto *HeapToSharedAA = A.getAAFor<AAHeapToShared>( 4814 *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL); 4815 4816 RuntimeFunction RF = It->getSecond(); 4817 4818 switch (RF) { 4819 // If neither HeapToStack nor HeapToShared assume the call is removed, 4820 // assume SPMD incompatibility. 4821 case OMPRTL___kmpc_alloc_shared: 4822 if ((!HeapToStackAA || !HeapToStackAA->isAssumedHeapToStack(CB)) && 4823 (!HeapToSharedAA || !HeapToSharedAA->isAssumedHeapToShared(CB))) 4824 SPMDCompatibilityTracker.insert(&CB); 4825 break; 4826 case OMPRTL___kmpc_free_shared: 4827 if ((!HeapToStackAA || 4828 !HeapToStackAA->isAssumedHeapToStackRemovedFree(CB)) && 4829 (!HeapToSharedAA || 4830 !HeapToSharedAA->isAssumedHeapToSharedRemovedFree(CB))) 4831 SPMDCompatibilityTracker.insert(&CB); 4832 break; 4833 default: 4834 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4835 SPMDCompatibilityTracker.insert(&CB); 4836 } 4837 4838 return StateBefore == getState() ? 
  }
};

struct AAFoldRuntimeCall
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Statistics are tracked as part of manifest for now.
  void trackStatistics() const override {}

  /// Create an abstract attribute view for the position \p IRP.
  static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAFoldRuntimeCall"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAFoldRuntimeCall.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  static const char ID;
};

struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
  AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAFoldRuntimeCall(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr(Attributor *) const override {
    if (!isValidState())
      return "<invalid>";

    std::string Str("simplified value: ");

    if (!SimplifiedValue)
      return Str + std::string("none");

    if (!*SimplifiedValue)
      return Str + std::string("nullptr");

    if (ConstantInt *CI = dyn_cast<ConstantInt>(*SimplifiedValue))
      return Str + std::to_string(CI->getSExtValue());

    return Str + std::string("unknown");
  }

  void initialize(Attributor &A) override {
    if (DisableOpenMPOptFolding)
      indicatePessimisticFixpoint();

    Function *Callee = getAssociatedFunction();

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
    assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() &&
           "Expected a known OpenMP runtime function");

    RFKind = It->getSecond();

    CallBase &CB = cast<CallBase>(getAssociatedValue());
    A.registerSimplificationCallback(
        IRPosition::callsite_returned(CB),
        [&](const IRPosition &IRP, const AbstractAttribute *AA,
            bool &UsedAssumedInformation) -> std::optional<Value *> {
          assert((isValidState() ||
                  (SimplifiedValue && *SimplifiedValue == nullptr)) &&
                 "Unexpected invalid state!");

          if (!isAtFixpoint()) {
            UsedAssumedInformation = true;
            if (AA)
              A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
          }
          return SimplifiedValue;
        });
  }

  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    switch (RFKind) {
    case OMPRTL___kmpc_is_spmd_exec_mode:
      Changed |= foldIsSPMDExecMode(A);
      break;
    case OMPRTL___kmpc_parallel_level:
      Changed |= foldParallelLevel(A);
      break;
    case OMPRTL___kmpc_get_hardware_num_threads_in_block:
      Changed |= foldKernelFnAttribute(A, "omp_target_thread_limit");
      break;
    case OMPRTL___kmpc_get_hardware_num_blocks:
      Changed |= foldKernelFnAttribute(A, "omp_target_num_teams");
      break;
    default:
      llvm_unreachable("Unhandled OpenMP runtime function!");
    }

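    // Note: each folder above reports CHANGED only if it actually moved
    // SimplifiedValue, so the value returned below keeps the Attributor's
    // fixpoint iteration monotone.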
    return Changed;
  }

  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (SimplifiedValue && *SimplifiedValue) {
      Instruction &I = *getCtxI();
      A.changeAfterManifest(IRPosition::inst(I), **SimplifiedValue);
      A.deleteAfterManifest(I);

      CallBase *CB = dyn_cast<CallBase>(&I);
      auto Remark = [&](OptimizationRemark OR) {
        if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue))
          return OR << "Replacing OpenMP runtime call "
                    << CB->getCalledFunction()->getName() << " with "
                    << ore::NV("FoldedValue", C->getZExtValue()) << ".";
        return OR << "Replacing OpenMP runtime call "
                  << CB->getCalledFunction()->getName() << ".";
      };

      if (CB && EnableVerboseRemarks)
        A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark);

      LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with "
                        << **SimplifiedValue << "\n");

      Changed = ChangeStatus::CHANGED;
    }

    return Changed;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    SimplifiedValue = nullptr;
    return AAFoldRuntimeCall::indicatePessimisticFixpoint();
  }

private:
  /// Fold __kmpc_is_spmd_exec_mode into a constant if possible.
  ChangeStatus foldIsSPMDExecMode(Attributor &A) {
    std::optional<Value *> SimplifiedValueBefore = SimplifiedValue;

    unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
    unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
    auto *CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
        *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);

    if (!CallerKernelInfoAA ||
        !CallerKernelInfoAA->ReachingKernelEntries.isValidState())
      return indicatePessimisticFixpoint();

    for (Kernel K : CallerKernelInfoAA->ReachingKernelEntries) {
      auto *AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
                                          DepClassTy::REQUIRED);

      if (!AA || !AA->isValidState()) {
        SimplifiedValue = nullptr;
        return indicatePessimisticFixpoint();
      }

      if (AA->SPMDCompatibilityTracker.isAssumed()) {
        if (AA->SPMDCompatibilityTracker.isAtFixpoint())
          ++KnownSPMDCount;
        else
          ++AssumedSPMDCount;
      } else {
        if (AA->SPMDCompatibilityTracker.isAtFixpoint())
          ++KnownNonSPMDCount;
        else
          ++AssumedNonSPMDCount;
      }
    }

    if ((AssumedSPMDCount + KnownSPMDCount) &&
        (AssumedNonSPMDCount + KnownNonSPMDCount))
      return indicatePessimisticFixpoint();

    auto &Ctx = getAnchorValue().getContext();
    if (KnownSPMDCount || AssumedSPMDCount) {
      assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
             "Expected only SPMD kernels!");
      // All reaching kernels are in SPMD mode. Update all function calls to
      // __kmpc_is_spmd_exec_mode to 1.
      SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
    } else if (KnownNonSPMDCount || AssumedNonSPMDCount) {
      assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
             "Expected only non-SPMD kernels!");
      // All reaching kernels are in non-SPMD mode. Update all function
      // calls to __kmpc_is_spmd_exec_mode to 0.
      SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false);
    } else {
      // We have empty reaching kernels, therefore we cannot tell if the
      // associated call site can be folded. At this moment, SimplifiedValue
      // must be none.
      assert(!SimplifiedValue && "SimplifiedValue should be none");
    }

    return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
                                                    : ChangeStatus::CHANGED;
  }

  /// Fold __kmpc_parallel_level into a constant if possible.
  ChangeStatus foldParallelLevel(Attributor &A) {
    std::optional<Value *> SimplifiedValueBefore = SimplifiedValue;

    auto *CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
        *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);

    if (!CallerKernelInfoAA ||
        !CallerKernelInfoAA->ParallelLevels.isValidState())
      return indicatePessimisticFixpoint();

    if (!CallerKernelInfoAA->ReachingKernelEntries.isValidState())
      return indicatePessimisticFixpoint();

    if (CallerKernelInfoAA->ReachingKernelEntries.empty()) {
      assert(!SimplifiedValue &&
             "SimplifiedValue should be none at this point");
      return ChangeStatus::UNCHANGED;
    }

    unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
    unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
    for (Kernel K : CallerKernelInfoAA->ReachingKernelEntries) {
      auto *AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
                                          DepClassTy::REQUIRED);
      if (!AA || !AA->SPMDCompatibilityTracker.isValidState())
        return indicatePessimisticFixpoint();

      if (AA->SPMDCompatibilityTracker.isAssumed()) {
        if (AA->SPMDCompatibilityTracker.isAtFixpoint())
          ++KnownSPMDCount;
        else
          ++AssumedSPMDCount;
      } else {
        if (AA->SPMDCompatibilityTracker.isAtFixpoint())
          ++KnownNonSPMDCount;
        else
          ++AssumedNonSPMDCount;
      }
    }

    if ((AssumedSPMDCount + KnownSPMDCount) &&
        (AssumedNonSPMDCount + KnownNonSPMDCount))
      return indicatePessimisticFixpoint();

    auto &Ctx = getAnchorValue().getContext();
    // If the caller can only be reached by SPMD kernel entries, the parallel
    // level is 1. Similarly, if the caller can only be reached by non-SPMD
    // kernel entries, it is 0.
    if (AssumedSPMDCount || KnownSPMDCount) {
      assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
             "Expected only SPMD kernels!");
      SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1);
    } else {
      assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
             "Expected only non-SPMD kernels!");
      SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0);
    }
    return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
                                                    : ChangeStatus::CHANGED;
  }

  ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) {
    // Specialize only if all reaching kernels agree on the attribute's
    // constant value.
    int32_t CurrentAttrValue = -1;
    std::optional<Value *> SimplifiedValueBefore = SimplifiedValue;

    auto *CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
        *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);

    if (!CallerKernelInfoAA ||
        !CallerKernelInfoAA->ReachingKernelEntries.isValidState())
      return indicatePessimisticFixpoint();

    // Iterate over the kernels that reach this function.
    for (Kernel K : CallerKernelInfoAA->ReachingKernelEntries) {
      int32_t NextAttrVal = K->getFnAttributeAsParsedInteger(Attr, -1);

      if (NextAttrVal == -1 ||
          (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal))
        return indicatePessimisticFixpoint();
      CurrentAttrValue = NextAttrVal;
    }

    if (CurrentAttrValue != -1) {
      auto &Ctx = getAnchorValue().getContext();
      SimplifiedValue =
          ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue);
    }
    return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
                                                    : ChangeStatus::CHANGED;
  }

  /// An optional value the associated value is assumed to fold to. That is, we
  /// assume the associated value (which is a call) can be replaced by this
  /// simplified value.
  std::optional<Value *> SimplifiedValue;

  /// The runtime function kind of the callee of the associated call site.
  RuntimeFunction RFKind;
};

} // namespace

/// Register an AAFoldRuntimeCall attribute for every call site of the runtime
/// function \p RF.
void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) {
  auto &RFI = OMPInfoCache.RFIs[RF];
  RFI.foreachUse(SCC, [&](Use &U, Function &F) {
    CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI);
    if (!CI)
      return false;
    A.getOrCreateAAFor<AAFoldRuntimeCall>(
        IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr,
        DepClassTy::NONE, /* ForceUpdate */ false,
        /* UpdateAfterInit */ false);
    return false;
  });
}

void OpenMPOpt::registerAAs(bool IsModulePass) {
  if (SCC.empty())
    return;

  if (IsModulePass) {
    // Ensure we create the AAKernelInfo AAs first and without triggering an
    // update. This will make sure we register all value simplification
    // callbacks before any other AA has the chance to create an
    // AAValueSimplify or similar.
    auto CreateKernelInfoCB = [&](Use &, Function &Kernel) {
      A.getOrCreateAAFor<AAKernelInfo>(
          IRPosition::function(Kernel), /* QueryingAA */ nullptr,
          DepClassTy::NONE, /* ForceUpdate */ false,
          /* UpdateAfterInit */ false);
      return false;
    };
    OMPInformationCache::RuntimeFunctionInfo &InitRFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
    InitRFI.foreachUse(SCC, CreateKernelInfoCB);

    registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode);
    registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level);
    registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block);
    registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks);
  }

  // Create CallSite AAs for all ICV getters.
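  // For illustration: every internal control variable (ICV) is paired with a
  // getter runtime call, e.g., the nthreads ICV is typically read through
  // omp_get_max_threads. Registering an AAICVTracker at such call sites lets
  // the tracker reason about the getter's return value.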
  if (DeduceICVValues) {
    for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
      auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];

      auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];

      auto CreateAA = [&](Use &U, Function &Caller) {
        CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
        if (!CI)
          return false;

        auto &CB = cast<CallBase>(*CI);

        IRPosition CBPos = IRPosition::callsite_function(CB);
        A.getOrCreateAAFor<AAICVTracker>(CBPos);
        return false;
      };

      GetterRFI.foreachUse(SCC, CreateAA);
    }
  }

  // Create an ExecutionDomain AA for every function and a HeapToStack AA for
  // every function if there is a device kernel.
  if (!isOpenMPDevice(M))
    return;

  for (auto *F : SCC) {
    if (F->isDeclaration())
      continue;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or is outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [this](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   A.isRunOn(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }
    registerAAsForFunction(A, *F);
  }
}

void OpenMPOpt::registerAAsForFunction(Attributor &A, const Function &F) {
  if (!DisableOpenMPOptDeglobalization)
    A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F));
  A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(F));
  if (!DisableOpenMPOptDeglobalization)
    A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(F));
  if (F.hasFnAttribute(Attribute::Convergent))
    A.getOrCreateAAFor<AANonConvergent>(IRPosition::function(F));

  for (auto &I : instructions(F)) {
    if (auto *LI = dyn_cast<LoadInst>(&I)) {
      bool UsedAssumedInformation = false;
      A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr,
                             UsedAssumedInformation, AA::Interprocedural);
      continue;
    }
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI));
      continue;
    }
    if (auto *FI = dyn_cast<FenceInst>(&I)) {
      A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*FI));
      continue;
    }
    if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
      if (II->getIntrinsicID() == Intrinsic::assume) {
        A.getOrCreateAAFor<AAPotentialValues>(
            IRPosition::value(*II->getArgOperand(0)));
        continue;
      }
    }
  }
}

const char AAICVTracker::ID = 0;
const char AAKernelInfo::ID = 0;
const char AAExecutionDomain::ID = 0;
const char AAHeapToShared::ID = 0;
const char AAFoldRuntimeCall::ID = 0;

AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
                                              Attributor &A) {
  AAICVTracker *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    llvm_unreachable("ICVTracker can only be created for function position!");
  case IRPosition::IRP_RETURNED:
    AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
    break;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
    break;
  case IRPosition::IRP_CALL_SITE:
    AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
    break;
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
    break;
  }

  return *AA;
}

AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP,
                                                        Attributor &A) {
  AAExecutionDomainFunction *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE:
    llvm_unreachable(
        "AAExecutionDomain can only be created for function position!");
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A);
    break;
  }

  return *AA;
}

AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP,
                                                  Attributor &A) {
  AAHeapToSharedFunction *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE:
    llvm_unreachable(
        "AAHeapToShared can only be created for function position!");
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A);
    break;
  }

  return *AA;
}

AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP,
                                              Attributor &A) {
  AAKernelInfo *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    llvm_unreachable("KernelInfo can only be created for function position!");
  case IRPosition::IRP_CALL_SITE:
    AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A);
    break;
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAKernelInfoFunction(IRP, A);
    break;
  }

  return *AA;
}

AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP,
                                                        Attributor &A) {
  AAFoldRuntimeCall *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    llvm_unreachable(
        "AAFoldRuntimeCall can only be created for call site position!");
  case IRPosition::IRP_CALL_SITE_RETURNED:
    AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A);
    break;
  }

  return *AA;
}

PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
  if (!containsOpenMP(M))
    return PreservedAnalyses::all();
  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  KernelSet Kernels = getDeviceKernels(M);

  if (PrintModuleBeforeOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt Module Pass:\n" << M);

  auto IsCalled = [&](Function &F) {
    if (Kernels.contains(&F))
      return true;
    for (const User *U : F.users())
      if (!isa<BlockAddress>(U))
        return true;
    return false;
  };

  auto EmitRemark = [&](Function &F) {
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
    ORE.emit([&]() {
      OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F);
      return ORA << "Could not internalize function. "
                 << "Some optimizations may not be possible. [OMP140]";
    });
  };

  bool Changed = false;

  // Create internal copies of each function if this is a kernel module. This
  // allows interprocedural passes to see every call edge.
  DenseMap<Function *, Function *> InternalizedMap;
  if (isOpenMPDevice(M)) {
    SmallPtrSet<Function *, 16> InternalizeFns;
    for (Function &F : M)
      if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) &&
          !DisableInternalization) {
        if (Attributor::isInternalizable(F)) {
          InternalizeFns.insert(&F);
        } else if (!F.hasLocalLinkage() &&
                   !F.hasFnAttribute(Attribute::Cold)) {
          EmitRemark(F);
        }
      }

    Changed |=
        Attributor::internalizeFunctions(InternalizeFns, InternalizedMap);
  }

  // Look at every function in the module unless it was internalized.
  SetVector<Function *> Functions;
  SmallVector<Function *, 16> SCC;
  for (Function &F : M)
    if (!F.isDeclaration() && !InternalizedMap.lookup(&F)) {
      SCC.push_back(&F);
      Functions.insert(&F);
    }

  if (SCC.empty())
    return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  BumpPtrAllocator Allocator;
  CallGraphUpdater CGUpdater;

  bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink ||
                  LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink;
  OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ nullptr, PostLink);

  unsigned MaxFixpointIterations =
      (isOpenMPDevice(M)) ? SetFixpointIterations : 32;

  AttributorConfig AC(CGUpdater);
  AC.DefaultInitializeLiveInternals = false;
  AC.IsModulePass = true;
  AC.RewriteSignatures = false;
  AC.MaxFixpointIterations = MaxFixpointIterations;
  AC.OREGetter = OREGetter;
  AC.PassName = DEBUG_TYPE;
  AC.InitializationCallback = OpenMPOpt::registerAAsForFunction;
  AC.IPOAmendableCB = [](const Function &F) {
    return F.hasFnAttribute("kernel");
  };

  Attributor A(Functions, InfoCache, AC);

  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  Changed |= OMPOpt.run(true);

  // Optionally inline device functions for potentially better performance.
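  // Note that this only marks the functions: the actual inlining is done by a
  // later pass that honors the always-inline attribute, and functions the
  // user marked noinline are deliberately skipped below.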
  if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M))
    for (Function &F : M)
      if (!F.isDeclaration() && !Kernels.contains(&F) &&
          !F.hasFnAttribute(Attribute::NoInline))
        F.addFnAttr(Attribute::AlwaysInline);

  if (PrintModuleAfterOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M);

  if (Changed)
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}

PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
                                          CGSCCAnalysisManager &AM,
                                          LazyCallGraph &CG,
                                          CGSCCUpdateResult &UR) {
  if (!containsOpenMP(*C.begin()->getFunction().getParent()))
    return PreservedAnalyses::all();
  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  SmallVector<Function *, 16> SCC;
  // If there are kernels in the module, we have to run on all SCCs.
  for (LazyCallGraph::Node &N : C) {
    Function *Fn = &N.getFunction();
    SCC.push_back(Fn);
  }

  if (SCC.empty())
    return PreservedAnalyses::all();

  Module &M = *C.begin()->getFunction().getParent();

  if (PrintModuleBeforeOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt CGSCC Pass:\n" << M);

  KernelSet Kernels = getDeviceKernels(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  BumpPtrAllocator Allocator;
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);

  bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink ||
                  LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink;
  SetVector<Function *> Functions(SCC.begin(), SCC.end());
  OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
                                /*CGSCC*/ &Functions, PostLink);

  unsigned MaxFixpointIterations =
      (isOpenMPDevice(M)) ? SetFixpointIterations : 32;

  AttributorConfig AC(CGUpdater);
  AC.DefaultInitializeLiveInternals = false;
  AC.IsModulePass = false;
  AC.RewriteSignatures = false;
  AC.MaxFixpointIterations = MaxFixpointIterations;
  AC.OREGetter = OREGetter;
  AC.PassName = DEBUG_TYPE;
  AC.InitializationCallback = OpenMPOpt::registerAAsForFunction;

  Attributor A(Functions, InfoCache, AC);

  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  bool Changed = OMPOpt.run(false);

  if (PrintModuleAfterOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);

  if (Changed)
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}

bool llvm::omp::isKernel(Function &Fn) { return Fn.hasFnAttribute("kernel"); }

KernelSet llvm::omp::getDeviceKernels(Module &M) {
  // TODO: Create a more cross-platform way of determining device kernels.
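  // For illustration (assuming the NVPTX convention used below), a kernel is
  // announced via module-level metadata roughly of the form:
  //   !nvvm.annotations = !{!0}
  //   !0 = !{ptr @__omp_offloading_kernel, !"kernel", i32 1}
  // where @__omp_offloading_kernel stands in for the mangled kernel name.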
  NamedMDNode *MD = M.getNamedMetadata("nvvm.annotations");
  KernelSet Kernels;

  if (!MD)
    return Kernels;

  for (auto *Op : MD->operands()) {
    if (Op->getNumOperands() < 2)
      continue;
    MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
    if (!KindID || KindID->getString() != "kernel")
      continue;

    Function *KernelFn =
        mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
    if (!KernelFn)
      continue;

    assert(isKernel(*KernelFn) && "Inconsistent kernel function annotation");
    ++NumOpenMPTargetRegionKernels;

    Kernels.insert(KernelFn);
  }

  return Kernels;
}

bool llvm::omp::containsOpenMP(Module &M) {
  return M.getModuleFlag("openmp") != nullptr;
}

bool llvm::omp::isOpenMPDevice(Module &M) {
  return M.getModuleFlag("openmp-device") != nullptr;
}
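// For reference, the two module flags queried above are emitted by the
// frontend; the IR typically looks roughly like this (the flag values encode
// the OpenMP version and are illustrative):
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 7, !"openmp", i32 50}
//   !1 = !{i32 7, !"openmp-device", i32 50}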