1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // OpenMP specific optimizations: 10 // 11 // - Deduplication of runtime calls, e.g., omp_get_thread_num. 12 // - Replacing globalized device memory with stack memory. 13 // - Replacing globalized device memory with shared memory. 14 // - Parallel region merging. 15 // - Transforming generic-mode device kernels to SPMD mode. 16 // - Specializing the state machine for generic-mode device kernels. 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "llvm/Transforms/IPO/OpenMPOpt.h" 21 22 #include "llvm/ADT/EnumeratedArray.h" 23 #include "llvm/ADT/PostOrderIterator.h" 24 #include "llvm/ADT/SetVector.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/Statistic.h" 27 #include "llvm/ADT/StringRef.h" 28 #include "llvm/Analysis/CallGraph.h" 29 #include "llvm/Analysis/CallGraphSCCPass.h" 30 #include "llvm/Analysis/MemoryLocation.h" 31 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 32 #include "llvm/Analysis/ValueTracking.h" 33 #include "llvm/Frontend/OpenMP/OMPConstants.h" 34 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 35 #include "llvm/IR/Assumptions.h" 36 #include "llvm/IR/BasicBlock.h" 37 #include "llvm/IR/Constants.h" 38 #include "llvm/IR/DiagnosticInfo.h" 39 #include "llvm/IR/GlobalValue.h" 40 #include "llvm/IR/GlobalVariable.h" 41 #include "llvm/IR/Instruction.h" 42 #include "llvm/IR/Instructions.h" 43 #include "llvm/IR/IntrinsicInst.h" 44 #include "llvm/IR/IntrinsicsAMDGPU.h" 45 #include "llvm/IR/IntrinsicsNVPTX.h" 46 #include "llvm/IR/LLVMContext.h" 47 #include "llvm/InitializePasses.h" 48 #include "llvm/Support/CommandLine.h" 49 #include "llvm/Support/Debug.h" 50 #include "llvm/Transforms/IPO/Attributor.h" 51 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 52 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 53 54 #include <algorithm> 55 #include <optional> 56 #include <string> 57 58 using namespace llvm; 59 using namespace omp; 60 61 #define DEBUG_TYPE "openmp-opt" 62 63 static cl::opt<bool> DisableOpenMPOptimizations( 64 "openmp-opt-disable", cl::desc("Disable OpenMP specific optimizations."), 65 cl::Hidden, cl::init(false)); 66 67 static cl::opt<bool> EnableParallelRegionMerging( 68 "openmp-opt-enable-merging", 69 cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden, 70 cl::init(false)); 71 72 static cl::opt<bool> 73 DisableInternalization("openmp-opt-disable-internalization", 74 cl::desc("Disable function internalization."), 75 cl::Hidden, cl::init(false)); 76 77 static cl::opt<bool> DeduceICVValues("openmp-deduce-icv-values", 78 cl::init(false), cl::Hidden); 79 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false), 80 cl::Hidden); 81 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels", 82 cl::init(false), cl::Hidden); 83 84 static cl::opt<bool> HideMemoryTransferLatency( 85 "openmp-hide-memory-transfer-latency", 86 cl::desc("[WIP] Tries to hide the latency of host to device memory" 87 " transfers"), 88 cl::Hidden, cl::init(false)); 89 90 static cl::opt<bool> DisableOpenMPOptDeglobalization( 91 "openmp-opt-disable-deglobalization", 92 cl::desc("Disable OpenMP 
optimizations involving deglobalization."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptSPMDization(
    "openmp-opt-disable-spmdization",
    cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptFolding(
    "openmp-opt-disable-folding",
    cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
    "openmp-opt-disable-state-machine-rewrite",
    cl::desc("Disable OpenMP optimizations that replace the state machine."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptBarrierElimination(
    "openmp-opt-disable-barrier-elimination",
    cl::desc("Disable OpenMP optimizations that eliminate barriers."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleAfterOptimizations(
    "openmp-opt-print-module-after",
    cl::desc("Print the current module after OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleBeforeOptimizations(
    "openmp-opt-print-module-before",
    cl::desc("Print the current module before OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> AlwaysInlineDeviceFunctions(
    "openmp-opt-inline-device",
    cl::desc("Inline all applicable functions on the device."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    EnableVerboseRemarks("openmp-opt-verbose-remarks",
                         cl::desc("Enables more verbose remarks."), cl::Hidden,
                         cl::init(false));

static cl::opt<unsigned>
    SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of attributor iterations."),
                          cl::init(256));

static cl::opt<unsigned>
    SharedMemoryLimit("openmp-opt-shared-limit", cl::Hidden,
                      cl::desc("Maximum amount of shared memory to use."),
                      cl::init(std::numeric_limits<unsigned>::max()));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");
STATISTIC(NumOpenMPTargetRegionKernels,
          "Number of OpenMP target region entry points (=kernels) identified");
STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "SPMD-mode instead of generic-mode");
STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode without a state machine");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines with fallback");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines without fallback");
STATISTIC(
    NumOpenMPParallelRegionsReplacedInGPUStateMachine,
    "Number of OpenMP parallel regions replaced with ID in GPU state machines");
STATISTIC(NumOpenMPParallelRegionsMerged,
171 "Number of OpenMP parallel regions merged"); 172 STATISTIC(NumBytesMovedToSharedMemory, 173 "Amount of memory pushed to shared memory"); 174 STATISTIC(NumBarriersEliminated, "Number of redundant barriers eliminated"); 175 176 #if !defined(NDEBUG) 177 static constexpr auto TAG = "[" DEBUG_TYPE "]"; 178 #endif 179 180 namespace { 181 182 struct AAHeapToShared; 183 184 struct AAICVTracker; 185 186 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for 187 /// Attributor runs. 188 struct OMPInformationCache : public InformationCache { 189 OMPInformationCache(Module &M, AnalysisGetter &AG, 190 BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC, 191 KernelSet &Kernels, bool OpenMPPostLink) 192 : InformationCache(M, AG, Allocator, CGSCC), OMPBuilder(M), 193 Kernels(Kernels), OpenMPPostLink(OpenMPPostLink) { 194 195 OMPBuilder.initialize(); 196 initializeRuntimeFunctions(M); 197 initializeInternalControlVars(); 198 } 199 200 /// Generic information that describes an internal control variable. 201 struct InternalControlVarInfo { 202 /// The kind, as described by InternalControlVar enum. 203 InternalControlVar Kind; 204 205 /// The name of the ICV. 206 StringRef Name; 207 208 /// Environment variable associated with this ICV. 209 StringRef EnvVarName; 210 211 /// Initial value kind. 212 ICVInitValue InitKind; 213 214 /// Initial value. 215 ConstantInt *InitValue; 216 217 /// Setter RTL function associated with this ICV. 218 RuntimeFunction Setter; 219 220 /// Getter RTL function associated with this ICV. 221 RuntimeFunction Getter; 222 223 /// RTL Function corresponding to the override clause of this ICV 224 RuntimeFunction Clause; 225 }; 226 227 /// Generic information that describes a runtime function 228 struct RuntimeFunctionInfo { 229 230 /// The kind, as described by the RuntimeFunction enum. 231 RuntimeFunction Kind; 232 233 /// The name of the function. 234 StringRef Name; 235 236 /// Flag to indicate a variadic function. 237 bool IsVarArg; 238 239 /// The return type of the function. 240 Type *ReturnType; 241 242 /// The argument types of the function. 243 SmallVector<Type *, 8> ArgumentTypes; 244 245 /// The declaration if available. 246 Function *Declaration = nullptr; 247 248 /// Uses of this runtime function per function containing the use. 249 using UseVector = SmallVector<Use *, 16>; 250 251 /// Clear UsesMap for runtime function. 252 void clearUsesMap() { UsesMap.clear(); } 253 254 /// Boolean conversion that is true if the runtime function was found. 255 operator bool() const { return Declaration; } 256 257 /// Return the vector of uses in function \p F. 258 UseVector &getOrCreateUseVector(Function *F) { 259 std::shared_ptr<UseVector> &UV = UsesMap[F]; 260 if (!UV) 261 UV = std::make_shared<UseVector>(); 262 return *UV; 263 } 264 265 /// Return the vector of uses in function \p F or `nullptr` if there are 266 /// none. 267 const UseVector *getUseVector(Function &F) const { 268 auto I = UsesMap.find(&F); 269 if (I != UsesMap.end()) 270 return I->second.get(); 271 return nullptr; 272 } 273 274 /// Return how many functions contain uses of this runtime function. 275 size_t getNumFunctionsWithUses() const { return UsesMap.size(); } 276 277 /// Return the number of arguments (or the minimal number for variadic 278 /// functions). 279 size_t getNumArgs() const { return ArgumentTypes.size(); } 280 281 /// Run the callback \p CB on each use and forget the use if the result is 282 /// true. 
The callback will be fed the function in which the use was 283 /// encountered as second argument. 284 void foreachUse(SmallVectorImpl<Function *> &SCC, 285 function_ref<bool(Use &, Function &)> CB) { 286 for (Function *F : SCC) 287 foreachUse(CB, F); 288 } 289 290 /// Run the callback \p CB on each use within the function \p F and forget 291 /// the use if the result is true. 292 void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) { 293 SmallVector<unsigned, 8> ToBeDeleted; 294 ToBeDeleted.clear(); 295 296 unsigned Idx = 0; 297 UseVector &UV = getOrCreateUseVector(F); 298 299 for (Use *U : UV) { 300 if (CB(*U, *F)) 301 ToBeDeleted.push_back(Idx); 302 ++Idx; 303 } 304 305 // Remove the to-be-deleted indices in reverse order as prior 306 // modifications will not modify the smaller indices. 307 while (!ToBeDeleted.empty()) { 308 unsigned Idx = ToBeDeleted.pop_back_val(); 309 UV[Idx] = UV.back(); 310 UV.pop_back(); 311 } 312 } 313 314 private: 315 /// Map from functions to all uses of this runtime function contained in 316 /// them. 317 DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap; 318 319 public: 320 /// Iterators for the uses of this runtime function. 321 decltype(UsesMap)::iterator begin() { return UsesMap.begin(); } 322 decltype(UsesMap)::iterator end() { return UsesMap.end(); } 323 }; 324 325 /// An OpenMP-IR-Builder instance 326 OpenMPIRBuilder OMPBuilder; 327 328 /// Map from runtime function kind to the runtime function description. 329 EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction, 330 RuntimeFunction::OMPRTL___last> 331 RFIs; 332 333 /// Map from function declarations/definitions to their runtime enum type. 334 DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap; 335 336 /// Map from ICV kind to the ICV description. 337 EnumeratedArray<InternalControlVarInfo, InternalControlVar, 338 InternalControlVar::ICV___last> 339 ICVs; 340 341 /// Helper to initialize all internal control variable information for those 342 /// defined in OMPKinds.def. 343 void initializeInternalControlVars() { 344 #define ICV_RT_SET(_Name, RTL) \ 345 { \ 346 auto &ICV = ICVs[_Name]; \ 347 ICV.Setter = RTL; \ 348 } 349 #define ICV_RT_GET(Name, RTL) \ 350 { \ 351 auto &ICV = ICVs[Name]; \ 352 ICV.Getter = RTL; \ 353 } 354 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init) \ 355 { \ 356 auto &ICV = ICVs[Enum]; \ 357 ICV.Name = _Name; \ 358 ICV.Kind = Enum; \ 359 ICV.InitKind = Init; \ 360 ICV.EnvVarName = _EnvVarName; \ 361 switch (ICV.InitKind) { \ 362 case ICV_IMPLEMENTATION_DEFINED: \ 363 ICV.InitValue = nullptr; \ 364 break; \ 365 case ICV_ZERO: \ 366 ICV.InitValue = ConstantInt::get( \ 367 Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0); \ 368 break; \ 369 case ICV_FALSE: \ 370 ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext()); \ 371 break; \ 372 case ICV_LAST: \ 373 break; \ 374 } \ 375 } 376 #include "llvm/Frontend/OpenMP/OMPKinds.def" 377 } 378 379 /// Returns true if the function declaration \p F matches the runtime 380 /// function types, that is, return type \p RTFRetType, and argument types 381 /// \p RTFArgTypes. 382 static bool declMatchesRTFTypes(Function *F, Type *RTFRetType, 383 SmallVector<Type *, 8> &RTFArgTypes) { 384 // TODO: We should output information to the user (under debug output 385 // and via remarks). 
386 387 if (!F) 388 return false; 389 if (F->getReturnType() != RTFRetType) 390 return false; 391 if (F->arg_size() != RTFArgTypes.size()) 392 return false; 393 394 auto *RTFTyIt = RTFArgTypes.begin(); 395 for (Argument &Arg : F->args()) { 396 if (Arg.getType() != *RTFTyIt) 397 return false; 398 399 ++RTFTyIt; 400 } 401 402 return true; 403 } 404 405 // Helper to collect all uses of the declaration in the UsesMap. 406 unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) { 407 unsigned NumUses = 0; 408 if (!RFI.Declaration) 409 return NumUses; 410 OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration); 411 412 if (CollectStats) { 413 NumOpenMPRuntimeFunctionsIdentified += 1; 414 NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses(); 415 } 416 417 // TODO: We directly convert uses into proper calls and unknown uses. 418 for (Use &U : RFI.Declaration->uses()) { 419 if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) { 420 if (ModuleSlice.empty() || ModuleSlice.count(UserI->getFunction())) { 421 RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U); 422 ++NumUses; 423 } 424 } else { 425 RFI.getOrCreateUseVector(nullptr).push_back(&U); 426 ++NumUses; 427 } 428 } 429 return NumUses; 430 } 431 432 // Helper function to recollect uses of a runtime function. 433 void recollectUsesForFunction(RuntimeFunction RTF) { 434 auto &RFI = RFIs[RTF]; 435 RFI.clearUsesMap(); 436 collectUses(RFI, /*CollectStats*/ false); 437 } 438 439 // Helper function to recollect uses of all runtime functions. 440 void recollectUses() { 441 for (int Idx = 0; Idx < RFIs.size(); ++Idx) 442 recollectUsesForFunction(static_cast<RuntimeFunction>(Idx)); 443 } 444 445 // Helper function to inherit the calling convention of the function callee. 446 void setCallingConvention(FunctionCallee Callee, CallInst *CI) { 447 if (Function *Fn = dyn_cast<Function>(Callee.getCallee())) 448 CI->setCallingConv(Fn->getCallingConv()); 449 } 450 451 // Helper function to determine if it's legal to create a call to the runtime 452 // functions. 453 bool runtimeFnsAvailable(ArrayRef<RuntimeFunction> Fns) { 454 // We can always emit calls if we haven't yet linked in the runtime. 455 if (!OpenMPPostLink) 456 return true; 457 458 // Once the runtime has been already been linked in we cannot emit calls to 459 // any undefined functions. 460 for (RuntimeFunction Fn : Fns) { 461 RuntimeFunctionInfo &RFI = RFIs[Fn]; 462 463 if (RFI.Declaration && RFI.Declaration->isDeclaration()) 464 return false; 465 } 466 return true; 467 } 468 469 /// Helper to initialize all runtime function information for those defined 470 /// in OpenMPKinds.def. 471 void initializeRuntimeFunctions(Module &M) { 472 473 // Helper macros for handling __VA_ARGS__ in OMP_RTL 474 #define OMP_TYPE(VarName, ...) \ 475 Type *VarName = OMPBuilder.VarName; \ 476 (void)VarName; 477 478 #define OMP_ARRAY_TYPE(VarName, ...) \ 479 ArrayType *VarName##Ty = OMPBuilder.VarName##Ty; \ 480 (void)VarName##Ty; \ 481 PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy; \ 482 (void)VarName##PtrTy; 483 484 #define OMP_FUNCTION_TYPE(VarName, ...) \ 485 FunctionType *VarName = OMPBuilder.VarName; \ 486 (void)VarName; \ 487 PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr; \ 488 (void)VarName##Ptr; 489 490 #define OMP_STRUCT_TYPE(VarName, ...) 
\ 491 StructType *VarName = OMPBuilder.VarName; \ 492 (void)VarName; \ 493 PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr; \ 494 (void)VarName##Ptr; 495 496 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...) \ 497 { \ 498 SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__}); \ 499 Function *F = M.getFunction(_Name); \ 500 RTLFunctions.insert(F); \ 501 if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) { \ 502 RuntimeFunctionIDMap[F] = _Enum; \ 503 auto &RFI = RFIs[_Enum]; \ 504 RFI.Kind = _Enum; \ 505 RFI.Name = _Name; \ 506 RFI.IsVarArg = _IsVarArg; \ 507 RFI.ReturnType = OMPBuilder._ReturnType; \ 508 RFI.ArgumentTypes = std::move(ArgsTypes); \ 509 RFI.Declaration = F; \ 510 unsigned NumUses = collectUses(RFI); \ 511 (void)NumUses; \ 512 LLVM_DEBUG({ \ 513 dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not") \ 514 << " found\n"; \ 515 if (RFI.Declaration) \ 516 dbgs() << TAG << "-> got " << NumUses << " uses in " \ 517 << RFI.getNumFunctionsWithUses() \ 518 << " different functions.\n"; \ 519 }); \ 520 } \ 521 } 522 #include "llvm/Frontend/OpenMP/OMPKinds.def" 523 524 // Remove the `noinline` attribute from `__kmpc`, `ompx::` and `omp_` 525 // functions, except if `optnone` is present. 526 if (isOpenMPDevice(M)) { 527 for (Function &F : M) { 528 for (StringRef Prefix : {"__kmpc", "_ZN4ompx", "omp_"}) 529 if (F.hasFnAttribute(Attribute::NoInline) && 530 F.getName().startswith(Prefix) && 531 !F.hasFnAttribute(Attribute::OptimizeNone)) 532 F.removeFnAttr(Attribute::NoInline); 533 } 534 } 535 536 // TODO: We should attach the attributes defined in OMPKinds.def. 537 } 538 539 /// Collection of known kernels (\see Kernel) in the module. 540 KernelSet &Kernels; 541 542 /// Collection of known OpenMP runtime functions.. 543 DenseSet<const Function *> RTLFunctions; 544 545 /// Indicates if we have already linked in the OpenMP device library. 546 bool OpenMPPostLink = false; 547 }; 548 549 template <typename Ty, bool InsertInvalidates = true> 550 struct BooleanStateWithSetVector : public BooleanState { 551 bool contains(const Ty &Elem) const { return Set.contains(Elem); } 552 bool insert(const Ty &Elem) { 553 if (InsertInvalidates) 554 BooleanState::indicatePessimisticFixpoint(); 555 return Set.insert(Elem); 556 } 557 558 const Ty &operator[](int Idx) const { return Set[Idx]; } 559 bool operator==(const BooleanStateWithSetVector &RHS) const { 560 return BooleanState::operator==(RHS) && Set == RHS.Set; 561 } 562 bool operator!=(const BooleanStateWithSetVector &RHS) const { 563 return !(*this == RHS); 564 } 565 566 bool empty() const { return Set.empty(); } 567 size_t size() const { return Set.size(); } 568 569 /// "Clamp" this state with \p RHS. 570 BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) { 571 BooleanState::operator^=(RHS); 572 Set.insert(RHS.Set.begin(), RHS.Set.end()); 573 return *this; 574 } 575 576 private: 577 /// A set to keep track of elements. 578 SetVector<Ty> Set; 579 580 public: 581 typename decltype(Set)::iterator begin() { return Set.begin(); } 582 typename decltype(Set)::iterator end() { return Set.end(); } 583 typename decltype(Set)::const_iterator begin() const { return Set.begin(); } 584 typename decltype(Set)::const_iterator end() const { return Set.end(); } 585 }; 586 587 template <typename Ty, bool InsertInvalidates = true> 588 using BooleanStateWithPtrSetVector = 589 BooleanStateWithSetVector<Ty *, InsertInvalidates>; 590 591 struct KernelInfoState : AbstractState { 592 /// Flag to track if we reached a fixpoint. 
  bool IsAtFixpoint = false;

  /// The parallel regions (identified by the outlined parallel functions) that
  /// can be reached from the associated function.
  BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
      ReachedKnownParallelRegions;

  /// State to track what parallel region we might reach.
  BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;

  /// State to track if we are in SPMD-mode, assumed or known, and why we
  /// decided we cannot be. If it is assumed, then RequiresFullRuntime should
  /// also be false.
  BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;

  /// The __kmpc_target_init call in this kernel, if any. If we find more than
  /// one we abort as the kernel is malformed.
  CallBase *KernelInitCB = nullptr;

  /// The __kmpc_target_deinit call in this kernel, if any. If we find more
  /// than one we abort as the kernel is malformed.
  CallBase *KernelDeinitCB = nullptr;

  /// Flag to indicate if the associated function is a kernel entry.
  bool IsKernelEntry = false;

  /// State to track what kernel entries can reach the associated function.
  BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;

  /// State to indicate if we can track the parallel level of the associated
  /// function. We will give up tracking if we encounter an unknown caller or
  /// the caller is __kmpc_parallel_51.
  BooleanStateWithSetVector<uint8_t> ParallelLevels;

  /// Flag that indicates if the kernel has nested parallelism.
  bool NestedParallelism = false;

  /// Abstract State interface
  ///{

  KernelInfoState() = default;
  KernelInfoState(bool BestState) {
    if (!BestState)
      indicatePessimisticFixpoint();
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return true; }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return IsAtFixpoint; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixpoint = true;
    ParallelLevels.indicatePessimisticFixpoint();
    ReachingKernelEntries.indicatePessimisticFixpoint();
    SPMDCompatibilityTracker.indicatePessimisticFixpoint();
    ReachedKnownParallelRegions.indicatePessimisticFixpoint();
    ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
657 ChangeStatus indicateOptimisticFixpoint() override { 658 IsAtFixpoint = true; 659 ParallelLevels.indicateOptimisticFixpoint(); 660 ReachingKernelEntries.indicateOptimisticFixpoint(); 661 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 662 ReachedKnownParallelRegions.indicateOptimisticFixpoint(); 663 ReachedUnknownParallelRegions.indicateOptimisticFixpoint(); 664 return ChangeStatus::UNCHANGED; 665 } 666 667 /// Return the assumed state 668 KernelInfoState &getAssumed() { return *this; } 669 const KernelInfoState &getAssumed() const { return *this; } 670 671 bool operator==(const KernelInfoState &RHS) const { 672 if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker) 673 return false; 674 if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions) 675 return false; 676 if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions) 677 return false; 678 if (ReachingKernelEntries != RHS.ReachingKernelEntries) 679 return false; 680 if (ParallelLevels != RHS.ParallelLevels) 681 return false; 682 return true; 683 } 684 685 /// Returns true if this kernel contains any OpenMP parallel regions. 686 bool mayContainParallelRegion() { 687 return !ReachedKnownParallelRegions.empty() || 688 !ReachedUnknownParallelRegions.empty(); 689 } 690 691 /// Return empty set as the best state of potential values. 692 static KernelInfoState getBestState() { return KernelInfoState(true); } 693 694 static KernelInfoState getBestState(KernelInfoState &KIS) { 695 return getBestState(); 696 } 697 698 /// Return full set as the worst state of potential values. 699 static KernelInfoState getWorstState() { return KernelInfoState(false); } 700 701 /// "Clamp" this state with \p KIS. 702 KernelInfoState operator^=(const KernelInfoState &KIS) { 703 // Do not merge two different _init and _deinit call sites. 704 if (KIS.KernelInitCB) { 705 if (KernelInitCB && KernelInitCB != KIS.KernelInitCB) 706 llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt " 707 "assumptions."); 708 KernelInitCB = KIS.KernelInitCB; 709 } 710 if (KIS.KernelDeinitCB) { 711 if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB) 712 llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt " 713 "assumptions."); 714 KernelDeinitCB = KIS.KernelDeinitCB; 715 } 716 SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker; 717 ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions; 718 ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions; 719 NestedParallelism |= KIS.NestedParallelism; 720 return *this; 721 } 722 723 KernelInfoState operator&=(const KernelInfoState &KIS) { 724 return (*this ^= KIS); 725 } 726 727 ///} 728 }; 729 730 /// Used to map the values physically (in the IR) stored in an offload 731 /// array, to a vector in memory. 732 struct OffloadArray { 733 /// Physical array (in the IR). 734 AllocaInst *Array = nullptr; 735 /// Mapped values. 736 SmallVector<Value *, 8> StoredValues; 737 /// Last stores made in the offload array. 738 SmallVector<StoreInst *, 8> LastAccesses; 739 740 OffloadArray() = default; 741 742 /// Initializes the OffloadArray with the values stored in \p Array before 743 /// instruction \p Before is reached. Returns false if the initialization 744 /// fails. 745 /// This MUST be used immediately after the construction of the object. 
746 bool initialize(AllocaInst &Array, Instruction &Before) { 747 if (!Array.getAllocatedType()->isArrayTy()) 748 return false; 749 750 if (!getValues(Array, Before)) 751 return false; 752 753 this->Array = &Array; 754 return true; 755 } 756 757 static const unsigned DeviceIDArgNum = 1; 758 static const unsigned BasePtrsArgNum = 3; 759 static const unsigned PtrsArgNum = 4; 760 static const unsigned SizesArgNum = 5; 761 762 private: 763 /// Traverses the BasicBlock where \p Array is, collecting the stores made to 764 /// \p Array, leaving StoredValues with the values stored before the 765 /// instruction \p Before is reached. 766 bool getValues(AllocaInst &Array, Instruction &Before) { 767 // Initialize container. 768 const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements(); 769 StoredValues.assign(NumValues, nullptr); 770 LastAccesses.assign(NumValues, nullptr); 771 772 // TODO: This assumes the instruction \p Before is in the same 773 // BasicBlock as Array. Make it general, for any control flow graph. 774 BasicBlock *BB = Array.getParent(); 775 if (BB != Before.getParent()) 776 return false; 777 778 const DataLayout &DL = Array.getModule()->getDataLayout(); 779 const unsigned int PointerSize = DL.getPointerSize(); 780 781 for (Instruction &I : *BB) { 782 if (&I == &Before) 783 break; 784 785 if (!isa<StoreInst>(&I)) 786 continue; 787 788 auto *S = cast<StoreInst>(&I); 789 int64_t Offset = -1; 790 auto *Dst = 791 GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL); 792 if (Dst == &Array) { 793 int64_t Idx = Offset / PointerSize; 794 StoredValues[Idx] = getUnderlyingObject(S->getValueOperand()); 795 LastAccesses[Idx] = S; 796 } 797 } 798 799 return isFilled(); 800 } 801 802 /// Returns true if all values in StoredValues and 803 /// LastAccesses are not nullptrs. 804 bool isFilled() { 805 const unsigned NumValues = StoredValues.size(); 806 for (unsigned I = 0; I < NumValues; ++I) { 807 if (!StoredValues[I] || !LastAccesses[I]) 808 return false; 809 } 810 811 return true; 812 } 813 }; 814 815 struct OpenMPOpt { 816 817 using OptimizationRemarkGetter = 818 function_ref<OptimizationRemarkEmitter &(Function *)>; 819 820 OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater, 821 OptimizationRemarkGetter OREGetter, 822 OMPInformationCache &OMPInfoCache, Attributor &A) 823 : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater), 824 OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {} 825 826 /// Check if any remarks are enabled for openmp-opt 827 bool remarksEnabled() { 828 auto &Ctx = M.getContext(); 829 return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE); 830 } 831 832 /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice. 833 bool run(bool IsModulePass) { 834 if (SCC.empty()) 835 return false; 836 837 bool Changed = false; 838 839 LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size() 840 << " functions in a slice with " 841 << OMPInfoCache.ModuleSlice.size() << " functions\n"); 842 843 if (IsModulePass) { 844 Changed |= runAttributor(IsModulePass); 845 846 // Recollect uses, in case Attributor deleted any. 847 OMPInfoCache.recollectUses(); 848 849 // TODO: This should be folded into buildCustomStateMachine. 
850 Changed |= rewriteDeviceCodeStateMachine(); 851 852 if (remarksEnabled()) 853 analysisGlobalization(); 854 } else { 855 if (PrintICVValues) 856 printICVs(); 857 if (PrintOpenMPKernels) 858 printKernels(); 859 860 Changed |= runAttributor(IsModulePass); 861 862 // Recollect uses, in case Attributor deleted any. 863 OMPInfoCache.recollectUses(); 864 865 Changed |= deleteParallelRegions(); 866 867 if (HideMemoryTransferLatency) 868 Changed |= hideMemTransfersLatency(); 869 Changed |= deduplicateRuntimeCalls(); 870 if (EnableParallelRegionMerging) { 871 if (mergeParallelRegions()) { 872 deduplicateRuntimeCalls(); 873 Changed = true; 874 } 875 } 876 } 877 878 return Changed; 879 } 880 881 /// Print initial ICV values for testing. 882 /// FIXME: This should be done from the Attributor once it is added. 883 void printICVs() const { 884 InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel, 885 ICV_proc_bind}; 886 887 for (Function *F : SCC) { 888 for (auto ICV : ICVs) { 889 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 890 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 891 return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name) 892 << " Value: " 893 << (ICVInfo.InitValue 894 ? toString(ICVInfo.InitValue->getValue(), 10, true) 895 : "IMPLEMENTATION_DEFINED"); 896 }; 897 898 emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark); 899 } 900 } 901 } 902 903 /// Print OpenMP GPU kernels for testing. 904 void printKernels() const { 905 for (Function *F : SCC) { 906 if (!OMPInfoCache.Kernels.count(F)) 907 continue; 908 909 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 910 return ORA << "OpenMP GPU kernel " 911 << ore::NV("OpenMPGPUKernel", F->getName()) << "\n"; 912 }; 913 914 emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark); 915 } 916 } 917 918 /// Return the call if \p U is a callee use in a regular call. If \p RFI is 919 /// given it has to be the callee or a nullptr is returned. 920 static CallInst *getCallIfRegularCall( 921 Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) { 922 CallInst *CI = dyn_cast<CallInst>(U.getUser()); 923 if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() && 924 (!RFI || 925 (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration))) 926 return CI; 927 return nullptr; 928 } 929 930 /// Return the call if \p V is a regular call. If \p RFI is given it has to be 931 /// the callee or a nullptr is returned. 932 static CallInst *getCallIfRegularCall( 933 Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) { 934 CallInst *CI = dyn_cast<CallInst>(&V); 935 if (CI && !CI->hasOperandBundles() && 936 (!RFI || 937 (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration))) 938 return CI; 939 return nullptr; 940 } 941 942 private: 943 /// Merge parallel regions when it is safe. 944 bool mergeParallelRegions() { 945 const unsigned CallbackCalleeOperand = 2; 946 const unsigned CallbackFirstArgOperand = 3; 947 using InsertPointTy = OpenMPIRBuilder::InsertPointTy; 948 949 // Check if there are any __kmpc_fork_call calls to merge. 950 OMPInformationCache::RuntimeFunctionInfo &RFI = 951 OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call]; 952 953 if (!RFI.Declaration) 954 return false; 955 956 // Unmergable calls that prevent merging a parallel region. 
957 OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = { 958 OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind], 959 OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads], 960 }; 961 962 bool Changed = false; 963 LoopInfo *LI = nullptr; 964 DominatorTree *DT = nullptr; 965 966 SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap; 967 968 BasicBlock *StartBB = nullptr, *EndBB = nullptr; 969 auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) { 970 BasicBlock *CGStartBB = CodeGenIP.getBlock(); 971 BasicBlock *CGEndBB = 972 SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI); 973 assert(StartBB != nullptr && "StartBB should not be null"); 974 CGStartBB->getTerminator()->setSuccessor(0, StartBB); 975 assert(EndBB != nullptr && "EndBB should not be null"); 976 EndBB->getTerminator()->setSuccessor(0, CGEndBB); 977 }; 978 979 auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &, 980 Value &Inner, Value *&ReplacementValue) -> InsertPointTy { 981 ReplacementValue = &Inner; 982 return CodeGenIP; 983 }; 984 985 auto FiniCB = [&](InsertPointTy CodeGenIP) {}; 986 987 /// Create a sequential execution region within a merged parallel region, 988 /// encapsulated in a master construct with a barrier for synchronization. 989 auto CreateSequentialRegion = [&](Function *OuterFn, 990 BasicBlock *OuterPredBB, 991 Instruction *SeqStartI, 992 Instruction *SeqEndI) { 993 // Isolate the instructions of the sequential region to a separate 994 // block. 995 BasicBlock *ParentBB = SeqStartI->getParent(); 996 BasicBlock *SeqEndBB = 997 SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI); 998 BasicBlock *SeqAfterBB = 999 SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI); 1000 BasicBlock *SeqStartBB = 1001 SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged"); 1002 1003 assert(ParentBB->getUniqueSuccessor() == SeqStartBB && 1004 "Expected a different CFG"); 1005 const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc(); 1006 ParentBB->getTerminator()->eraseFromParent(); 1007 1008 auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) { 1009 BasicBlock *CGStartBB = CodeGenIP.getBlock(); 1010 BasicBlock *CGEndBB = 1011 SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI); 1012 assert(SeqStartBB != nullptr && "SeqStartBB should not be null"); 1013 CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB); 1014 assert(SeqEndBB != nullptr && "SeqEndBB should not be null"); 1015 SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB); 1016 }; 1017 auto FiniCB = [&](InsertPointTy CodeGenIP) {}; 1018 1019 // Find outputs from the sequential region to outside users and 1020 // broadcast their values to them. 1021 for (Instruction &I : *SeqStartBB) { 1022 SmallPtrSet<Instruction *, 4> OutsideUsers; 1023 for (User *Usr : I.users()) { 1024 Instruction &UsrI = *cast<Instruction>(Usr); 1025 // Ignore outputs to LT intrinsics, code extraction for the merged 1026 // parallel region will fix them. 1027 if (UsrI.isLifetimeStartOrEnd()) 1028 continue; 1029 1030 if (UsrI.getParent() != SeqStartBB) 1031 OutsideUsers.insert(&UsrI); 1032 } 1033 1034 if (OutsideUsers.empty()) 1035 continue; 1036 1037 // Emit an alloca in the outer region to store the broadcasted 1038 // value. 
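        // Illustrative sketch (hypothetical value names) of the broadcast
        // emitted below for a value %v defined in the sequential region and
        // used outside of it:
        //   %v.seq.output.alloc = alloca <ty>        ; in OuterFn's entry block
        //   store <ty> %v, ptr %v.seq.output.alloc   ; before SeqStartBB's terminator
        //   %v.seq.output.load = load <ty>, ptr %v.seq.output.alloc ; at each outside user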
1039 const DataLayout &DL = M.getDataLayout(); 1040 AllocaInst *AllocaI = new AllocaInst( 1041 I.getType(), DL.getAllocaAddrSpace(), nullptr, 1042 I.getName() + ".seq.output.alloc", &OuterFn->front().front()); 1043 1044 // Emit a store instruction in the sequential BB to update the 1045 // value. 1046 new StoreInst(&I, AllocaI, SeqStartBB->getTerminator()); 1047 1048 // Emit a load instruction and replace the use of the output value 1049 // with it. 1050 for (Instruction *UsrI : OutsideUsers) { 1051 LoadInst *LoadI = new LoadInst( 1052 I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI); 1053 UsrI->replaceUsesOfWith(&I, LoadI); 1054 } 1055 } 1056 1057 OpenMPIRBuilder::LocationDescription Loc( 1058 InsertPointTy(ParentBB, ParentBB->end()), DL); 1059 InsertPointTy SeqAfterIP = 1060 OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB); 1061 1062 OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel); 1063 1064 BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock()); 1065 1066 LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn 1067 << "\n"); 1068 }; 1069 1070 // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all 1071 // contained in BB and only separated by instructions that can be 1072 // redundantly executed in parallel. The block BB is split before the first 1073 // call (in MergableCIs) and after the last so the entire region we merge 1074 // into a single parallel region is contained in a single basic block 1075 // without any other instructions. We use the OpenMPIRBuilder to outline 1076 // that block and call the resulting function via __kmpc_fork_call. 1077 auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs, 1078 BasicBlock *BB) { 1079 // TODO: Change the interface to allow single CIs expanded, e.g, to 1080 // include an outer loop. 1081 assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs"); 1082 1083 auto Remark = [&](OptimizationRemark OR) { 1084 OR << "Parallel region merged with parallel region" 1085 << (MergableCIs.size() > 2 ? "s" : "") << " at "; 1086 for (auto *CI : llvm::drop_begin(MergableCIs)) { 1087 OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc()); 1088 if (CI != MergableCIs.back()) 1089 OR << ", "; 1090 } 1091 return OR << "."; 1092 }; 1093 1094 emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark); 1095 1096 Function *OriginalFn = BB->getParent(); 1097 LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size() 1098 << " parallel regions in " << OriginalFn->getName() 1099 << "\n"); 1100 1101 // Isolate the calls to merge in a separate block. 1102 EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI); 1103 BasicBlock *AfterBB = 1104 SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI); 1105 StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr, 1106 "omp.par.merged"); 1107 1108 assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG"); 1109 const DebugLoc DL = BB->getTerminator()->getDebugLoc(); 1110 BB->getTerminator()->eraseFromParent(); 1111 1112 // Create sequential regions for sequential instructions that are 1113 // in-between mergable parallel regions. 1114 for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1; 1115 It != End; ++It) { 1116 Instruction *ForkCI = *It; 1117 Instruction *NextForkCI = *(It + 1); 1118 1119 // Continue if there are not in-between instructions. 
        if (ForkCI->getNextNode() == NextForkCI)
          continue;

        CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
                               NextForkCI->getPrevNode());
      }

      OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
                                               DL);
      IRBuilder<>::InsertPoint AllocaIP(
          &OriginalFn->getEntryBlock(),
          OriginalFn->getEntryBlock().getFirstInsertionPt());
      // Create the merged parallel region with default proc binding, to
      // avoid overriding binding settings, and without explicit cancellation.
      InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
          Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
          OMP_PROC_BIND_default, /* IsCancellable */ false);
      BranchInst::Create(AfterBB, AfterIP.getBlock());

      // Perform the actual outlining.
      OMPInfoCache.OMPBuilder.finalize(OriginalFn);

      Function *OutlinedFn = MergableCIs.front()->getCaller();

      // Replace the __kmpc_fork_call calls with direct calls to the outlined
      // callbacks.
      SmallVector<Value *, 8> Args;
      for (auto *CI : MergableCIs) {
        Value *Callee = CI->getArgOperand(CallbackCalleeOperand);
        FunctionType *FT = OMPInfoCache.OMPBuilder.ParallelTask;
        Args.clear();
        Args.push_back(OutlinedFn->getArg(0));
        Args.push_back(OutlinedFn->getArg(1));
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          Args.push_back(CI->getArgOperand(U));

        CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
        if (CI->getDebugLoc())
          NewCI->setDebugLoc(CI->getDebugLoc());

        // Forward parameter attributes from the callback to the callee.
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
            NewCI->addParamAttr(
                U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);

        // Emit an explicit barrier to replace the implicit fork-join barrier.
        if (CI != MergableCIs.back()) {
          // TODO: Remove barrier if the merged parallel region includes the
          // 'nowait' clause.
          OMPInfoCache.OMPBuilder.createBarrier(
              InsertPointTy(NewCI->getParent(),
                            NewCI->getNextNode()->getIterator()),
              OMPD_parallel);
        }

        CI->eraseFromParent();
      }

      assert(OutlinedFn != OriginalFn && "Outlining failed");
      CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
      CGUpdater.reanalyzeFunction(*OriginalFn);

      NumOpenMPParallelRegionsMerged += MergableCIs.size();

      return true;
    };

    // Helper function that identifies sequences of
    // __kmpc_fork_call uses in a basic block.
    auto DetectPRsCB = [&](Use &U, Function &F) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      BB2PRMap[CI->getParent()].insert(CI);

      return false;
    };

    BB2PRMap.clear();
    RFI.foreachUse(SCC, DetectPRsCB);
    SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
    // Find mergable parallel regions within a basic block that are
    // safe to merge, that is, any in-between instructions can safely
    // execute in parallel after merging.
    // TODO: support merging across basic-blocks.
    for (auto &It : BB2PRMap) {
      auto &CIs = It.getSecond();
      if (CIs.size() < 2)
        continue;

      BasicBlock *BB = It.getFirst();
      SmallVector<CallInst *, 4> MergableCIs;

      /// Returns true if the instruction is mergable, false otherwise.
1215 /// A terminator instruction is unmergable by definition since merging 1216 /// works within a BB. Instructions before the mergable region are 1217 /// mergable if they are not calls to OpenMP runtime functions that may 1218 /// set different execution parameters for subsequent parallel regions. 1219 /// Instructions in-between parallel regions are mergable if they are not 1220 /// calls to any non-intrinsic function since that may call a non-mergable 1221 /// OpenMP runtime function. 1222 auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) { 1223 // We do not merge across BBs, hence return false (unmergable) if the 1224 // instruction is a terminator. 1225 if (I.isTerminator()) 1226 return false; 1227 1228 if (!isa<CallInst>(&I)) 1229 return true; 1230 1231 CallInst *CI = cast<CallInst>(&I); 1232 if (IsBeforeMergableRegion) { 1233 Function *CalledFunction = CI->getCalledFunction(); 1234 if (!CalledFunction) 1235 return false; 1236 // Return false (unmergable) if the call before the parallel 1237 // region calls an explicit affinity (proc_bind) or number of 1238 // threads (num_threads) compiler-generated function. Those settings 1239 // may be incompatible with following parallel regions. 1240 // TODO: ICV tracking to detect compatibility. 1241 for (const auto &RFI : UnmergableCallsInfo) { 1242 if (CalledFunction == RFI.Declaration) 1243 return false; 1244 } 1245 } else { 1246 // Return false (unmergable) if there is a call instruction 1247 // in-between parallel regions when it is not an intrinsic. It 1248 // may call an unmergable OpenMP runtime function in its callpath. 1249 // TODO: Keep track of possible OpenMP calls in the callpath. 1250 if (!isa<IntrinsicInst>(CI)) 1251 return false; 1252 } 1253 1254 return true; 1255 }; 1256 // Find maximal number of parallel region CIs that are safe to merge. 1257 for (auto It = BB->begin(), End = BB->end(); It != End;) { 1258 Instruction &I = *It; 1259 ++It; 1260 1261 if (CIs.count(&I)) { 1262 MergableCIs.push_back(cast<CallInst>(&I)); 1263 continue; 1264 } 1265 1266 // Continue expanding if the instruction is mergable. 1267 if (IsMergable(I, MergableCIs.empty())) 1268 continue; 1269 1270 // Forward the instruction iterator to skip the next parallel region 1271 // since there is an unmergable instruction which can affect it. 1272 for (; It != End; ++It) { 1273 Instruction &SkipI = *It; 1274 if (CIs.count(&SkipI)) { 1275 LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI 1276 << " due to " << I << "\n"); 1277 ++It; 1278 break; 1279 } 1280 } 1281 1282 // Store mergable regions found. 1283 if (MergableCIs.size() > 1) { 1284 MergableCIsVector.push_back(MergableCIs); 1285 LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size() 1286 << " parallel regions in block " << BB->getName() 1287 << " of function " << BB->getParent()->getName() 1288 << "\n";); 1289 } 1290 1291 MergableCIs.clear(); 1292 } 1293 1294 if (!MergableCIsVector.empty()) { 1295 Changed = true; 1296 1297 for (auto &MergableCIs : MergableCIsVector) 1298 Merge(MergableCIs, BB); 1299 MergableCIsVector.clear(); 1300 } 1301 } 1302 1303 if (Changed) { 1304 /// Re-collect use for fork calls, emitted barrier calls, and 1305 /// any emitted master/end_master calls. 
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
    }

    return Changed;
  }

  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Removing parallel region with no side-effects.";
      };
      emitRemark<OptimizationRemark>(CI, "OMP160", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(SCC, DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // Global-tid is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |= deduplicateRuntimeCalls(
            *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  /// Tries to hide the latency of runtime calls that involve host to
  /// device memory transfers by splitting them into their "issue" and "wait"
  /// versions. The "issue" is moved upwards as much as possible. The "wait" is
  /// moved downwards as much as possible. The "issue" issues the memory
  /// transfer asynchronously, returning a handle. The "wait" waits on the
  /// returned handle for the memory transfer to finish.
  bool hideMemTransfersLatency() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
    bool Changed = false;
    auto SplitMemTransfers = [&](Use &U, Function &Decl) {
      auto *RTCall = getCallIfRegularCall(U, &RFI);
      if (!RTCall)
        return false;

      OffloadArray OffloadArrays[3];
      if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
        return false;

      LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));

      // TODO: Check if it can be moved upwards.
      bool WasSplit = false;
      Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
      if (WaitMovementPoint)
        WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);

      Changed |= WasSplit;
      return WasSplit;
    };
    if (OMPInfoCache.runtimeFnsAvailable(
            {OMPRTL___tgt_target_data_begin_mapper_issue,
             OMPRTL___tgt_target_data_begin_mapper_wait}))
      RFI.foreachUse(SCC, SplitMemTransfers);

    return Changed;
  }

  void analysisGlobalization() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];

    auto CheckGlobalization = [&](Use &U, Function &Decl) {
      if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
        auto Remark = [&](OptimizationRemarkMissed ORM) {
          return ORM
                 << "Found thread data sharing on the GPU. "
                 << "Expect degraded performance due to data globalization.";
        };
        emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark);
      }

      return false;
    };

    RFI.foreachUse(SCC, CheckGlobalization);
  }

  /// Maps the values stored in the offload arrays passed as arguments to
  /// \p RuntimeCall into the offload arrays in \p OAs.
  bool getValuesInOffloadArrays(CallInst &RuntimeCall,
                                MutableArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "Need space for three offload arrays!");

    // A runtime call that involves memory offloading looks something like:
    // call void @__tgt_target_data_begin_mapper(arg0, arg1,
    //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
    // ...)
    // So, the idea is to access the allocas that allocate space for these
    // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
    // Therefore:
    // i8** %offload_baseptrs.
    Value *BasePtrsArg =
        RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
    // i8** %offload_ptrs.
    Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
    // i8** %offload_sizes.
    Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);

    // Get values stored in **offload_baseptrs.
    auto *V = getUnderlyingObject(BasePtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *BasePtrsArray = cast<AllocaInst>(V);
    if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_ptrs.
    V = getUnderlyingObject(PtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *PtrsArray = cast<AllocaInst>(V);
    if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_sizes.
    V = getUnderlyingObject(SizesArg);
    // If it's a [constant] global array don't analyze it.
1503 if (isa<GlobalValue>(V)) 1504 return isa<Constant>(V); 1505 if (!isa<AllocaInst>(V)) 1506 return false; 1507 1508 auto *SizesArray = cast<AllocaInst>(V); 1509 if (!OAs[2].initialize(*SizesArray, RuntimeCall)) 1510 return false; 1511 1512 return true; 1513 } 1514 1515 /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG. 1516 /// For now this is a way to test that the function getValuesInOffloadArrays 1517 /// is working properly. 1518 /// TODO: Move this to a unittest when unittests are available for OpenMPOpt. 1519 void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) { 1520 assert(OAs.size() == 3 && "There are three offload arrays to debug!"); 1521 1522 LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n"); 1523 std::string ValuesStr; 1524 raw_string_ostream Printer(ValuesStr); 1525 std::string Separator = " --- "; 1526 1527 for (auto *BP : OAs[0].StoredValues) { 1528 BP->print(Printer); 1529 Printer << Separator; 1530 } 1531 LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n"); 1532 ValuesStr.clear(); 1533 1534 for (auto *P : OAs[1].StoredValues) { 1535 P->print(Printer); 1536 Printer << Separator; 1537 } 1538 LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n"); 1539 ValuesStr.clear(); 1540 1541 for (auto *S : OAs[2].StoredValues) { 1542 S->print(Printer); 1543 Printer << Separator; 1544 } 1545 LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n"); 1546 } 1547 1548 /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be 1549 /// moved. Returns nullptr if the movement is not possible, or not worth it. 1550 Instruction *canBeMovedDownwards(CallInst &RuntimeCall) { 1551 // FIXME: This traverses only the BasicBlock where RuntimeCall is. 1552 // Make it traverse the CFG. 1553 1554 Instruction *CurrentI = &RuntimeCall; 1555 bool IsWorthIt = false; 1556 while ((CurrentI = CurrentI->getNextNode())) { 1557 1558 // TODO: Once we detect the regions to be offloaded we should use the 1559 // alias analysis manager to check if CurrentI may modify one of 1560 // the offloaded regions. 1561 if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) { 1562 if (IsWorthIt) 1563 return CurrentI; 1564 1565 return nullptr; 1566 } 1567 1568 // FIXME: For now if we move it over anything without side effect 1569 // is worth it. 1570 IsWorthIt = true; 1571 } 1572 1573 // Return end of BasicBlock. 1574 return RuntimeCall.getParent()->getTerminator(); 1575 } 1576 1577 /// Splits \p RuntimeCall into its "issue" and "wait" counterparts. 1578 bool splitTargetDataBeginRTC(CallInst &RuntimeCall, 1579 Instruction &WaitMovementPoint) { 1580 // Create stack allocated handle (__tgt_async_info) at the beginning of the 1581 // function. Used for storing information of the async transfer, allowing to 1582 // wait on it later. 
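    // Illustrative sketch (hypothetical IR, pointer types elided) of the
    // split performed below, assuming a single mapper call in the block:
    //   %handle = alloca %struct.__tgt_async_info                ; entry block
    //   call void @__tgt_target_data_begin_mapper_issue(..., ptr %handle)
    //   ...                                              ; side-effect-free code
    //   call void @__tgt_target_data_begin_mapper_wait(i64 %device_id, ptr %handle)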
1583 auto &IRBuilder = OMPInfoCache.OMPBuilder; 1584 Function *F = RuntimeCall.getCaller(); 1585 BasicBlock &Entry = F->getEntryBlock(); 1586 IRBuilder.Builder.SetInsertPoint(&Entry, 1587 Entry.getFirstNonPHIOrDbgOrAlloca()); 1588 Value *Handle = IRBuilder.Builder.CreateAlloca( 1589 IRBuilder.AsyncInfo, /*ArraySize=*/nullptr, "handle"); 1590 Handle = 1591 IRBuilder.Builder.CreateAddrSpaceCast(Handle, IRBuilder.AsyncInfoPtr); 1592 1593 // Add "issue" runtime call declaration: 1594 // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32, 1595 // i8**, i8**, i64*, i64*) 1596 FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction( 1597 M, OMPRTL___tgt_target_data_begin_mapper_issue); 1598 1599 // Change RuntimeCall call site for its asynchronous version. 1600 SmallVector<Value *, 16> Args; 1601 for (auto &Arg : RuntimeCall.args()) 1602 Args.push_back(Arg.get()); 1603 Args.push_back(Handle); 1604 1605 CallInst *IssueCallsite = 1606 CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall); 1607 OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite); 1608 RuntimeCall.eraseFromParent(); 1609 1610 // Add "wait" runtime call declaration: 1611 // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info) 1612 FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction( 1613 M, OMPRTL___tgt_target_data_begin_mapper_wait); 1614 1615 Value *WaitParams[2] = { 1616 IssueCallsite->getArgOperand( 1617 OffloadArray::DeviceIDArgNum), // device_id. 1618 Handle // handle to wait on. 1619 }; 1620 CallInst *WaitCallsite = CallInst::Create( 1621 WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint); 1622 OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite); 1623 1624 return true; 1625 } 1626 1627 static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent, 1628 bool GlobalOnly, bool &SingleChoice) { 1629 if (CurrentIdent == NextIdent) 1630 return CurrentIdent; 1631 1632 // TODO: Figure out how to actually combine multiple debug locations. For 1633 // now we just keep an existing one if there is a single choice. 1634 if (!GlobalOnly || isa<GlobalValue>(NextIdent)) { 1635 SingleChoice = !CurrentIdent; 1636 return NextIdent; 1637 } 1638 return nullptr; 1639 } 1640 1641 /// Return an `struct ident_t*` value that represents the ones used in the 1642 /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not 1643 /// return a local `struct ident_t*`. For now, if we cannot find a suitable 1644 /// return value we create one from scratch. We also do not yet combine 1645 /// information, e.g., the source locations, see combinedIdentStruct. 1646 Value * 1647 getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI, 1648 Function &F, bool GlobalOnly) { 1649 bool SingleChoice = true; 1650 Value *Ident = nullptr; 1651 auto CombineIdentStruct = [&](Use &U, Function &Caller) { 1652 CallInst *CI = getCallIfRegularCall(U, &RFI); 1653 if (!CI || &F != &Caller) 1654 return false; 1655 Ident = combinedIdentStruct(Ident, CI->getArgOperand(0), 1656 /* GlobalOnly */ true, SingleChoice); 1657 return false; 1658 }; 1659 RFI.foreachUse(SCC, CombineIdentStruct); 1660 1661 if (!Ident || !SingleChoice) { 1662 // The IRBuilder uses the insertion block to get to the module, this is 1663 // unfortunate but we work around it for now. 
1664 if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock()) 1665 OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy( 1666 &F.getEntryBlock(), F.getEntryBlock().begin())); 1667 // Create a fallback location if non was found. 1668 // TODO: Use the debug locations of the calls instead. 1669 uint32_t SrcLocStrSize; 1670 Constant *Loc = 1671 OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize); 1672 Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize); 1673 } 1674 return Ident; 1675 } 1676 1677 /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or 1678 /// \p ReplVal if given. 1679 bool deduplicateRuntimeCalls(Function &F, 1680 OMPInformationCache::RuntimeFunctionInfo &RFI, 1681 Value *ReplVal = nullptr) { 1682 auto *UV = RFI.getUseVector(F); 1683 if (!UV || UV->size() + (ReplVal != nullptr) < 2) 1684 return false; 1685 1686 LLVM_DEBUG( 1687 dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name 1688 << (ReplVal ? " with an existing value\n" : "\n") << "\n"); 1689 1690 assert((!ReplVal || (isa<Argument>(ReplVal) && 1691 cast<Argument>(ReplVal)->getParent() == &F)) && 1692 "Unexpected replacement value!"); 1693 1694 // TODO: Use dominance to find a good position instead. 1695 auto CanBeMoved = [this](CallBase &CB) { 1696 unsigned NumArgs = CB.arg_size(); 1697 if (NumArgs == 0) 1698 return true; 1699 if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr) 1700 return false; 1701 for (unsigned U = 1; U < NumArgs; ++U) 1702 if (isa<Instruction>(CB.getArgOperand(U))) 1703 return false; 1704 return true; 1705 }; 1706 1707 if (!ReplVal) { 1708 for (Use *U : *UV) 1709 if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) { 1710 if (!CanBeMoved(*CI)) 1711 continue; 1712 1713 // If the function is a kernel, dedup will move 1714 // the runtime call right after the kernel init callsite. Otherwise, 1715 // it will move it to the beginning of the caller function. 1716 if (isKernel(F)) { 1717 auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 1718 auto *KernelInitUV = KernelInitRFI.getUseVector(F); 1719 1720 if (KernelInitUV->empty()) 1721 continue; 1722 1723 assert(KernelInitUV->size() == 1 && 1724 "Expected a single __kmpc_target_init in kernel\n"); 1725 1726 CallInst *KernelInitCI = 1727 getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI); 1728 assert(KernelInitCI && 1729 "Expected a call to __kmpc_target_init in kernel\n"); 1730 1731 CI->moveAfter(KernelInitCI); 1732 } else 1733 CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt()); 1734 ReplVal = CI; 1735 break; 1736 } 1737 if (!ReplVal) 1738 return false; 1739 } 1740 1741 // If we use a call as a replacement value we need to make sure the ident is 1742 // valid at the new location. For now we just pick a global one, either 1743 // existing and used by one of the calls, or created from scratch. 
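    // For example (values made up for illustration): if the chosen
    // replacement call was
    //   %gtid = call i32 @__kmpc_global_thread_num(%struct.ident_t* %local_loc)
    // and it was hoisted towards the entry block above, %local_loc might no
    // longer dominate the call at its new position. Rewriting operand 0 to a
    // global ident (below) avoids that problem.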
1744 if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) { 1745 if (!CI->arg_empty() && 1746 CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) { 1747 Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F, 1748 /* GlobalOnly */ true); 1749 CI->setArgOperand(0, Ident); 1750 } 1751 } 1752 1753 bool Changed = false; 1754 auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) { 1755 CallInst *CI = getCallIfRegularCall(U, &RFI); 1756 if (!CI || CI == ReplVal || &F != &Caller) 1757 return false; 1758 assert(CI->getCaller() == &F && "Unexpected call!"); 1759 1760 auto Remark = [&](OptimizationRemark OR) { 1761 return OR << "OpenMP runtime call " 1762 << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated."; 1763 }; 1764 if (CI->getDebugLoc()) 1765 emitRemark<OptimizationRemark>(CI, "OMP170", Remark); 1766 else 1767 emitRemark<OptimizationRemark>(&F, "OMP170", Remark); 1768 1769 CGUpdater.removeCallSite(*CI); 1770 CI->replaceAllUsesWith(ReplVal); 1771 CI->eraseFromParent(); 1772 ++NumOpenMPRuntimeCallsDeduplicated; 1773 Changed = true; 1774 return true; 1775 }; 1776 RFI.foreachUse(SCC, ReplaceAndDeleteCB); 1777 1778 return Changed; 1779 } 1780 1781 /// Collect arguments that represent the global thread id in \p GTIdArgs. 1782 void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) { 1783 // TODO: Below we basically perform a fixpoint iteration with a pessimistic 1784 // initialization. We could define an AbstractAttribute instead and 1785 // run the Attributor here once it can be run as an SCC pass. 1786 1787 // Helper to check the argument \p ArgNo at all call sites of \p F for 1788 // a GTId. 1789 auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) { 1790 if (!F.hasLocalLinkage()) 1791 return false; 1792 for (Use &U : F.uses()) { 1793 if (CallInst *CI = getCallIfRegularCall(U)) { 1794 Value *ArgOp = CI->getArgOperand(ArgNo); 1795 if (CI == &RefCI || GTIdArgs.count(ArgOp) || 1796 getCallIfRegularCall( 1797 *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num])) 1798 continue; 1799 } 1800 return false; 1801 } 1802 return true; 1803 }; 1804 1805 // Helper to identify uses of a GTId as GTId arguments. 1806 auto AddUserArgs = [&](Value &GTId) { 1807 for (Use &U : GTId.uses()) 1808 if (CallInst *CI = dyn_cast<CallInst>(U.getUser())) 1809 if (CI->isArgOperand(&U)) 1810 if (Function *Callee = CI->getCalledFunction()) 1811 if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI)) 1812 GTIdArgs.insert(Callee->getArg(U.getOperandNo())); 1813 }; 1814 1815 // The argument users of __kmpc_global_thread_num calls are GTIds. 1816 OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI = 1817 OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]; 1818 1819 GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) { 1820 if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI)) 1821 AddUserArgs(*CI); 1822 return false; 1823 }); 1824 1825 // Transitively search for more arguments by looking at the users of the 1826 // ones we know already. During the search the GTIdArgs vector is extended 1827 // so we cannot cache the size nor can we use a range based for. 1828 for (unsigned U = 0; U < GTIdArgs.size(); ++U) 1829 AddUserArgs(*GTIdArgs[U]); 1830 } 1831 1832 /// Kernel (=GPU) optimizations and utility functions 1833 /// 1834 ///{{ 1835 1836 /// Check if \p F is a kernel, hence entry point for target offloading. 1837 bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); } 1838 1839 /// Cache to remember the unique kernel for a function.
1840 DenseMap<Function *, std::optional<Kernel>> UniqueKernelMap; 1841 1842 /// Find the unique kernel that will execute \p F, if any. 1843 Kernel getUniqueKernelFor(Function &F); 1844 1845 /// Find the unique kernel that will execute \p I, if any. 1846 Kernel getUniqueKernelFor(Instruction &I) { 1847 return getUniqueKernelFor(*I.getFunction()); 1848 } 1849 1850 /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in 1851 /// the cases we can avoid taking the address of a function. 1852 bool rewriteDeviceCodeStateMachine(); 1853 1854 /// 1855 ///}} 1856 1857 /// Emit a remark generically 1858 /// 1859 /// This template function can be used to generically emit a remark. The 1860 /// RemarkKind should be one of the following: 1861 /// - OptimizationRemark to indicate a successful optimization attempt 1862 /// - OptimizationRemarkMissed to report a failed optimization attempt 1863 /// - OptimizationRemarkAnalysis to provide additional information about an 1864 /// optimization attempt 1865 /// 1866 /// The remark is built using a callback function provided by the caller that 1867 /// takes a RemarkKind as input and returns a RemarkKind. 1868 template <typename RemarkKind, typename RemarkCallBack> 1869 void emitRemark(Instruction *I, StringRef RemarkName, 1870 RemarkCallBack &&RemarkCB) const { 1871 Function *F = I->getParent()->getParent(); 1872 auto &ORE = OREGetter(F); 1873 1874 if (RemarkName.startswith("OMP")) 1875 ORE.emit([&]() { 1876 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)) 1877 << " [" << RemarkName << "]"; 1878 }); 1879 else 1880 ORE.emit( 1881 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); }); 1882 } 1883 1884 /// Emit a remark on a function. 1885 template <typename RemarkKind, typename RemarkCallBack> 1886 void emitRemark(Function *F, StringRef RemarkName, 1887 RemarkCallBack &&RemarkCB) const { 1888 auto &ORE = OREGetter(F); 1889 1890 if (RemarkName.startswith("OMP")) 1891 ORE.emit([&]() { 1892 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)) 1893 << " [" << RemarkName << "]"; 1894 }); 1895 else 1896 ORE.emit( 1897 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); }); 1898 } 1899 1900 /// The underlying module. 1901 Module &M; 1902 1903 /// The SCC we are operating on. 1904 SmallVectorImpl<Function *> &SCC; 1905 1906 /// Callback to update the call graph, the first argument is a removed call, 1907 /// the second an optional replacement call. 1908 CallGraphUpdater &CGUpdater; 1909 1910 /// Callback to get an OptimizationRemarkEmitter from a Function * 1911 OptimizationRemarkGetter OREGetter; 1912 1913 /// OpenMP-specific information cache. Also Used for Attributor runs. 1914 OMPInformationCache &OMPInfoCache; 1915 1916 /// Attributor instance. 1917 Attributor &A; 1918 1919 /// Helper function to run Attributor on SCC. 1920 bool runAttributor(bool IsModulePass) { 1921 if (SCC.empty()) 1922 return false; 1923 1924 registerAAs(IsModulePass); 1925 1926 ChangeStatus Changed = A.run(); 1927 1928 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size() 1929 << " functions, result: " << Changed << ".\n"); 1930 1931 return Changed == ChangeStatus::CHANGED; 1932 } 1933 1934 void registerFoldRuntimeCall(RuntimeFunction RF); 1935 1936 /// Populate the Attributor with abstract attribute opportunities in the 1937 /// functions. 1938 void registerAAs(bool IsModulePass); 1939 1940 public: 1941 /// Callback to register AAs for live functions, including internal functions 1942 /// marked live during the traversal. 
1943 static void registerAAsForFunction(Attributor &A, const Function &F); 1944 }; 1945 1946 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) { 1947 if (!OMPInfoCache.ModuleSlice.empty() && !OMPInfoCache.ModuleSlice.count(&F)) 1948 return nullptr; 1949 1950 // Use a scope to keep the lifetime of the CachedKernel short. 1951 { 1952 std::optional<Kernel> &CachedKernel = UniqueKernelMap[&F]; 1953 if (CachedKernel) 1954 return *CachedKernel; 1955 1956 // TODO: We should use an AA to create an (optimistic and callback 1957 // call-aware) call graph. For now we stick to simple patterns that 1958 // are less powerful, basically the worst fixpoint. 1959 if (isKernel(F)) { 1960 CachedKernel = Kernel(&F); 1961 return *CachedKernel; 1962 } 1963 1964 CachedKernel = nullptr; 1965 if (!F.hasLocalLinkage()) { 1966 1967 // See https://openmp.llvm.org/remarks/OptimizationRemarks.html 1968 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 1969 return ORA << "Potentially unknown OpenMP target region caller."; 1970 }; 1971 emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark); 1972 1973 return nullptr; 1974 } 1975 } 1976 1977 auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel { 1978 if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 1979 // Allow use in equality comparisons. 1980 if (Cmp->isEquality()) 1981 return getUniqueKernelFor(*Cmp); 1982 return nullptr; 1983 } 1984 if (auto *CB = dyn_cast<CallBase>(U.getUser())) { 1985 // Allow direct calls. 1986 if (CB->isCallee(&U)) 1987 return getUniqueKernelFor(*CB); 1988 1989 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 1990 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 1991 // Allow the use in __kmpc_parallel_51 calls. 1992 if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI)) 1993 return getUniqueKernelFor(*CB); 1994 return nullptr; 1995 } 1996 // Disallow every other use. 1997 return nullptr; 1998 }; 1999 2000 // TODO: In the future we want to track more than just a unique kernel. 2001 SmallPtrSet<Kernel, 2> PotentialKernels; 2002 OMPInformationCache::foreachUse(F, [&](const Use &U) { 2003 PotentialKernels.insert(GetUniqueKernelForUse(U)); 2004 }); 2005 2006 Kernel K = nullptr; 2007 if (PotentialKernels.size() == 1) 2008 K = *PotentialKernels.begin(); 2009 2010 // Cache the result. 2011 UniqueKernelMap[&F] = K; 2012 2013 return K; 2014 } 2015 2016 bool OpenMPOpt::rewriteDeviceCodeStateMachine() { 2017 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 2018 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 2019 2020 bool Changed = false; 2021 if (!KernelParallelRFI) 2022 return Changed; 2023 2024 // If we have disabled state machine changes, exit 2025 if (DisableOpenMPOptStateMachineRewrite) 2026 return Changed; 2027 2028 for (Function *F : SCC) { 2029 2030 // Check if the function is a use in a __kmpc_parallel_51 call at 2031 // all. 2032 bool UnknownUse = false; 2033 bool KernelParallelUse = false; 2034 unsigned NumDirectCalls = 0; 2035 2036 SmallVector<Use *, 2> ToBeReplacedStateMachineUses; 2037 OMPInformationCache::foreachUse(*F, [&](Use &U) { 2038 if (auto *CB = dyn_cast<CallBase>(U.getUser())) 2039 if (CB->isCallee(&U)) { 2040 ++NumDirectCalls; 2041 return; 2042 } 2043 2044 if (isa<ICmpInst>(U.getUser())) { 2045 ToBeReplacedStateMachineUses.push_back(&U); 2046 return; 2047 } 2048 2049 // Find wrapper functions that represent parallel kernels. 
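        // The outlined parallel region (or its wrapper) is passed to
        // __kmpc_parallel_51 as a function pointer argument; the constant
        // below names the operand index we expect that wrapper at. Uses of
        // \p F in that position are the ones we can later replace by an ID.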
2050 CallInst *CI = 2051 OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI); 2052 const unsigned int WrapperFunctionArgNo = 6; 2053 if (!KernelParallelUse && CI && 2054 CI->getArgOperandNo(&U) == WrapperFunctionArgNo) { 2055 KernelParallelUse = true; 2056 ToBeReplacedStateMachineUses.push_back(&U); 2057 return; 2058 } 2059 UnknownUse = true; 2060 }); 2061 2062 // Do not emit a remark if we haven't seen a __kmpc_parallel_51 2063 // use. 2064 if (!KernelParallelUse) 2065 continue; 2066 2067 // If this ever hits, we should investigate. 2068 // TODO: Checking the number of uses is not a necessary restriction and 2069 // should be lifted. 2070 if (UnknownUse || NumDirectCalls != 1 || 2071 ToBeReplacedStateMachineUses.size() > 2) { 2072 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2073 return ORA << "Parallel region is used in " 2074 << (UnknownUse ? "unknown" : "unexpected") 2075 << " ways. Will not attempt to rewrite the state machine."; 2076 }; 2077 emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark); 2078 continue; 2079 } 2080 2081 // Even if we have __kmpc_parallel_51 calls, we (for now) give 2082 // up if the function is not called from a unique kernel. 2083 Kernel K = getUniqueKernelFor(*F); 2084 if (!K) { 2085 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2086 return ORA << "Parallel region is not called from a unique kernel. " 2087 "Will not attempt to rewrite the state machine."; 2088 }; 2089 emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark); 2090 continue; 2091 } 2092 2093 // We now know F is a parallel body function called only from the kernel K. 2094 // We also identified the state machine uses in which we replace the 2095 // function pointer by a new global symbol for identification purposes. This 2096 // ensures only direct calls to the function are left. 2097 2098 Module &M = *F->getParent(); 2099 Type *Int8Ty = Type::getInt8Ty(M.getContext()); 2100 2101 auto *ID = new GlobalVariable( 2102 M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage, 2103 UndefValue::get(Int8Ty), F->getName() + ".ID"); 2104 2105 for (Use *U : ToBeReplacedStateMachineUses) 2106 U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast( 2107 ID, U->get()->getType())); 2108 2109 ++NumOpenMPParallelRegionsReplacedInGPUStateMachine; 2110 2111 Changed = true; 2112 } 2113 2114 return Changed; 2115 } 2116 2117 /// Abstract Attribute for tracking ICV values. 2118 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> { 2119 using Base = StateWrapper<BooleanState, AbstractAttribute>; 2120 AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 2121 2122 void initialize(Attributor &A) override { 2123 Function *F = getAnchorScope(); 2124 if (!F || !A.isFunctionIPOAmendable(*F)) 2125 indicatePessimisticFixpoint(); 2126 } 2127 2128 /// Returns true if value is assumed to be tracked. 2129 bool isAssumedTracked() const { return getAssumed(); } 2130 2131 /// Returns true if value is known to be tracked. 2132 bool isKnownTracked() const { return getAssumed(); } 2133 2134 /// Create an abstract attribute biew for the position \p IRP. 2135 static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A); 2136 2137 /// Return the value with which \p I can be replaced for specific \p ICV. 2138 virtual std::optional<Value *> getReplacementValue(InternalControlVar ICV, 2139 const Instruction *I, 2140 Attributor &A) const { 2141 return std::nullopt; 2142 } 2143 2144 /// Return an assumed unique ICV value if a single candidate is found. 
If 2145 /// there cannot be one, return a nullptr. If it is not clear yet, return 2146 /// std::nullopt. 2147 virtual std::optional<Value *> 2148 getUniqueReplacementValue(InternalControlVar ICV) const = 0; 2149 2150 // Currently only nthreads is being tracked. 2151 // this array will only grow with time. 2152 InternalControlVar TrackableICVs[1] = {ICV_nthreads}; 2153 2154 /// See AbstractAttribute::getName() 2155 const std::string getName() const override { return "AAICVTracker"; } 2156 2157 /// See AbstractAttribute::getIdAddr() 2158 const char *getIdAddr() const override { return &ID; } 2159 2160 /// This function should return true if the type of the \p AA is AAICVTracker 2161 static bool classof(const AbstractAttribute *AA) { 2162 return (AA->getIdAddr() == &ID); 2163 } 2164 2165 static const char ID; 2166 }; 2167 2168 struct AAICVTrackerFunction : public AAICVTracker { 2169 AAICVTrackerFunction(const IRPosition &IRP, Attributor &A) 2170 : AAICVTracker(IRP, A) {} 2171 2172 // FIXME: come up with better string. 2173 const std::string getAsStr() const override { return "ICVTrackerFunction"; } 2174 2175 // FIXME: come up with some stats. 2176 void trackStatistics() const override {} 2177 2178 /// We don't manifest anything for this AA. 2179 ChangeStatus manifest(Attributor &A) override { 2180 return ChangeStatus::UNCHANGED; 2181 } 2182 2183 // Map of ICV to their values at specific program point. 2184 EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar, 2185 InternalControlVar::ICV___last> 2186 ICVReplacementValuesMap; 2187 2188 ChangeStatus updateImpl(Attributor &A) override { 2189 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 2190 2191 Function *F = getAnchorScope(); 2192 2193 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2194 2195 for (InternalControlVar ICV : TrackableICVs) { 2196 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 2197 2198 auto &ValuesMap = ICVReplacementValuesMap[ICV]; 2199 auto TrackValues = [&](Use &U, Function &) { 2200 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U); 2201 if (!CI) 2202 return false; 2203 2204 // FIXME: handle setters with more that 1 arguments. 2205 /// Track new value. 2206 if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second) 2207 HasChanged = ChangeStatus::CHANGED; 2208 2209 return false; 2210 }; 2211 2212 auto CallCheck = [&](Instruction &I) { 2213 std::optional<Value *> ReplVal = getValueForCall(A, I, ICV); 2214 if (ReplVal && ValuesMap.insert(std::make_pair(&I, *ReplVal)).second) 2215 HasChanged = ChangeStatus::CHANGED; 2216 2217 return true; 2218 }; 2219 2220 // Track all changes of an ICV. 2221 SetterRFI.foreachUse(TrackValues, F); 2222 2223 bool UsedAssumedInformation = false; 2224 A.checkForAllInstructions(CallCheck, *this, {Instruction::Call}, 2225 UsedAssumedInformation, 2226 /* CheckBBLivenessOnly */ true); 2227 2228 /// TODO: Figure out a way to avoid adding entry in 2229 /// ICVReplacementValuesMap 2230 Instruction *Entry = &F->getEntryBlock().front(); 2231 if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry)) 2232 ValuesMap.insert(std::make_pair(Entry, nullptr)); 2233 } 2234 2235 return HasChanged; 2236 } 2237 2238 /// Helper to check if \p I is a call and get the value for it if it is 2239 /// unique. 
2240 std::optional<Value *> getValueForCall(Attributor &A, const Instruction &I, 2241 InternalControlVar &ICV) const { 2242 2243 const auto *CB = dyn_cast<CallBase>(&I); 2244 if (!CB || CB->hasFnAttr("no_openmp") || 2245 CB->hasFnAttr("no_openmp_routines")) 2246 return std::nullopt; 2247 2248 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2249 auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter]; 2250 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; 2251 Function *CalledFunction = CB->getCalledFunction(); 2252 2253 // Indirect call, assume ICV changes. 2254 if (CalledFunction == nullptr) 2255 return nullptr; 2256 if (CalledFunction == GetterRFI.Declaration) 2257 return std::nullopt; 2258 if (CalledFunction == SetterRFI.Declaration) { 2259 if (ICVReplacementValuesMap[ICV].count(&I)) 2260 return ICVReplacementValuesMap[ICV].lookup(&I); 2261 2262 return nullptr; 2263 } 2264 2265 // Since we don't know, assume it changes the ICV. 2266 if (CalledFunction->isDeclaration()) 2267 return nullptr; 2268 2269 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2270 *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED); 2271 2272 if (ICVTrackingAA.isAssumedTracked()) { 2273 std::optional<Value *> URV = ICVTrackingAA.getUniqueReplacementValue(ICV); 2274 if (!URV || (*URV && AA::isValidAtPosition(AA::ValueAndContext(**URV, I), 2275 OMPInfoCache))) 2276 return URV; 2277 } 2278 2279 // If we don't know, assume it changes. 2280 return nullptr; 2281 } 2282 2283 // We don't check unique value for a function, so return std::nullopt. 2284 std::optional<Value *> 2285 getUniqueReplacementValue(InternalControlVar ICV) const override { 2286 return std::nullopt; 2287 } 2288 2289 /// Return the value with which \p I can be replaced for specific \p ICV. 2290 std::optional<Value *> getReplacementValue(InternalControlVar ICV, 2291 const Instruction *I, 2292 Attributor &A) const override { 2293 const auto &ValuesMap = ICVReplacementValuesMap[ICV]; 2294 if (ValuesMap.count(I)) 2295 return ValuesMap.lookup(I); 2296 2297 SmallVector<const Instruction *, 16> Worklist; 2298 SmallPtrSet<const Instruction *, 16> Visited; 2299 Worklist.push_back(I); 2300 2301 std::optional<Value *> ReplVal; 2302 2303 while (!Worklist.empty()) { 2304 const Instruction *CurrInst = Worklist.pop_back_val(); 2305 if (!Visited.insert(CurrInst).second) 2306 continue; 2307 2308 const BasicBlock *CurrBB = CurrInst->getParent(); 2309 2310 // Go up and look for all potential setters/calls that might change the 2311 // ICV. 2312 while ((CurrInst = CurrInst->getPrevNode())) { 2313 if (ValuesMap.count(CurrInst)) { 2314 std::optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst); 2315 // Unknown value, track new. 2316 if (!ReplVal) { 2317 ReplVal = NewReplVal; 2318 break; 2319 } 2320 2321 // If we found a new value, we can't know the icv value anymore. 2322 if (NewReplVal) 2323 if (ReplVal != NewReplVal) 2324 return nullptr; 2325 2326 break; 2327 } 2328 2329 std::optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV); 2330 if (!NewReplVal) 2331 continue; 2332 2333 // Unknown value, track new. 2334 if (!ReplVal) { 2335 ReplVal = NewReplVal; 2336 break; 2337 } 2338 2339 // if (NewReplVal.hasValue()) 2340 // We found a new value, we can't know the icv value anymore. 2341 if (ReplVal != NewReplVal) 2342 return nullptr; 2343 } 2344 2345 // If we are in the same BB and we have a value, we are done. 
2346 if (CurrBB == I->getParent() && ReplVal) 2347 return ReplVal; 2348 2349 // Go through all predecessors and add terminators for analysis. 2350 for (const BasicBlock *Pred : predecessors(CurrBB)) 2351 if (const Instruction *Terminator = Pred->getTerminator()) 2352 Worklist.push_back(Terminator); 2353 } 2354 2355 return ReplVal; 2356 } 2357 }; 2358 2359 struct AAICVTrackerFunctionReturned : AAICVTracker { 2360 AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A) 2361 : AAICVTracker(IRP, A) {} 2362 2363 // FIXME: come up with better string. 2364 const std::string getAsStr() const override { 2365 return "ICVTrackerFunctionReturned"; 2366 } 2367 2368 // FIXME: come up with some stats. 2369 void trackStatistics() const override {} 2370 2371 /// We don't manifest anything for this AA. 2372 ChangeStatus manifest(Attributor &A) override { 2373 return ChangeStatus::UNCHANGED; 2374 } 2375 2376 // Map of ICV to their values at specific program point. 2377 EnumeratedArray<std::optional<Value *>, InternalControlVar, 2378 InternalControlVar::ICV___last> 2379 ICVReplacementValuesMap; 2380 2381 /// Return the value with which \p I can be replaced for specific \p ICV. 2382 std::optional<Value *> 2383 getUniqueReplacementValue(InternalControlVar ICV) const override { 2384 return ICVReplacementValuesMap[ICV]; 2385 } 2386 2387 ChangeStatus updateImpl(Attributor &A) override { 2388 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2389 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2390 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2391 2392 if (!ICVTrackingAA.isAssumedTracked()) 2393 return indicatePessimisticFixpoint(); 2394 2395 for (InternalControlVar ICV : TrackableICVs) { 2396 std::optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2397 std::optional<Value *> UniqueICVValue; 2398 2399 auto CheckReturnInst = [&](Instruction &I) { 2400 std::optional<Value *> NewReplVal = 2401 ICVTrackingAA.getReplacementValue(ICV, &I, A); 2402 2403 // If we found a second ICV value there is no unique returned value. 2404 if (UniqueICVValue && UniqueICVValue != NewReplVal) 2405 return false; 2406 2407 UniqueICVValue = NewReplVal; 2408 2409 return true; 2410 }; 2411 2412 bool UsedAssumedInformation = false; 2413 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, 2414 UsedAssumedInformation, 2415 /* CheckBBLivenessOnly */ true)) 2416 UniqueICVValue = nullptr; 2417 2418 if (UniqueICVValue == ReplVal) 2419 continue; 2420 2421 ReplVal = UniqueICVValue; 2422 Changed = ChangeStatus::CHANGED; 2423 } 2424 2425 return Changed; 2426 } 2427 }; 2428 2429 struct AAICVTrackerCallSite : AAICVTracker { 2430 AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) 2431 : AAICVTracker(IRP, A) {} 2432 2433 void initialize(Attributor &A) override { 2434 Function *F = getAnchorScope(); 2435 if (!F || !A.isFunctionIPOAmendable(*F)) 2436 indicatePessimisticFixpoint(); 2437 2438 // We only initialize this AA for getters, so we need to know which ICV it 2439 // gets. 2440 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2441 for (InternalControlVar ICV : TrackableICVs) { 2442 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 2443 auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; 2444 if (Getter.Declaration == getAssociatedFunction()) { 2445 AssociatedICV = ICVInfo.Kind; 2446 return; 2447 } 2448 } 2449 2450 /// Unknown ICV. 
2451 indicatePessimisticFixpoint(); 2452 } 2453 2454 ChangeStatus manifest(Attributor &A) override { 2455 if (!ReplVal || !*ReplVal) 2456 return ChangeStatus::UNCHANGED; 2457 2458 A.changeAfterManifest(IRPosition::inst(*getCtxI()), **ReplVal); 2459 A.deleteAfterManifest(*getCtxI()); 2460 2461 return ChangeStatus::CHANGED; 2462 } 2463 2464 // FIXME: come up with better string. 2465 const std::string getAsStr() const override { return "ICVTrackerCallSite"; } 2466 2467 // FIXME: come up with some stats. 2468 void trackStatistics() const override {} 2469 2470 InternalControlVar AssociatedICV; 2471 std::optional<Value *> ReplVal; 2472 2473 ChangeStatus updateImpl(Attributor &A) override { 2474 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2475 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2476 2477 // We don't have any information, so we assume it changes the ICV. 2478 if (!ICVTrackingAA.isAssumedTracked()) 2479 return indicatePessimisticFixpoint(); 2480 2481 std::optional<Value *> NewReplVal = 2482 ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A); 2483 2484 if (ReplVal == NewReplVal) 2485 return ChangeStatus::UNCHANGED; 2486 2487 ReplVal = NewReplVal; 2488 return ChangeStatus::CHANGED; 2489 } 2490 2491 // Return the value with which associated value can be replaced for specific 2492 // \p ICV. 2493 std::optional<Value *> 2494 getUniqueReplacementValue(InternalControlVar ICV) const override { 2495 return ReplVal; 2496 } 2497 }; 2498 2499 struct AAICVTrackerCallSiteReturned : AAICVTracker { 2500 AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) 2501 : AAICVTracker(IRP, A) {} 2502 2503 // FIXME: come up with better string. 2504 const std::string getAsStr() const override { 2505 return "ICVTrackerCallSiteReturned"; 2506 } 2507 2508 // FIXME: come up with some stats. 2509 void trackStatistics() const override {} 2510 2511 /// We don't manifest anything for this AA. 2512 ChangeStatus manifest(Attributor &A) override { 2513 return ChangeStatus::UNCHANGED; 2514 } 2515 2516 // Map of ICV to their values at specific program point. 2517 EnumeratedArray<std::optional<Value *>, InternalControlVar, 2518 InternalControlVar::ICV___last> 2519 ICVReplacementValuesMap; 2520 2521 /// Return the value with which associated value can be replaced for specific 2522 /// \p ICV. 2523 std::optional<Value *> 2524 getUniqueReplacementValue(InternalControlVar ICV) const override { 2525 return ICVReplacementValuesMap[ICV]; 2526 } 2527 2528 ChangeStatus updateImpl(Attributor &A) override { 2529 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2530 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2531 *this, IRPosition::returned(*getAssociatedFunction()), 2532 DepClassTy::REQUIRED); 2533 2534 // We don't have any information, so we assume it changes the ICV. 
2535 if (!ICVTrackingAA.isAssumedTracked()) 2536 return indicatePessimisticFixpoint(); 2537 2538 for (InternalControlVar ICV : TrackableICVs) { 2539 std::optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2540 std::optional<Value *> NewReplVal = 2541 ICVTrackingAA.getUniqueReplacementValue(ICV); 2542 2543 if (ReplVal == NewReplVal) 2544 continue; 2545 2546 ReplVal = NewReplVal; 2547 Changed = ChangeStatus::CHANGED; 2548 } 2549 return Changed; 2550 } 2551 }; 2552 2553 struct AAExecutionDomainFunction : public AAExecutionDomain { 2554 AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A) 2555 : AAExecutionDomain(IRP, A) {} 2556 2557 ~AAExecutionDomainFunction() { 2558 delete RPOT; 2559 } 2560 2561 void initialize(Attributor &A) override { 2562 if (getAnchorScope()->isDeclaration()) { 2563 indicatePessimisticFixpoint(); 2564 return; 2565 } 2566 RPOT = new ReversePostOrderTraversal<Function *>(getAnchorScope()); 2567 } 2568 2569 const std::string getAsStr() const override { 2570 unsigned TotalBlocks = 0, InitialThreadBlocks = 0; 2571 for (auto &It : BEDMap) { 2572 TotalBlocks++; 2573 InitialThreadBlocks += It.getSecond().IsExecutedByInitialThreadOnly; 2574 } 2575 return "[AAExecutionDomain] " + std::to_string(InitialThreadBlocks) + "/" + 2576 std::to_string(TotalBlocks) + " executed by initial thread only"; 2577 } 2578 2579 /// See AbstractAttribute::trackStatistics(). 2580 void trackStatistics() const override {} 2581 2582 ChangeStatus manifest(Attributor &A) override { 2583 LLVM_DEBUG({ 2584 for (const BasicBlock &BB : *getAnchorScope()) { 2585 if (!isExecutedByInitialThreadOnly(BB)) 2586 continue; 2587 dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " " 2588 << BB.getName() << " is executed by a single thread.\n"; 2589 } 2590 }); 2591 2592 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2593 2594 if (DisableOpenMPOptBarrierElimination) 2595 return Changed; 2596 2597 SmallPtrSet<CallBase *, 16> DeletedBarriers; 2598 auto HandleAlignedBarrier = [&](CallBase *CB) { 2599 const ExecutionDomainTy &ED = CEDMap[CB]; 2600 if (!ED.IsReachedFromAlignedBarrierOnly || 2601 ED.EncounteredNonLocalSideEffect) 2602 return; 2603 2604 // We can remove this barrier, if it is one, or all aligned barriers 2605 // reaching the kernel end. In the latter case we can transitively work 2606 // our way back until we find a barrier that guards a side-effect if we 2607 // are dealing with the kernel end here. 2608 if (CB) { 2609 DeletedBarriers.insert(CB); 2610 A.deleteAfterManifest(*CB); 2611 ++NumBarriersEliminated; 2612 Changed = ChangeStatus::CHANGED; 2613 } else if (!ED.AlignedBarriers.empty()) { 2614 NumBarriersEliminated += ED.AlignedBarriers.size(); 2615 Changed = ChangeStatus::CHANGED; 2616 SmallVector<CallBase *> Worklist(ED.AlignedBarriers.begin(), 2617 ED.AlignedBarriers.end()); 2618 SmallSetVector<CallBase *, 16> Visited; 2619 while (!Worklist.empty()) { 2620 CallBase *LastCB = Worklist.pop_back_val(); 2621 if (!Visited.insert(LastCB)) 2622 continue; 2623 if (!DeletedBarriers.count(LastCB)) { 2624 A.deleteAfterManifest(*LastCB); 2625 continue; 2626 } 2627 // The final aligned barrier (LastCB) reaching the kernel end was 2628 // removed already. This means we can go one step further and remove 2629 // the barriers encoutered last before (LastCB). 
2630 const ExecutionDomainTy &LastED = CEDMap[LastCB]; 2631 Worklist.append(LastED.AlignedBarriers.begin(), 2632 LastED.AlignedBarriers.end()); 2633 } 2634 } 2635 2636 // If we actually eliminated a barrier we need to eliminate the associated 2637 // llvm.assumes as well to avoid creating UB. 2638 if (!ED.EncounteredAssumes.empty() && (CB || !ED.AlignedBarriers.empty())) 2639 for (auto *AssumeCB : ED.EncounteredAssumes) 2640 A.deleteAfterManifest(*AssumeCB); 2641 }; 2642 2643 for (auto *CB : AlignedBarriers) 2644 HandleAlignedBarrier(CB); 2645 2646 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2647 // Handle the "kernel end barrier" for kernels too. 2648 if (OMPInfoCache.Kernels.count(getAnchorScope())) 2649 HandleAlignedBarrier(nullptr); 2650 2651 return Changed; 2652 } 2653 2654 /// Merge barrier and assumption information from \p PredED into the successor 2655 /// \p ED. 2656 void 2657 mergeInPredecessorBarriersAndAssumptions(Attributor &A, ExecutionDomainTy &ED, 2658 const ExecutionDomainTy &PredED); 2659 2660 /// Merge all information from \p PredED into the successor \p ED. If 2661 /// \p InitialEdgeOnly is set, only the initial edge will enter the block 2662 /// represented by \p ED from this predecessor. 2663 void mergeInPredecessor(Attributor &A, ExecutionDomainTy &ED, 2664 const ExecutionDomainTy &PredED, 2665 bool InitialEdgeOnly = false); 2666 2667 /// Accumulate information for the entry block in \p EntryBBED. 2668 void handleEntryBB(Attributor &A, ExecutionDomainTy &EntryBBED); 2669 2670 /// See AbstractAttribute::updateImpl. 2671 ChangeStatus updateImpl(Attributor &A) override; 2672 2673 /// Query interface, see AAExecutionDomain 2674 ///{ 2675 bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override { 2676 if (!isValidState()) 2677 return false; 2678 return BEDMap.lookup(&BB).IsExecutedByInitialThreadOnly; 2679 } 2680 2681 bool isExecutedInAlignedRegion(Attributor &A, 2682 const Instruction &I) const override { 2683 assert(I.getFunction() == getAnchorScope() && 2684 "Instruction is out of scope!"); 2685 if (!isValidState()) 2686 return false; 2687 2688 const Instruction *CurI; 2689 2690 // Check forward until a call or the block end is reached. 2691 CurI = &I; 2692 do { 2693 auto *CB = dyn_cast<CallBase>(CurI); 2694 if (!CB) 2695 continue; 2696 if (CB != &I && AlignedBarriers.contains(const_cast<CallBase *>(CB))) { 2697 break; 2698 } 2699 const auto &It = CEDMap.find(CB); 2700 if (It == CEDMap.end()) 2701 continue; 2702 if (!It->getSecond().IsReachingAlignedBarrierOnly) 2703 return false; 2704 break; 2705 } while ((CurI = CurI->getNextNonDebugInstruction())); 2706 2707 if (!CurI && !BEDMap.lookup(I.getParent()).IsReachingAlignedBarrierOnly) 2708 return false; 2709 2710 // Check backward until a call or the block beginning is reached. 
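    // Note: the forward walk above established that, starting at \p I, we
    // only reach aligned barriers (or code that does) before any problematic
    // synchronization; the backward walk below checks that \p I is also
    // reached from aligned barriers only, looking through known callees via
    // their execution domain where possible.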
2711 CurI = &I; 2712 do { 2713 auto *CB = dyn_cast<CallBase>(CurI); 2714 if (!CB) 2715 continue; 2716 if (CB != &I && AlignedBarriers.contains(const_cast<CallBase *>(CB))) { 2717 break; 2718 } 2719 const auto &It = CEDMap.find(CB); 2720 if (It == CEDMap.end()) 2721 continue; 2722 if (!AA::isNoSyncInst(A, *CB, *this)) { 2723 if (It->getSecond().IsReachedFromAlignedBarrierOnly) { 2724 break; 2725 } 2726 return false; 2727 } 2728 2729 Function *Callee = CB->getCalledFunction(); 2730 if (!Callee || Callee->isDeclaration()) 2731 return false; 2732 const auto &EDAA = A.getAAFor<AAExecutionDomain>( 2733 *this, IRPosition::function(*Callee), DepClassTy::OPTIONAL); 2734 if (!EDAA.getState().isValidState()) 2735 return false; 2736 if (!EDAA.getFunctionExecutionDomain().IsReachedFromAlignedBarrierOnly) 2737 return false; 2738 break; 2739 } while ((CurI = CurI->getPrevNonDebugInstruction())); 2740 2741 if (!CurI && 2742 !llvm::all_of( 2743 predecessors(I.getParent()), [&](const BasicBlock *PredBB) { 2744 return BEDMap.lookup(PredBB).IsReachedFromAlignedBarrierOnly; 2745 })) { 2746 return false; 2747 } 2748 2749 // On neither traversal we found a anything but aligned barriers. 2750 return true; 2751 } 2752 2753 ExecutionDomainTy getExecutionDomain(const BasicBlock &BB) const override { 2754 assert(isValidState() && 2755 "No request should be made against an invalid state!"); 2756 return BEDMap.lookup(&BB); 2757 } 2758 ExecutionDomainTy getExecutionDomain(const CallBase &CB) const override { 2759 assert(isValidState() && 2760 "No request should be made against an invalid state!"); 2761 return CEDMap.lookup(&CB); 2762 } 2763 ExecutionDomainTy getFunctionExecutionDomain() const override { 2764 assert(isValidState() && 2765 "No request should be made against an invalid state!"); 2766 return BEDMap.lookup(nullptr); 2767 } 2768 ///} 2769 2770 // Check if the edge into the successor block contains a condition that only 2771 // lets the main thread execute it. 2772 static bool isInitialThreadOnlyEdge(Attributor &A, BranchInst *Edge, 2773 BasicBlock &SuccessorBB) { 2774 if (!Edge || !Edge->isConditional()) 2775 return false; 2776 if (Edge->getSuccessor(0) != &SuccessorBB) 2777 return false; 2778 2779 auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition()); 2780 if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality()) 2781 return false; 2782 2783 ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1)); 2784 if (!C) 2785 return false; 2786 2787 // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!) 2788 if (C->isAllOnesValue()) { 2789 auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0)); 2790 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2791 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 2792 CB = CB ? 
OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr; 2793 if (!CB) 2794 return false; 2795 const int InitModeArgNo = 1; 2796 auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo)); 2797 return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC); 2798 } 2799 2800 if (C->isZero()) { 2801 // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x() 2802 if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0))) 2803 if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x) 2804 return true; 2805 2806 // Match: 0 == llvm.amdgcn.workitem.id.x() 2807 if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0))) 2808 if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x) 2809 return true; 2810 } 2811 2812 return false; 2813 }; 2814 2815 /// Mapping containing information per block. 2816 DenseMap<const BasicBlock *, ExecutionDomainTy> BEDMap; 2817 DenseMap<const CallBase *, ExecutionDomainTy> CEDMap; 2818 SmallSetVector<CallBase *, 16> AlignedBarriers; 2819 2820 ReversePostOrderTraversal<Function *> *RPOT = nullptr; 2821 }; 2822 2823 void AAExecutionDomainFunction::mergeInPredecessorBarriersAndAssumptions( 2824 Attributor &A, ExecutionDomainTy &ED, const ExecutionDomainTy &PredED) { 2825 for (auto *EA : PredED.EncounteredAssumes) 2826 ED.addAssumeInst(A, *EA); 2827 2828 for (auto *AB : PredED.AlignedBarriers) 2829 ED.addAlignedBarrier(A, *AB); 2830 } 2831 2832 void AAExecutionDomainFunction::mergeInPredecessor( 2833 Attributor &A, ExecutionDomainTy &ED, const ExecutionDomainTy &PredED, 2834 bool InitialEdgeOnly) { 2835 ED.IsExecutedByInitialThreadOnly = 2836 InitialEdgeOnly || (PredED.IsExecutedByInitialThreadOnly && 2837 ED.IsExecutedByInitialThreadOnly); 2838 2839 ED.IsReachedFromAlignedBarrierOnly = ED.IsReachedFromAlignedBarrierOnly && 2840 PredED.IsReachedFromAlignedBarrierOnly; 2841 ED.EncounteredNonLocalSideEffect = 2842 ED.EncounteredNonLocalSideEffect | PredED.EncounteredNonLocalSideEffect; 2843 if (ED.IsReachedFromAlignedBarrierOnly) 2844 mergeInPredecessorBarriersAndAssumptions(A, ED, PredED); 2845 else 2846 ED.clearAssumeInstAndAlignedBarriers(); 2847 } 2848 2849 void AAExecutionDomainFunction::handleEntryBB(Attributor &A, 2850 ExecutionDomainTy &EntryBBED) { 2851 SmallVector<ExecutionDomainTy> PredExecDomains; 2852 auto PredForCallSite = [&](AbstractCallSite ACS) { 2853 const auto &EDAA = A.getAAFor<AAExecutionDomain>( 2854 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2855 DepClassTy::OPTIONAL); 2856 if (!EDAA.getState().isValidState()) 2857 return false; 2858 PredExecDomains.emplace_back( 2859 EDAA.getExecutionDomain(*cast<CallBase>(ACS.getInstruction()))); 2860 return true; 2861 }; 2862 2863 bool AllCallSitesKnown; 2864 if (A.checkForAllCallSites(PredForCallSite, *this, 2865 /* RequiresAllCallSites */ true, 2866 AllCallSitesKnown)) { 2867 for (const auto &PredED : PredExecDomains) 2868 mergeInPredecessor(A, EntryBBED, PredED); 2869 2870 } else { 2871 // We could not find all predecessors, so this is either a kernel or a 2872 // function with external linkage (or with some other weird uses). 
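    // For a kernel we can still start optimistically: kernel entry behaves
    // like an aligned barrier (all threads start the kernel together) and
    // nothing has executed yet, so no non-local side effect can have been
    // encountered. An externally reachable function, in contrast, has to
    // assume arbitrary callers and starts from the pessimistic state.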
2873 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2874 if (OMPInfoCache.Kernels.count(getAnchorScope())) { 2875 EntryBBED.IsExecutedByInitialThreadOnly = false; 2876 EntryBBED.IsReachedFromAlignedBarrierOnly = true; 2877 EntryBBED.EncounteredNonLocalSideEffect = false; 2878 } else { 2879 EntryBBED.IsExecutedByInitialThreadOnly = false; 2880 EntryBBED.IsReachedFromAlignedBarrierOnly = false; 2881 EntryBBED.EncounteredNonLocalSideEffect = true; 2882 } 2883 } 2884 2885 auto &FnED = BEDMap[nullptr]; 2886 FnED.IsReachingAlignedBarrierOnly &= 2887 EntryBBED.IsReachedFromAlignedBarrierOnly; 2888 } 2889 2890 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) { 2891 2892 bool Changed = false; 2893 2894 // Helper to deal with an aligned barrier encountered during the forward 2895 // traversal. \p CB is the aligned barrier, \p ED is the execution domain when 2896 // it was encountered. 2897 auto HandleAlignedBarrier = [&](CallBase *CB, ExecutionDomainTy &ED) { 2898 if (CB) 2899 Changed |= AlignedBarriers.insert(CB); 2900 // First, update the barrier ED kept in the separate CEDMap. 2901 auto &CallED = CEDMap[CB]; 2902 mergeInPredecessor(A, CallED, ED); 2903 // Next adjust the ED we use for the traversal. 2904 ED.EncounteredNonLocalSideEffect = false; 2905 ED.IsReachedFromAlignedBarrierOnly = true; 2906 // Aligned barrier collection has to come last. 2907 ED.clearAssumeInstAndAlignedBarriers(); 2908 if (CB) 2909 ED.addAlignedBarrier(A, *CB); 2910 }; 2911 2912 auto &LivenessAA = 2913 A.getAAFor<AAIsDead>(*this, getIRPosition(), DepClassTy::OPTIONAL); 2914 2915 // Set \p R to \p V and report true if that changed \p R. 2916 auto SetAndRecord = [&](bool &R, bool V) { 2917 bool Eq = (R == V); 2918 R = V; 2919 return !Eq; 2920 }; 2921 2922 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2923 2924 Function *F = getAnchorScope(); 2925 BasicBlock &EntryBB = F->getEntryBlock(); 2926 bool IsKernel = OMPInfoCache.Kernels.count(F); 2927 2928 SmallVector<Instruction *> SyncInstWorklist; 2929 for (auto &RIt : *RPOT) { 2930 BasicBlock &BB = *RIt; 2931 2932 bool IsEntryBB = &BB == &EntryBB; 2933 // TODO: We use local reasoning since we don't have a divergence analysis 2934 // running as well. We could basically allow uniform branches here. 2935 bool AlignedBarrierLastInBlock = IsEntryBB && IsKernel; 2936 ExecutionDomainTy ED; 2937 // Propagate "incoming edges" into information about this block. 2938 if (IsEntryBB) { 2939 handleEntryBB(A, ED); 2940 } else { 2941 // For live non-entry blocks we only propagate 2942 // information via live edges. 2943 if (LivenessAA.isAssumedDead(&BB)) 2944 continue; 2945 2946 for (auto *PredBB : predecessors(&BB)) { 2947 if (LivenessAA.isEdgeDead(PredBB, &BB)) 2948 continue; 2949 bool InitialEdgeOnly = isInitialThreadOnlyEdge( 2950 A, dyn_cast<BranchInst>(PredBB->getTerminator()), BB); 2951 mergeInPredecessor(A, ED, BEDMap[PredBB], InitialEdgeOnly); 2952 } 2953 } 2954 2955 // Now we traverse the block, accumulate effects in ED and attach 2956 // information to calls. 2957 for (Instruction &I : BB) { 2958 bool UsedAssumedInformation; 2959 if (A.isAssumedDead(I, *this, &LivenessAA, UsedAssumedInformation, 2960 /* CheckBBLivenessOnly */ false, DepClassTy::OPTIONAL, 2961 /* CheckForDeadStore */ true)) 2962 continue; 2963 2964 // Assume calls and "assume-like" intrinsics (dbg, lifetime, ...) are handled first; the 2965 // former are collected, the latter are ignored.
2966 if (auto *II = dyn_cast<IntrinsicInst>(&I)) { 2967 if (auto *AI = dyn_cast_or_null<AssumeInst>(II)) { 2968 ED.addAssumeInst(A, *AI); 2969 continue; 2970 } 2971 // TODO: Should we also collect and delete lifetime markers? 2972 if (II->isAssumeLikeIntrinsic()) 2973 continue; 2974 } 2975 2976 auto *CB = dyn_cast<CallBase>(&I); 2977 bool IsNoSync = AA::isNoSyncInst(A, I, *this); 2978 bool IsAlignedBarrier = 2979 !IsNoSync && CB && 2980 AANoSync::isAlignedBarrier(*CB, AlignedBarrierLastInBlock); 2981 2982 AlignedBarrierLastInBlock &= IsNoSync; 2983 2984 // Next we check for calls. Aligned barriers are handled 2985 // explicitly, everything else is kept for the backward traversal and will 2986 // also affect our state. 2987 if (CB) { 2988 if (IsAlignedBarrier) { 2989 HandleAlignedBarrier(CB, ED); 2990 AlignedBarrierLastInBlock = true; 2991 continue; 2992 } 2993 2994 // Check the pointer(s) of a memory intrinsic explicitly. 2995 if (isa<MemIntrinsic>(&I)) { 2996 if (!ED.EncounteredNonLocalSideEffect && 2997 AA::isPotentiallyAffectedByBarrier(A, I, *this)) 2998 ED.EncounteredNonLocalSideEffect = true; 2999 if (!IsNoSync) { 3000 ED.IsReachedFromAlignedBarrierOnly = false; 3001 SyncInstWorklist.push_back(&I); 3002 } 3003 continue; 3004 } 3005 3006 // Record how we entered the call, then accumulate the effect of the 3007 // call in ED for potential use by the callee. 3008 auto &CallED = CEDMap[CB]; 3009 mergeInPredecessor(A, CallED, ED); 3010 3011 // If we have a sync-definition we can check if it starts/ends in an 3012 // aligned barrier. If we are unsure we assume any sync breaks 3013 // alignment. 3014 Function *Callee = CB->getCalledFunction(); 3015 if (!IsNoSync && Callee && !Callee->isDeclaration()) { 3016 const auto &EDAA = A.getAAFor<AAExecutionDomain>( 3017 *this, IRPosition::function(*Callee), DepClassTy::OPTIONAL); 3018 if (EDAA.getState().isValidState()) { 3019 const auto &CalleeED = EDAA.getFunctionExecutionDomain(); 3020 ED.IsReachedFromAlignedBarrierOnly = 3021 CallED.IsReachedFromAlignedBarrierOnly = 3022 CalleeED.IsReachedFromAlignedBarrierOnly; 3023 AlignedBarrierLastInBlock = ED.IsReachedFromAlignedBarrierOnly; 3024 if (IsNoSync || !CalleeED.IsReachedFromAlignedBarrierOnly) 3025 ED.EncounteredNonLocalSideEffect |= 3026 CalleeED.EncounteredNonLocalSideEffect; 3027 else 3028 ED.EncounteredNonLocalSideEffect = 3029 CalleeED.EncounteredNonLocalSideEffect; 3030 if (!CalleeED.IsReachingAlignedBarrierOnly) 3031 SyncInstWorklist.push_back(&I); 3032 if (CalleeED.IsReachedFromAlignedBarrierOnly) 3033 mergeInPredecessorBarriersAndAssumptions(A, ED, CalleeED); 3034 continue; 3035 } 3036 } 3037 if (!IsNoSync) 3038 ED.IsReachedFromAlignedBarrierOnly = 3039 CallED.IsReachedFromAlignedBarrierOnly = false; 3040 AlignedBarrierLastInBlock &= ED.IsReachedFromAlignedBarrierOnly; 3041 ED.EncounteredNonLocalSideEffect |= !CB->doesNotAccessMemory(); 3042 if (!IsNoSync) 3043 SyncInstWorklist.push_back(&I); 3044 } 3045 3046 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory()) 3047 continue; 3048 3049 // If we have a callee we try to use fine-grained information to 3050 // determine local side-effects. 
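      // Concretely: if AAMemoryLocation can enumerate all locations the call
      // may access and none of them could be affected by a barrier (e.g.,
      // only thread-local or constant memory), the call does not introduce a
      // non-local side effect and we can skip it here.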
3051 if (CB) { 3052 const auto &MemAA = A.getAAFor<AAMemoryLocation>( 3053 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3054 3055 auto AccessPred = [&](const Instruction *I, const Value *Ptr, 3056 AAMemoryLocation::AccessKind, 3057 AAMemoryLocation::MemoryLocationsKind) { 3058 return !AA::isPotentiallyAffectedByBarrier(A, {Ptr}, *this, I); 3059 }; 3060 if (MemAA.getState().isValidState() && 3061 MemAA.checkForAllAccessesToMemoryKind( 3062 AccessPred, AAMemoryLocation::ALL_LOCATIONS)) 3063 continue; 3064 } 3065 3066 if (!I.mayHaveSideEffects() && OMPInfoCache.isOnlyUsedByAssume(I)) 3067 continue; 3068 3069 if (auto *LI = dyn_cast<LoadInst>(&I)) 3070 if (LI->hasMetadata(LLVMContext::MD_invariant_load)) 3071 continue; 3072 3073 if (!ED.EncounteredNonLocalSideEffect && 3074 AA::isPotentiallyAffectedByBarrier(A, I, *this)) 3075 ED.EncounteredNonLocalSideEffect = true; 3076 } 3077 3078 if (!isa<UnreachableInst>(BB.getTerminator()) && 3079 !BB.getTerminator()->getNumSuccessors()) { 3080 3081 auto &FnED = BEDMap[nullptr]; 3082 mergeInPredecessor(A, FnED, ED); 3083 3084 if (IsKernel) 3085 HandleAlignedBarrier(nullptr, ED); 3086 } 3087 3088 ExecutionDomainTy &StoredED = BEDMap[&BB]; 3089 ED.IsReachingAlignedBarrierOnly = StoredED.IsReachingAlignedBarrierOnly; 3090 3091 // Check if we computed anything different as part of the forward 3092 // traversal. We do not take assumptions and aligned barriers into account 3093 // as they do not influence the state we iterate. Backward traversal values 3094 // are handled later on. 3095 if (ED.IsExecutedByInitialThreadOnly != 3096 StoredED.IsExecutedByInitialThreadOnly || 3097 ED.IsReachedFromAlignedBarrierOnly != 3098 StoredED.IsReachedFromAlignedBarrierOnly || 3099 ED.EncounteredNonLocalSideEffect != 3100 StoredED.EncounteredNonLocalSideEffect) 3101 Changed = true; 3102 3103 // Update the state with the new value. 3104 StoredED = std::move(ED); 3105 } 3106 3107 // Propagate (non-aligned) sync instruction effects backwards until the 3108 // entry is hit or an aligned barrier. 3109 SmallSetVector<BasicBlock *, 16> Visited; 3110 while (!SyncInstWorklist.empty()) { 3111 Instruction *SyncInst = SyncInstWorklist.pop_back_val(); 3112 Instruction *CurInst = SyncInst; 3113 bool HitAlignedBarrier = false; 3114 while ((CurInst = CurInst->getPrevNode())) { 3115 auto *CB = dyn_cast<CallBase>(CurInst); 3116 if (!CB) 3117 continue; 3118 auto &CallED = CEDMap[CB]; 3119 if (SetAndRecord(CallED.IsReachingAlignedBarrierOnly, false)) 3120 Changed = true; 3121 HitAlignedBarrier = AlignedBarriers.count(CB); 3122 if (HitAlignedBarrier) 3123 break; 3124 } 3125 if (HitAlignedBarrier) 3126 continue; 3127 BasicBlock *SyncBB = SyncInst->getParent(); 3128 for (auto *PredBB : predecessors(SyncBB)) { 3129 if (LivenessAA.isEdgeDead(PredBB, SyncBB)) 3130 continue; 3131 if (!Visited.insert(PredBB)) 3132 continue; 3133 SyncInstWorklist.push_back(PredBB->getTerminator()); 3134 auto &PredED = BEDMap[PredBB]; 3135 if (SetAndRecord(PredED.IsReachingAlignedBarrierOnly, false)) 3136 Changed = true; 3137 } 3138 if (SyncBB != &EntryBB) 3139 continue; 3140 auto &FnED = BEDMap[nullptr]; 3141 if (SetAndRecord(FnED.IsReachingAlignedBarrierOnly, false)) 3142 Changed = true; 3143 } 3144 3145 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3146 } 3147 3148 /// Try to replace memory allocation calls called by a single thread with a 3149 /// static buffer of shared memory. 
3150 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> { 3151 using Base = StateWrapper<BooleanState, AbstractAttribute>; 3152 AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 3153 3154 /// Create an abstract attribute view for the position \p IRP. 3155 static AAHeapToShared &createForPosition(const IRPosition &IRP, 3156 Attributor &A); 3157 3158 /// Returns true if HeapToShared conversion is assumed to be possible. 3159 virtual bool isAssumedHeapToShared(CallBase &CB) const = 0; 3160 3161 /// Returns true if HeapToShared conversion is assumed and the CB is a 3162 /// callsite to a free operation to be removed. 3163 virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0; 3164 3165 /// See AbstractAttribute::getName(). 3166 const std::string getName() const override { return "AAHeapToShared"; } 3167 3168 /// See AbstractAttribute::getIdAddr(). 3169 const char *getIdAddr() const override { return &ID; } 3170 3171 /// This function should return true if the type of the \p AA is 3172 /// AAHeapToShared. 3173 static bool classof(const AbstractAttribute *AA) { 3174 return (AA->getIdAddr() == &ID); 3175 } 3176 3177 /// Unique ID (due to the unique address) 3178 static const char ID; 3179 }; 3180 3181 struct AAHeapToSharedFunction : public AAHeapToShared { 3182 AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A) 3183 : AAHeapToShared(IRP, A) {} 3184 3185 const std::string getAsStr() const override { 3186 return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) + 3187 " malloc calls eligible."; 3188 } 3189 3190 /// See AbstractAttribute::trackStatistics(). 3191 void trackStatistics() const override {} 3192 3193 /// This functions finds free calls that will be removed by the 3194 /// HeapToShared transformation. 3195 void findPotentialRemovedFreeCalls(Attributor &A) { 3196 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3197 auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; 3198 3199 PotentialRemovedFreeCalls.clear(); 3200 // Update free call users of found malloc calls. 
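    // Only allocations with exactly one associated __kmpc_free_shared call
    // contribute below; their single free call is recorded as potentially
    // removable, everything else is left alone.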
3201 for (CallBase *CB : MallocCalls) { 3202 SmallVector<CallBase *, 4> FreeCalls; 3203 for (auto *U : CB->users()) { 3204 CallBase *C = dyn_cast<CallBase>(U); 3205 if (C && C->getCalledFunction() == FreeRFI.Declaration) 3206 FreeCalls.push_back(C); 3207 } 3208 3209 if (FreeCalls.size() != 1) 3210 continue; 3211 3212 PotentialRemovedFreeCalls.insert(FreeCalls.front()); 3213 } 3214 } 3215 3216 void initialize(Attributor &A) override { 3217 if (DisableOpenMPOptDeglobalization) { 3218 indicatePessimisticFixpoint(); 3219 return; 3220 } 3221 3222 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3223 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3224 if (!RFI.Declaration) 3225 return; 3226 3227 Attributor::SimplifictionCallbackTy SCB = 3228 [](const IRPosition &, const AbstractAttribute *, 3229 bool &) -> std::optional<Value *> { return nullptr; }; 3230 3231 Function *F = getAnchorScope(); 3232 for (User *U : RFI.Declaration->users()) 3233 if (CallBase *CB = dyn_cast<CallBase>(U)) { 3234 if (CB->getFunction() != F) 3235 continue; 3236 MallocCalls.insert(CB); 3237 A.registerSimplificationCallback(IRPosition::callsite_returned(*CB), 3238 SCB); 3239 } 3240 3241 findPotentialRemovedFreeCalls(A); 3242 } 3243 3244 bool isAssumedHeapToShared(CallBase &CB) const override { 3245 return isValidState() && MallocCalls.count(&CB); 3246 } 3247 3248 bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override { 3249 return isValidState() && PotentialRemovedFreeCalls.count(&CB); 3250 } 3251 3252 ChangeStatus manifest(Attributor &A) override { 3253 if (MallocCalls.empty()) 3254 return ChangeStatus::UNCHANGED; 3255 3256 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3257 auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; 3258 3259 Function *F = getAnchorScope(); 3260 auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this, 3261 DepClassTy::OPTIONAL); 3262 3263 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3264 for (CallBase *CB : MallocCalls) { 3265 // Skip replacing this if HeapToStack has already claimed it. 3266 if (HS && HS->isAssumedHeapToStack(*CB)) 3267 continue; 3268 3269 // Find the unique free call to remove it. 3270 SmallVector<CallBase *, 4> FreeCalls; 3271 for (auto *U : CB->users()) { 3272 CallBase *C = dyn_cast<CallBase>(U); 3273 if (C && C->getCalledFunction() == FreeCall.Declaration) 3274 FreeCalls.push_back(C); 3275 } 3276 if (FreeCalls.size() != 1) 3277 continue; 3278 3279 auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0)); 3280 3281 if (AllocSize->getZExtValue() + SharedMemoryUsed > SharedMemoryLimit) { 3282 LLVM_DEBUG(dbgs() << TAG << "Cannot replace call " << *CB 3283 << " with shared memory." 3284 << " Shared memory usage is limited to " 3285 << SharedMemoryLimit << " bytes\n"); 3286 continue; 3287 } 3288 3289 LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB 3290 << " with " << AllocSize->getZExtValue() 3291 << " bytes of shared memory\n"); 3292 3293 // Create a new shared memory buffer of the same size as the allocation 3294 // and replace all the uses of the original allocation with it. 
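      // Illustratively (types, attributes, and names simplified; the address
      // space number depends on the target), an eligible allocation such as
      //
      //   %p = call i8* @__kmpc_alloc_shared(i64 4)
      //   ...
      //   call void @__kmpc_free_shared(i8* %p, ...)
      //
      // is replaced by a static buffer in the GPU's shared address space,
      //
      //   @p_shared = internal addrspace(3) global [4 x i8] undef
      //
      // whose (casted) address takes over all uses of %p, while both runtime
      // calls are deleted.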
3295 Module *M = CB->getModule(); 3296 Type *Int8Ty = Type::getInt8Ty(M->getContext()); 3297 Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue()); 3298 auto *SharedMem = new GlobalVariable( 3299 *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage, 3300 UndefValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr, 3301 GlobalValue::NotThreadLocal, 3302 static_cast<unsigned>(AddressSpace::Shared)); 3303 auto *NewBuffer = 3304 ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo()); 3305 3306 auto Remark = [&](OptimizationRemark OR) { 3307 return OR << "Replaced globalized variable with " 3308 << ore::NV("SharedMemory", AllocSize->getZExtValue()) 3309 << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ") 3310 << "of shared memory."; 3311 }; 3312 A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark); 3313 3314 MaybeAlign Alignment = CB->getRetAlign(); 3315 assert(Alignment && 3316 "HeapToShared on allocation without alignment attribute"); 3317 SharedMem->setAlignment(MaybeAlign(Alignment)); 3318 3319 A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewBuffer); 3320 A.deleteAfterManifest(*CB); 3321 A.deleteAfterManifest(*FreeCalls.front()); 3322 3323 SharedMemoryUsed += AllocSize->getZExtValue(); 3324 NumBytesMovedToSharedMemory = SharedMemoryUsed; 3325 Changed = ChangeStatus::CHANGED; 3326 } 3327 3328 return Changed; 3329 } 3330 3331 ChangeStatus updateImpl(Attributor &A) override { 3332 if (MallocCalls.empty()) 3333 return indicatePessimisticFixpoint(); 3334 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3335 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3336 if (!RFI.Declaration) 3337 return ChangeStatus::UNCHANGED; 3338 3339 Function *F = getAnchorScope(); 3340 3341 auto NumMallocCalls = MallocCalls.size(); 3342 3343 // Only consider malloc calls executed by a single thread with a constant. 3344 for (User *U : RFI.Declaration->users()) { 3345 if (CallBase *CB = dyn_cast<CallBase>(U)) { 3346 if (CB->getCaller() != F) 3347 continue; 3348 if (!MallocCalls.count(CB)) 3349 continue; 3350 if (!isa<ConstantInt>(CB->getArgOperand(0))) { 3351 MallocCalls.remove(CB); 3352 continue; 3353 } 3354 const auto &ED = A.getAAFor<AAExecutionDomain>( 3355 *this, IRPosition::function(*F), DepClassTy::REQUIRED); 3356 if (!ED.isExecutedByInitialThreadOnly(*CB)) 3357 MallocCalls.remove(CB); 3358 } 3359 } 3360 3361 findPotentialRemovedFreeCalls(A); 3362 3363 if (NumMallocCalls != MallocCalls.size()) 3364 return ChangeStatus::CHANGED; 3365 3366 return ChangeStatus::UNCHANGED; 3367 } 3368 3369 /// Collection of all malloc calls in a function. 3370 SmallSetVector<CallBase *, 4> MallocCalls; 3371 /// Collection of potentially removed free calls in a function. 3372 SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls; 3373 /// The total amount of shared memory that has been used for HeapToShared. 3374 unsigned SharedMemoryUsed = 0; 3375 }; 3376 3377 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> { 3378 using Base = StateWrapper<KernelInfoState, AbstractAttribute>; 3379 AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 3380 3381 /// Statistics are tracked as part of manifest for now. 3382 void trackStatistics() const override {} 3383 3384 /// See AbstractAttribute::getAsStr() 3385 const std::string getAsStr() const override { 3386 if (!isValidState()) 3387 return "<invalid>"; 3388 return std::string(SPMDCompatibilityTracker.isAssumed() ? 
"SPMD" 3389 : "generic") + 3390 std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]" 3391 : "") + 3392 std::string(" #PRs: ") + 3393 (ReachedKnownParallelRegions.isValidState() 3394 ? std::to_string(ReachedKnownParallelRegions.size()) 3395 : "<invalid>") + 3396 ", #Unknown PRs: " + 3397 (ReachedUnknownParallelRegions.isValidState() 3398 ? std::to_string(ReachedUnknownParallelRegions.size()) 3399 : "<invalid>") + 3400 ", #Reaching Kernels: " + 3401 (ReachingKernelEntries.isValidState() 3402 ? std::to_string(ReachingKernelEntries.size()) 3403 : "<invalid>") + 3404 ", #ParLevels: " + 3405 (ParallelLevels.isValidState() 3406 ? std::to_string(ParallelLevels.size()) 3407 : "<invalid>"); 3408 } 3409 3410 /// Create an abstract attribute biew for the position \p IRP. 3411 static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A); 3412 3413 /// See AbstractAttribute::getName() 3414 const std::string getName() const override { return "AAKernelInfo"; } 3415 3416 /// See AbstractAttribute::getIdAddr() 3417 const char *getIdAddr() const override { return &ID; } 3418 3419 /// This function should return true if the type of the \p AA is AAKernelInfo 3420 static bool classof(const AbstractAttribute *AA) { 3421 return (AA->getIdAddr() == &ID); 3422 } 3423 3424 static const char ID; 3425 }; 3426 3427 /// The function kernel info abstract attribute, basically, what can we say 3428 /// about a function with regards to the KernelInfoState. 3429 struct AAKernelInfoFunction : AAKernelInfo { 3430 AAKernelInfoFunction(const IRPosition &IRP, Attributor &A) 3431 : AAKernelInfo(IRP, A) {} 3432 3433 SmallPtrSet<Instruction *, 4> GuardedInstructions; 3434 3435 SmallPtrSetImpl<Instruction *> &getGuardedInstructions() { 3436 return GuardedInstructions; 3437 } 3438 3439 /// See AbstractAttribute::initialize(...). 3440 void initialize(Attributor &A) override { 3441 // This is a high-level transform that might change the constant arguments 3442 // of the init and dinit calls. We need to tell the Attributor about this 3443 // to avoid other parts using the current constant value for simpliication. 3444 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3445 3446 Function *Fn = getAnchorScope(); 3447 3448 OMPInformationCache::RuntimeFunctionInfo &InitRFI = 3449 OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 3450 OMPInformationCache::RuntimeFunctionInfo &DeinitRFI = 3451 OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit]; 3452 3453 // For kernels we perform more initialization work, first we find the init 3454 // and deinit calls. 3455 auto StoreCallBase = [](Use &U, 3456 OMPInformationCache::RuntimeFunctionInfo &RFI, 3457 CallBase *&Storage) { 3458 CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI); 3459 assert(CB && 3460 "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!"); 3461 assert(!Storage && 3462 "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!"); 3463 Storage = CB; 3464 return false; 3465 }; 3466 InitRFI.foreachUse( 3467 [&](Use &U, Function &) { 3468 StoreCallBase(U, InitRFI, KernelInitCB); 3469 return false; 3470 }, 3471 Fn); 3472 DeinitRFI.foreachUse( 3473 [&](Use &U, Function &) { 3474 StoreCallBase(U, DeinitRFI, KernelDeinitCB); 3475 return false; 3476 }, 3477 Fn); 3478 3479 // Ignore kernels without initializers such as global constructors. 3480 if (!KernelInitCB || !KernelDeinitCB) 3481 return; 3482 3483 // Add itself to the reaching kernel and set IsKernelEntry. 
3484 ReachingKernelEntries.insert(Fn); 3485 IsKernelEntry = true; 3486 3487 // For kernels we might need to initialize/finalize the IsSPMD state and 3488 // we need to register a simplification callback so that the Attributor 3489 // knows the constant arguments to __kmpc_target_init and 3490 // __kmpc_target_deinit might actually change. 3491 3492 Attributor::SimplifictionCallbackTy StateMachineSimplifyCB = 3493 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3494 bool &UsedAssumedInformation) -> std::optional<Value *> { 3495 // IRP represents the "use generic state machine" argument of an 3496 // __kmpc_target_init call. We will answer this one with the internal 3497 // state. As long as we are not in an invalid state, we will create a 3498 // custom state machine so the value should be a `i1 false`. If we are 3499 // in an invalid state, we won't change the value that is in the IR. 3500 if (!ReachedKnownParallelRegions.isValidState()) 3501 return nullptr; 3502 // If we have disabled state machine rewrites, don't make a custom one. 3503 if (DisableOpenMPOptStateMachineRewrite) 3504 return nullptr; 3505 if (AA) 3506 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3507 UsedAssumedInformation = !isAtFixpoint(); 3508 auto *FalseVal = 3509 ConstantInt::getBool(IRP.getAnchorValue().getContext(), false); 3510 return FalseVal; 3511 }; 3512 3513 Attributor::SimplifictionCallbackTy ModeSimplifyCB = 3514 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3515 bool &UsedAssumedInformation) -> std::optional<Value *> { 3516 // IRP represents the "SPMDCompatibilityTracker" argument of an 3517 // __kmpc_target_init or 3518 // __kmpc_target_deinit call. We will answer this one with the internal 3519 // state. 3520 if (!SPMDCompatibilityTracker.isValidState()) 3521 return nullptr; 3522 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 3523 if (AA) 3524 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3525 UsedAssumedInformation = true; 3526 } else { 3527 UsedAssumedInformation = false; 3528 } 3529 auto *Val = ConstantInt::getSigned( 3530 IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()), 3531 SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD 3532 : OMP_TGT_EXEC_MODE_GENERIC); 3533 return Val; 3534 }; 3535 3536 constexpr const int InitModeArgNo = 1; 3537 constexpr const int DeinitModeArgNo = 1; 3538 constexpr const int InitUseStateMachineArgNo = 2; 3539 A.registerSimplificationCallback( 3540 IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo), 3541 StateMachineSimplifyCB); 3542 A.registerSimplificationCallback( 3543 IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo), 3544 ModeSimplifyCB); 3545 A.registerSimplificationCallback( 3546 IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo), 3547 ModeSimplifyCB); 3548 3549 // Check if we know we are in SPMD-mode already. 3550 ConstantInt *ModeArg = 3551 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 3552 if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 3553 SPMDCompatibilityTracker.indicateOptimisticFixpoint(); 3554 // This is a generic region but SPMDization is disabled so stop tracking. 3555 else if (DisableOpenMPOptSPMDization) 3556 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 3557 3558 // Register virtual uses of functions we might need to preserve. 
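// (A short note on the mechanism, as we use it here: a virtual use callback
// makes the Attributor treat the given runtime function declaration as if it
// had an extra, not-yet-materialized use for as long as the callback reports
// one. That keeps the declaration from being dropped while this kernel might
// still need to emit calls to it during manifest, e.g., for a custom state
// machine or SPMD guarding.)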
3559 auto RegisterVirtualUse = [&](RuntimeFunction RFKind, 3560 Attributor::VirtualUseCallbackTy &CB) { 3561 if (!OMPInfoCache.RFIs[RFKind].Declaration) 3562 return; 3563 A.registerVirtualUseCallback(*OMPInfoCache.RFIs[RFKind].Declaration, CB); 3564 }; 3565 3566 // Add a dependence to ensure updates if the state changes. 3567 auto AddDependence = [](Attributor &A, const AAKernelInfo *KI, 3568 const AbstractAttribute *QueryingAA) { 3569 if (QueryingAA) { 3570 A.recordDependence(*KI, *QueryingAA, DepClassTy::OPTIONAL); 3571 } 3572 return true; 3573 }; 3574 3575 Attributor::VirtualUseCallbackTy CustomStateMachineUseCB = 3576 [&](Attributor &A, const AbstractAttribute *QueryingAA) { 3577 // Whenever we create a custom state machine we will insert calls to 3578 // __kmpc_get_hardware_num_threads_in_block, 3579 // __kmpc_get_warp_size, 3580 // __kmpc_barrier_simple_generic, 3581 // __kmpc_kernel_parallel, and 3582 // __kmpc_kernel_end_parallel. 3583 // Not needed if we are on track for SPMDzation. 3584 if (SPMDCompatibilityTracker.isValidState()) 3585 return AddDependence(A, this, QueryingAA); 3586 // Not needed if we can't rewrite due to an invalid state. 3587 if (!ReachedKnownParallelRegions.isValidState()) 3588 return AddDependence(A, this, QueryingAA); 3589 return false; 3590 }; 3591 3592 // Not needed if we are pre-runtime merge. 3593 if (!KernelInitCB->getCalledFunction()->isDeclaration()) { 3594 RegisterVirtualUse(OMPRTL___kmpc_get_hardware_num_threads_in_block, 3595 CustomStateMachineUseCB); 3596 RegisterVirtualUse(OMPRTL___kmpc_get_warp_size, CustomStateMachineUseCB); 3597 RegisterVirtualUse(OMPRTL___kmpc_barrier_simple_generic, 3598 CustomStateMachineUseCB); 3599 RegisterVirtualUse(OMPRTL___kmpc_kernel_parallel, 3600 CustomStateMachineUseCB); 3601 RegisterVirtualUse(OMPRTL___kmpc_kernel_end_parallel, 3602 CustomStateMachineUseCB); 3603 } 3604 3605 // If we do not perform SPMDzation we do not need the virtual uses below. 3606 if (SPMDCompatibilityTracker.isAtFixpoint()) 3607 return; 3608 3609 Attributor::VirtualUseCallbackTy HWThreadIdUseCB = 3610 [&](Attributor &A, const AbstractAttribute *QueryingAA) { 3611 // Whenever we perform SPMDzation we will insert 3612 // __kmpc_get_hardware_thread_id_in_block calls. 3613 if (!SPMDCompatibilityTracker.isValidState()) 3614 return AddDependence(A, this, QueryingAA); 3615 return false; 3616 }; 3617 RegisterVirtualUse(OMPRTL___kmpc_get_hardware_thread_id_in_block, 3618 HWThreadIdUseCB); 3619 3620 Attributor::VirtualUseCallbackTy SPMDBarrierUseCB = 3621 [&](Attributor &A, const AbstractAttribute *QueryingAA) { 3622 // Whenever we perform SPMDzation with guarding we will insert 3623 // __kmpc_simple_barrier_spmd calls. If SPMDzation failed, there is 3624 // nothing to guard, or there are no parallel regions, we don't need 3625 // the calls. 3626 if (!SPMDCompatibilityTracker.isValidState()) 3627 return AddDependence(A, this, QueryingAA); 3628 if (SPMDCompatibilityTracker.empty()) 3629 return AddDependence(A, this, QueryingAA); 3630 if (!mayContainParallelRegion()) 3631 return AddDependence(A, this, QueryingAA); 3632 return false; 3633 }; 3634 RegisterVirtualUse(OMPRTL___kmpc_barrier_simple_spmd, SPMDBarrierUseCB); 3635 } 3636 3637 /// Sanitize the string \p S such that it is a suitable global symbol name. 
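/// For example (purely illustrative), "foo<int>::bar" becomes "foo.int...bar":
/// every character outside [a-zA-Z0-9_] is replaced by a '.'.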
3638 static std::string sanitizeForGlobalName(std::string S) { 3639 std::replace_if( 3640 S.begin(), S.end(), 3641 [](const char C) { 3642 return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') || 3643 (C >= '0' && C <= '9') || C == '_'); 3644 }, 3645 '.'); 3646 return S; 3647 } 3648 3649 /// Modify the IR based on the KernelInfoState as the fixpoint iteration is 3650 /// finished now. 3651 ChangeStatus manifest(Attributor &A) override { 3652 // If we are not looking at a kernel with __kmpc_target_init and 3653 // __kmpc_target_deinit call we cannot actually manifest the information. 3654 if (!KernelInitCB || !KernelDeinitCB) 3655 return ChangeStatus::UNCHANGED; 3656 3657 /// Insert nested Parallelism global variable 3658 Function *Kernel = getAnchorScope(); 3659 Module &M = *Kernel->getParent(); 3660 Type *Int8Ty = Type::getInt8Ty(M.getContext()); 3661 new GlobalVariable(M, Int8Ty, /* isConstant */ true, 3662 GlobalValue::WeakAnyLinkage, 3663 ConstantInt::get(Int8Ty, NestedParallelism ? 1 : 0), 3664 Kernel->getName() + "_nested_parallelism"); 3665 3666 // If we can we change the execution mode to SPMD-mode otherwise we build a 3667 // custom state machine. 3668 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3669 if (!changeToSPMDMode(A, Changed)) { 3670 if (!KernelInitCB->getCalledFunction()->isDeclaration()) 3671 return buildCustomStateMachine(A); 3672 } 3673 3674 return Changed; 3675 } 3676 3677 void insertInstructionGuardsHelper(Attributor &A) { 3678 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3679 3680 auto CreateGuardedRegion = [&](Instruction *RegionStartI, 3681 Instruction *RegionEndI) { 3682 LoopInfo *LI = nullptr; 3683 DominatorTree *DT = nullptr; 3684 MemorySSAUpdater *MSU = nullptr; 3685 using InsertPointTy = OpenMPIRBuilder::InsertPointTy; 3686 3687 BasicBlock *ParentBB = RegionStartI->getParent(); 3688 Function *Fn = ParentBB->getParent(); 3689 Module &M = *Fn->getParent(); 3690 3691 // Create all the blocks and logic. 3692 // ParentBB: 3693 // goto RegionCheckTidBB 3694 // RegionCheckTidBB: 3695 // Tid = __kmpc_hardware_thread_id() 3696 // if (Tid != 0) 3697 // goto RegionBarrierBB 3698 // RegionStartBB: 3699 // <execute instructions guarded> 3700 // goto RegionEndBB 3701 // RegionEndBB: 3702 // <store escaping values to shared mem> 3703 // goto RegionBarrierBB 3704 // RegionBarrierBB: 3705 // __kmpc_simple_barrier_spmd() 3706 // // second barrier is omitted if lacking escaping values. 3707 // <load escaping values from shared mem> 3708 // __kmpc_simple_barrier_spmd() 3709 // goto RegionExitBB 3710 // RegionExitBB: 3711 // <execute rest of instructions> 3712 3713 BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(), 3714 DT, LI, MSU, "region.guarded.end"); 3715 BasicBlock *RegionBarrierBB = 3716 SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI, 3717 MSU, "region.barrier"); 3718 BasicBlock *RegionExitBB = 3719 SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(), 3720 DT, LI, MSU, "region.exit"); 3721 BasicBlock *RegionStartBB = 3722 SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded"); 3723 3724 assert(ParentBB->getUniqueSuccessor() == RegionStartBB && 3725 "Expected a different CFG"); 3726 3727 BasicBlock *RegionCheckTidBB = SplitBlock( 3728 ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid"); 3729 3730 // Register basic blocks with the Attributor. 
3731 A.registerManifestAddedBasicBlock(*RegionEndBB); 3732 A.registerManifestAddedBasicBlock(*RegionBarrierBB); 3733 A.registerManifestAddedBasicBlock(*RegionExitBB); 3734 A.registerManifestAddedBasicBlock(*RegionStartBB); 3735 A.registerManifestAddedBasicBlock(*RegionCheckTidBB); 3736 3737 bool HasBroadcastValues = false; 3738 // Find escaping outputs from the guarded region to outside users and 3739 // broadcast their values to them. 3740 for (Instruction &I : *RegionStartBB) { 3741 SmallPtrSet<Instruction *, 4> OutsideUsers; 3742 for (User *Usr : I.users()) { 3743 Instruction &UsrI = *cast<Instruction>(Usr); 3744 if (UsrI.getParent() != RegionStartBB) 3745 OutsideUsers.insert(&UsrI); 3746 } 3747 3748 if (OutsideUsers.empty()) 3749 continue; 3750 3751 HasBroadcastValues = true; 3752 3753 // Emit a global variable in shared memory to store the broadcasted 3754 // value. 3755 auto *SharedMem = new GlobalVariable( 3756 M, I.getType(), /* IsConstant */ false, 3757 GlobalValue::InternalLinkage, UndefValue::get(I.getType()), 3758 sanitizeForGlobalName( 3759 (I.getName() + ".guarded.output.alloc").str()), 3760 nullptr, GlobalValue::NotThreadLocal, 3761 static_cast<unsigned>(AddressSpace::Shared)); 3762 3763 // Emit a store instruction to update the value. 3764 new StoreInst(&I, SharedMem, RegionEndBB->getTerminator()); 3765 3766 LoadInst *LoadI = new LoadInst(I.getType(), SharedMem, 3767 I.getName() + ".guarded.output.load", 3768 RegionBarrierBB->getTerminator()); 3769 3770 // Emit a load instruction and replace uses of the output value. 3771 for (Instruction *UsrI : OutsideUsers) 3772 UsrI->replaceUsesOfWith(&I, LoadI); 3773 } 3774 3775 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3776 3777 // Go to tid check BB in ParentBB. 3778 const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc(); 3779 ParentBB->getTerminator()->eraseFromParent(); 3780 OpenMPIRBuilder::LocationDescription Loc( 3781 InsertPointTy(ParentBB, ParentBB->end()), DL); 3782 OMPInfoCache.OMPBuilder.updateToLocation(Loc); 3783 uint32_t SrcLocStrSize; 3784 auto *SrcLocStr = 3785 OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize); 3786 Value *Ident = 3787 OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize); 3788 BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL); 3789 3790 // Add check for Tid in RegionCheckTidBB 3791 RegionCheckTidBB->getTerminator()->eraseFromParent(); 3792 OpenMPIRBuilder::LocationDescription LocRegionCheckTid( 3793 InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL); 3794 OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid); 3795 FunctionCallee HardwareTidFn = 3796 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3797 M, OMPRTL___kmpc_get_hardware_thread_id_in_block); 3798 CallInst *Tid = 3799 OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {}); 3800 Tid->setDebugLoc(DL); 3801 OMPInfoCache.setCallingConvention(HardwareTidFn, Tid); 3802 Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid); 3803 OMPInfoCache.OMPBuilder.Builder 3804 .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB) 3805 ->setDebugLoc(DL); 3806 3807 // First barrier for synchronization, ensures main thread has updated 3808 // values. 
3809 FunctionCallee BarrierFn = 3810 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3811 M, OMPRTL___kmpc_barrier_simple_spmd); 3812 OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy( 3813 RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt())); 3814 CallInst *Barrier = 3815 OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid}); 3816 Barrier->setDebugLoc(DL); 3817 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3818 3819 // Second barrier ensures workers have read broadcast values. 3820 if (HasBroadcastValues) { 3821 CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "", 3822 RegionBarrierBB->getTerminator()); 3823 Barrier->setDebugLoc(DL); 3824 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3825 } 3826 }; 3827 3828 auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3829 SmallPtrSet<BasicBlock *, 8> Visited; 3830 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3831 BasicBlock *BB = GuardedI->getParent(); 3832 if (!Visited.insert(BB).second) 3833 continue; 3834 3835 SmallVector<std::pair<Instruction *, Instruction *>> Reorders; 3836 Instruction *LastEffect = nullptr; 3837 BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend(); 3838 while (++IP != IPEnd) { 3839 if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory()) 3840 continue; 3841 Instruction *I = &*IP; 3842 if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI)) 3843 continue; 3844 if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) { 3845 LastEffect = nullptr; 3846 continue; 3847 } 3848 if (LastEffect) 3849 Reorders.push_back({I, LastEffect}); 3850 LastEffect = &*IP; 3851 } 3852 for (auto &Reorder : Reorders) 3853 Reorder.first->moveBefore(Reorder.second); 3854 } 3855 3856 SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions; 3857 3858 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3859 BasicBlock *BB = GuardedI->getParent(); 3860 auto *CalleeAA = A.lookupAAFor<AAKernelInfo>( 3861 IRPosition::function(*GuardedI->getFunction()), nullptr, 3862 DepClassTy::NONE); 3863 assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo"); 3864 auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA); 3865 // Continue if instruction is already guarded. 3866 if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI)) 3867 continue; 3868 3869 Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr; 3870 for (Instruction &I : *BB) { 3871 // If instruction I needs to be guarded update the guarded region 3872 // bounds. 3873 if (SPMDCompatibilityTracker.contains(&I)) { 3874 CalleeAAFunction.getGuardedInstructions().insert(&I); 3875 if (GuardedRegionStart) 3876 GuardedRegionEnd = &I; 3877 else 3878 GuardedRegionStart = GuardedRegionEnd = &I; 3879 3880 continue; 3881 } 3882 3883 // Instruction I does not need guarding, store 3884 // any region found and reset bounds. 3885 if (GuardedRegionStart) { 3886 GuardedRegions.push_back( 3887 std::make_pair(GuardedRegionStart, GuardedRegionEnd)); 3888 GuardedRegionStart = nullptr; 3889 GuardedRegionEnd = nullptr; 3890 } 3891 } 3892 } 3893 3894 for (auto &GR : GuardedRegions) 3895 CreateGuardedRegion(GR.first, GR.second); 3896 } 3897 3898 void forceSingleThreadPerWorkgroupHelper(Attributor &A) { 3899 // Only allow 1 thread per workgroup to continue executing the user code. 3900 // 3901 // InitCB = __kmpc_target_init(...) 
3902 // ThreadIdInBlock = __kmpc_get_hardware_thread_id_in_block();
3903 // if (ThreadIdInBlock != 0) return;
3904 // UserCode:
3905 // // user code
3906 //
3907 auto &Ctx = getAnchorValue().getContext();
3908 Function *Kernel = getAssociatedFunction();
3909 assert(Kernel && "Expected an associated function!");
3910
3911 // Create block for user code to branch to from initial block.
3912 BasicBlock *InitBB = KernelInitCB->getParent();
3913 BasicBlock *UserCodeBB = InitBB->splitBasicBlock(
3914 KernelInitCB->getNextNode(), "main.thread.user_code");
3915 BasicBlock *ReturnBB =
3916 BasicBlock::Create(Ctx, "exit.threads", Kernel, UserCodeBB);
3917
3918 // Register blocks with attributor:
3919 A.registerManifestAddedBasicBlock(*InitBB);
3920 A.registerManifestAddedBasicBlock(*UserCodeBB);
3921 A.registerManifestAddedBasicBlock(*ReturnBB);
3922
3923 // Debug location:
3924 const DebugLoc &DLoc = KernelInitCB->getDebugLoc();
3925 ReturnInst::Create(Ctx, ReturnBB)->setDebugLoc(DLoc);
3926 InitBB->getTerminator()->eraseFromParent();
3927
3928 // Prepare call to OMPRTL___kmpc_get_hardware_thread_id_in_block.
3929 Module &M = *Kernel->getParent();
3930 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3931 FunctionCallee ThreadIdInBlockFn =
3932 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3933 M, OMPRTL___kmpc_get_hardware_thread_id_in_block);
3934
3935 // Get thread ID in block.
3936 CallInst *ThreadIdInBlock =
3937 CallInst::Create(ThreadIdInBlockFn, "thread_id.in.block", InitBB);
3938 OMPInfoCache.setCallingConvention(ThreadIdInBlockFn, ThreadIdInBlock);
3939 ThreadIdInBlock->setDebugLoc(DLoc);
3940
3941 // Eliminate all threads in the block with ID not equal to 0:
3942 Instruction *IsMainThread =
3943 ICmpInst::Create(ICmpInst::ICmp, CmpInst::ICMP_NE, ThreadIdInBlock,
3944 ConstantInt::get(ThreadIdInBlock->getType(), 0),
3945 "thread.is_main", InitBB);
3946 IsMainThread->setDebugLoc(DLoc);
3947 BranchInst::Create(ReturnBB, UserCodeBB, IsMainThread, InitBB);
3948 }
3949
3950 bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
3951 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3952
3953 // We cannot change to SPMD mode if the runtime functions aren't available.
3954 if (!OMPInfoCache.runtimeFnsAvailable(
3955 {OMPRTL___kmpc_get_hardware_thread_id_in_block,
3956 OMPRTL___kmpc_barrier_simple_spmd}))
3957 return false;
3958
3959 if (!SPMDCompatibilityTracker.isAssumed()) {
3960 for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
3961 if (!NonCompatibleI)
3962 continue;
3963
3964 // Skip diagnostics on calls to known OpenMP runtime functions for now.
3965 if (auto *CB = dyn_cast<CallBase>(NonCompatibleI))
3966 if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction()))
3967 continue;
3968
3969 auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3970 ORA << "Value has potential side effects preventing SPMD-mode "
3971 "execution";
3972 if (isa<CallBase>(NonCompatibleI)) {
3973 ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to "
3974 "the called function to override";
3975 }
3976 return ORA << ".";
3977 };
3978 A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121",
3979 Remark);
3980
3981 LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: "
3982 << *NonCompatibleI << "\n");
3983 }
3984
3985 return false;
3986 }
3987
3988 // Get the actual kernel; it could be the caller of the anchor scope if we
3989 // have a debug wrapper.
3990 Function *Kernel = getAnchorScope();
3991 if (Kernel->hasLocalLinkage()) {
3992 assert(Kernel->hasOneUse() && "Unexpected use of debug kernel wrapper.");
3993 auto *CB = cast<CallBase>(Kernel->user_back());
3994 Kernel = CB->getCaller();
3995 }
3996 assert(OMPInfoCache.Kernels.count(Kernel) && "Expected kernel function!");
3997
3998 // Check if the kernel is already in SPMD mode; if so, return success.
3999 GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable(
4000 (Kernel->getName() + "_exec_mode").str());
4001 assert(ExecMode && "Kernel without exec mode?");
4002 assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!");
4003
4004 // Set the global exec mode flag to indicate SPMD-Generic mode.
4005 assert(isa<ConstantInt>(ExecMode->getInitializer()) &&
4006 "ExecMode is not an integer!");
4007 const int8_t ExecModeVal =
4008 cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue();
4009 if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC)
4010 return true;
4011
4012 // We will now unconditionally modify the IR; indicate a change.
4013 Changed = ChangeStatus::CHANGED;
4014
4015 // Do not use instruction guards when no parallel region is present inside
4016 // the target region.
4017 if (mayContainParallelRegion())
4018 insertInstructionGuardsHelper(A);
4019 else
4020 forceSingleThreadPerWorkgroupHelper(A);
4021
4022 // Adjust the global exec mode flag that tells the runtime what mode this
4023 // kernel is executed in.
4024 assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC &&
4025 "Initially non-SPMD kernel has SPMD exec mode!");
4026 ExecMode->setInitializer(
4027 ConstantInt::get(ExecMode->getInitializer()->getType(),
4028 ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD));
4029
4030 // Next rewrite the init and deinit calls to indicate we use SPMD-mode now.
4031 const int InitModeArgNo = 1;
4032 const int DeinitModeArgNo = 1;
4033 const int InitUseStateMachineArgNo = 2;
4034
4035 auto &Ctx = getAnchorValue().getContext();
4036 A.changeUseAfterManifest(
4037 KernelInitCB->getArgOperandUse(InitModeArgNo),
4038 *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
4039 OMP_TGT_EXEC_MODE_SPMD));
4040 A.changeUseAfterManifest(
4041 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo),
4042 *ConstantInt::getBool(Ctx, false));
4043 A.changeUseAfterManifest(
4044 KernelDeinitCB->getArgOperandUse(DeinitModeArgNo),
4045 *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
4046 OMP_TGT_EXEC_MODE_SPMD));
4047
4048 ++NumOpenMPTargetRegionKernelsSPMD;
4049
4050 auto Remark = [&](OptimizationRemark OR) {
4051 return OR << "Transformed generic-mode kernel to SPMD-mode.";
4052 };
4053 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark);
4054 return true;
4055 };
4056
4057 ChangeStatus buildCustomStateMachine(Attributor &A) {
4058 // If we have disabled state machine rewrites, don't make a custom one.
4059 if (DisableOpenMPOptStateMachineRewrite)
4060 return ChangeStatus::UNCHANGED;
4061
4062 // Don't rewrite the state machine if we are not in a valid state.
4063 if (!ReachedKnownParallelRegions.isValidState()) 4064 return ChangeStatus::UNCHANGED; 4065 4066 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4067 if (!OMPInfoCache.runtimeFnsAvailable( 4068 {OMPRTL___kmpc_get_hardware_num_threads_in_block, 4069 OMPRTL___kmpc_get_warp_size, OMPRTL___kmpc_barrier_simple_generic, 4070 OMPRTL___kmpc_kernel_parallel, OMPRTL___kmpc_kernel_end_parallel})) 4071 return ChangeStatus::UNCHANGED; 4072 4073 const int InitModeArgNo = 1; 4074 const int InitUseStateMachineArgNo = 2; 4075 4076 // Check if the current configuration is non-SPMD and generic state machine. 4077 // If we already have SPMD mode or a custom state machine we do not need to 4078 // go any further. If it is anything but a constant something is weird and 4079 // we give up. 4080 ConstantInt *UseStateMachine = dyn_cast<ConstantInt>( 4081 KernelInitCB->getArgOperand(InitUseStateMachineArgNo)); 4082 ConstantInt *Mode = 4083 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 4084 4085 // If we are stuck with generic mode, try to create a custom device (=GPU) 4086 // state machine which is specialized for the parallel regions that are 4087 // reachable by the kernel. 4088 if (!UseStateMachine || UseStateMachine->isZero() || !Mode || 4089 (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 4090 return ChangeStatus::UNCHANGED; 4091 4092 // If not SPMD mode, indicate we use a custom state machine now. 4093 auto &Ctx = getAnchorValue().getContext(); 4094 auto *FalseVal = ConstantInt::getBool(Ctx, false); 4095 A.changeUseAfterManifest( 4096 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal); 4097 4098 // If we don't actually need a state machine we are done here. This can 4099 // happen if there simply are no parallel regions. In the resulting kernel 4100 // all worker threads will simply exit right away, leaving the main thread 4101 // to do the work alone. 4102 if (!mayContainParallelRegion()) { 4103 ++NumOpenMPTargetRegionKernelsWithoutStateMachine; 4104 4105 auto Remark = [&](OptimizationRemark OR) { 4106 return OR << "Removing unused state machine from generic-mode kernel."; 4107 }; 4108 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark); 4109 4110 return ChangeStatus::CHANGED; 4111 } 4112 4113 // Keep track in the statistics of our new shiny custom state machine. 4114 if (ReachedUnknownParallelRegions.empty()) { 4115 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback; 4116 4117 auto Remark = [&](OptimizationRemark OR) { 4118 return OR << "Rewriting generic-mode kernel with a customized state " 4119 "machine."; 4120 }; 4121 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark); 4122 } else { 4123 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback; 4124 4125 auto Remark = [&](OptimizationRemarkAnalysis OR) { 4126 return OR << "Generic-mode kernel is executed with a customized state " 4127 "machine that requires a fallback."; 4128 }; 4129 A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark); 4130 4131 // Tell the user why we ended up with a fallback. 4132 for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) { 4133 if (!UnknownParallelRegionCB) 4134 continue; 4135 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 4136 return ORA << "Call may contain unknown parallel regions. 
Use " 4137 << "`__attribute__((assume(\"omp_no_parallelism\")))` to " 4138 "override."; 4139 }; 4140 A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB, 4141 "OMP133", Remark); 4142 } 4143 } 4144 4145 // Create all the blocks: 4146 // 4147 // InitCB = __kmpc_target_init(...) 4148 // BlockHwSize = 4149 // __kmpc_get_hardware_num_threads_in_block(); 4150 // WarpSize = __kmpc_get_warp_size(); 4151 // BlockSize = BlockHwSize - WarpSize; 4152 // IsWorkerCheckBB: bool IsWorker = InitCB != -1; 4153 // if (IsWorker) { 4154 // if (InitCB >= BlockSize) return; 4155 // SMBeginBB: __kmpc_barrier_simple_generic(...); 4156 // void *WorkFn; 4157 // bool Active = __kmpc_kernel_parallel(&WorkFn); 4158 // if (!WorkFn) return; 4159 // SMIsActiveCheckBB: if (Active) { 4160 // SMIfCascadeCurrentBB: if (WorkFn == <ParFn0>) 4161 // ParFn0(...); 4162 // SMIfCascadeCurrentBB: else if (WorkFn == <ParFn1>) 4163 // ParFn1(...); 4164 // ... 4165 // SMIfCascadeCurrentBB: else 4166 // ((WorkFnTy*)WorkFn)(...); 4167 // SMEndParallelBB: __kmpc_kernel_end_parallel(...); 4168 // } 4169 // SMDoneBB: __kmpc_barrier_simple_generic(...); 4170 // goto SMBeginBB; 4171 // } 4172 // UserCodeEntryBB: // user code 4173 // __kmpc_target_deinit(...) 4174 // 4175 Function *Kernel = getAssociatedFunction(); 4176 assert(Kernel && "Expected an associated function!"); 4177 4178 BasicBlock *InitBB = KernelInitCB->getParent(); 4179 BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock( 4180 KernelInitCB->getNextNode(), "thread.user_code.check"); 4181 BasicBlock *IsWorkerCheckBB = 4182 BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB); 4183 BasicBlock *StateMachineBeginBB = BasicBlock::Create( 4184 Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB); 4185 BasicBlock *StateMachineFinishedBB = BasicBlock::Create( 4186 Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB); 4187 BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create( 4188 Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB); 4189 BasicBlock *StateMachineIfCascadeCurrentBB = 4190 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 4191 Kernel, UserCodeEntryBB); 4192 BasicBlock *StateMachineEndParallelBB = 4193 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end", 4194 Kernel, UserCodeEntryBB); 4195 BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create( 4196 Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB); 4197 A.registerManifestAddedBasicBlock(*InitBB); 4198 A.registerManifestAddedBasicBlock(*UserCodeEntryBB); 4199 A.registerManifestAddedBasicBlock(*IsWorkerCheckBB); 4200 A.registerManifestAddedBasicBlock(*StateMachineBeginBB); 4201 A.registerManifestAddedBasicBlock(*StateMachineFinishedBB); 4202 A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB); 4203 A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB); 4204 A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB); 4205 A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB); 4206 4207 const DebugLoc &DLoc = KernelInitCB->getDebugLoc(); 4208 ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc); 4209 InitBB->getTerminator()->eraseFromParent(); 4210 4211 Instruction *IsWorker = 4212 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB, 4213 ConstantInt::get(KernelInitCB->getType(), -1), 4214 "thread.is_worker", InitBB); 4215 IsWorker->setDebugLoc(DLoc); 4216 BranchInst::Create(IsWorkerCheckBB, UserCodeEntryBB, IsWorker, InitBB); 4217 4218 
Module &M = *Kernel->getParent(); 4219 FunctionCallee BlockHwSizeFn = 4220 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4221 M, OMPRTL___kmpc_get_hardware_num_threads_in_block); 4222 FunctionCallee WarpSizeFn = 4223 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4224 M, OMPRTL___kmpc_get_warp_size); 4225 CallInst *BlockHwSize = 4226 CallInst::Create(BlockHwSizeFn, "block.hw_size", IsWorkerCheckBB); 4227 OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize); 4228 BlockHwSize->setDebugLoc(DLoc); 4229 CallInst *WarpSize = 4230 CallInst::Create(WarpSizeFn, "warp.size", IsWorkerCheckBB); 4231 OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize); 4232 WarpSize->setDebugLoc(DLoc); 4233 Instruction *BlockSize = BinaryOperator::CreateSub( 4234 BlockHwSize, WarpSize, "block.size", IsWorkerCheckBB); 4235 BlockSize->setDebugLoc(DLoc); 4236 Instruction *IsMainOrWorker = ICmpInst::Create( 4237 ICmpInst::ICmp, llvm::CmpInst::ICMP_SLT, KernelInitCB, BlockSize, 4238 "thread.is_main_or_worker", IsWorkerCheckBB); 4239 IsMainOrWorker->setDebugLoc(DLoc); 4240 BranchInst::Create(StateMachineBeginBB, StateMachineFinishedBB, 4241 IsMainOrWorker, IsWorkerCheckBB); 4242 4243 // Create local storage for the work function pointer. 4244 const DataLayout &DL = M.getDataLayout(); 4245 Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); 4246 Instruction *WorkFnAI = 4247 new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr, 4248 "worker.work_fn.addr", &Kernel->getEntryBlock().front()); 4249 WorkFnAI->setDebugLoc(DLoc); 4250 4251 OMPInfoCache.OMPBuilder.updateToLocation( 4252 OpenMPIRBuilder::LocationDescription( 4253 IRBuilder<>::InsertPoint(StateMachineBeginBB, 4254 StateMachineBeginBB->end()), 4255 DLoc)); 4256 4257 Value *Ident = KernelInitCB->getArgOperand(0); 4258 Value *GTid = KernelInitCB; 4259 4260 FunctionCallee BarrierFn = 4261 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4262 M, OMPRTL___kmpc_barrier_simple_generic); 4263 CallInst *Barrier = 4264 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB); 4265 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 4266 Barrier->setDebugLoc(DLoc); 4267 4268 if (WorkFnAI->getType()->getPointerAddressSpace() != 4269 (unsigned int)AddressSpace::Generic) { 4270 WorkFnAI = new AddrSpaceCastInst( 4271 WorkFnAI, 4272 PointerType::getWithSamePointeeType( 4273 cast<PointerType>(WorkFnAI->getType()), 4274 (unsigned int)AddressSpace::Generic), 4275 WorkFnAI->getName() + ".generic", StateMachineBeginBB); 4276 WorkFnAI->setDebugLoc(DLoc); 4277 } 4278 4279 FunctionCallee KernelParallelFn = 4280 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4281 M, OMPRTL___kmpc_kernel_parallel); 4282 CallInst *IsActiveWorker = CallInst::Create( 4283 KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB); 4284 OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker); 4285 IsActiveWorker->setDebugLoc(DLoc); 4286 Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn", 4287 StateMachineBeginBB); 4288 WorkFn->setDebugLoc(DLoc); 4289 4290 FunctionType *ParallelRegionFnTy = FunctionType::get( 4291 Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)}, 4292 false); 4293 Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 4294 WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast", 4295 StateMachineBeginBB); 4296 4297 Instruction *IsDone = 4298 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn, 4299 Constant::getNullValue(VoidPtrTy), "worker.is_done", 
4300 StateMachineBeginBB); 4301 IsDone->setDebugLoc(DLoc); 4302 BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB, 4303 IsDone, StateMachineBeginBB) 4304 ->setDebugLoc(DLoc); 4305 4306 BranchInst::Create(StateMachineIfCascadeCurrentBB, 4307 StateMachineDoneBarrierBB, IsActiveWorker, 4308 StateMachineIsActiveCheckBB) 4309 ->setDebugLoc(DLoc); 4310 4311 Value *ZeroArg = 4312 Constant::getNullValue(ParallelRegionFnTy->getParamType(0)); 4313 4314 // Now that we have most of the CFG skeleton it is time for the if-cascade 4315 // that checks the function pointer we got from the runtime against the 4316 // parallel regions we expect, if there are any. 4317 for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) { 4318 auto *ParallelRegion = ReachedKnownParallelRegions[I]; 4319 BasicBlock *PRExecuteBB = BasicBlock::Create( 4320 Ctx, "worker_state_machine.parallel_region.execute", Kernel, 4321 StateMachineEndParallelBB); 4322 CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB) 4323 ->setDebugLoc(DLoc); 4324 BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB) 4325 ->setDebugLoc(DLoc); 4326 4327 BasicBlock *PRNextBB = 4328 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 4329 Kernel, StateMachineEndParallelBB); 4330 4331 // Check if we need to compare the pointer at all or if we can just 4332 // call the parallel region function. 4333 Value *IsPR; 4334 if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) { 4335 Instruction *CmpI = ICmpInst::Create( 4336 ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion, 4337 "worker.check_parallel_region", StateMachineIfCascadeCurrentBB); 4338 CmpI->setDebugLoc(DLoc); 4339 IsPR = CmpI; 4340 } else { 4341 IsPR = ConstantInt::getTrue(Ctx); 4342 } 4343 4344 BranchInst::Create(PRExecuteBB, PRNextBB, IsPR, 4345 StateMachineIfCascadeCurrentBB) 4346 ->setDebugLoc(DLoc); 4347 StateMachineIfCascadeCurrentBB = PRNextBB; 4348 } 4349 4350 // At the end of the if-cascade we place the indirect function pointer call 4351 // in case we might need it, that is if there can be parallel regions we 4352 // have not handled in the if-cascade above. 4353 if (!ReachedUnknownParallelRegions.empty()) { 4354 StateMachineIfCascadeCurrentBB->setName( 4355 "worker_state_machine.parallel_region.fallback.execute"); 4356 CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "", 4357 StateMachineIfCascadeCurrentBB) 4358 ->setDebugLoc(DLoc); 4359 } 4360 BranchInst::Create(StateMachineEndParallelBB, 4361 StateMachineIfCascadeCurrentBB) 4362 ->setDebugLoc(DLoc); 4363 4364 FunctionCallee EndParallelFn = 4365 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 4366 M, OMPRTL___kmpc_kernel_end_parallel); 4367 CallInst *EndParallel = 4368 CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB); 4369 OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel); 4370 EndParallel->setDebugLoc(DLoc); 4371 BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB) 4372 ->setDebugLoc(DLoc); 4373 4374 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB) 4375 ->setDebugLoc(DLoc); 4376 BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB) 4377 ->setDebugLoc(DLoc); 4378 4379 return ChangeStatus::CHANGED; 4380 } 4381 4382 /// Fixpoint iteration update function. Will be called every time a dependence 4383 /// changed its state (and in the beginning). 
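/// As a small, assumed example of what the update below tracks: a store whose
/// underlying object is neither known to be thread local nor an allocation
/// already claimed by AAHeapToStack is recorded in SPMDCompatibilityTracker,
/// so it can be guarded later if the kernel is still moved to SPMD mode.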
4384 ChangeStatus updateImpl(Attributor &A) override {
4385 KernelInfoState StateBefore = getState();
4386
4387 // Callback to check a read/write instruction.
4388 auto CheckRWInst = [&](Instruction &I) {
4389 // We handle calls later.
4390 if (isa<CallBase>(I))
4391 return true;
4392 // We only care about write effects.
4393 if (!I.mayWriteToMemory())
4394 return true;
4395 if (auto *SI = dyn_cast<StoreInst>(&I)) {
4396 const auto &UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
4397 *this, IRPosition::value(*SI->getPointerOperand()),
4398 DepClassTy::OPTIONAL);
4399 auto &HS = A.getAAFor<AAHeapToStack>(
4400 *this, IRPosition::function(*I.getFunction()),
4401 DepClassTy::OPTIONAL);
4402 if (UnderlyingObjsAA.forallUnderlyingObjects([&](Value &Obj) {
4403 if (AA::isAssumedThreadLocalObject(A, Obj, *this))
4404 return true;
4405 // Check for AAHeapToStack moved objects which must not be
4406 // guarded.
4407 auto *CB = dyn_cast<CallBase>(&Obj);
4408 return CB && HS.isAssumedHeapToStack(*CB);
4409 }))
4410 return true;
4411 }
4412
4413 // Insert instruction that needs guarding.
4414 SPMDCompatibilityTracker.insert(&I);
4415 return true;
4416 };
4417
4418 bool UsedAssumedInformationInCheckRWInst = false;
4419 if (!SPMDCompatibilityTracker.isAtFixpoint())
4420 if (!A.checkForAllReadWriteInstructions(
4421 CheckRWInst, *this, UsedAssumedInformationInCheckRWInst))
4422 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4423
4424 bool UsedAssumedInformationFromReachingKernels = false;
4425 if (!IsKernelEntry) {
4426 updateParallelLevels(A);
4427
4428 bool AllReachingKernelsKnown = true;
4429 updateReachingKernelEntries(A, AllReachingKernelsKnown);
4430 UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown;
4431
4432 if (!SPMDCompatibilityTracker.empty()) {
4433 if (!ParallelLevels.isValidState())
4434 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4435 else if (!ReachingKernelEntries.isValidState())
4436 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4437 else {
4438 // Check if all reaching kernels agree on the mode, as we otherwise
4439 // cannot guard instructions. We might not be sure about the mode, so
4440 // we cannot fix the internal SPMD-zation state either.
4441 int SPMD = 0, Generic = 0;
4442 for (auto *Kernel : ReachingKernelEntries) {
4443 auto &CBAA = A.getAAFor<AAKernelInfo>(
4444 *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL);
4445 if (CBAA.SPMDCompatibilityTracker.isValidState() &&
4446 CBAA.SPMDCompatibilityTracker.isAssumed())
4447 ++SPMD;
4448 else
4449 ++Generic;
4450 if (!CBAA.SPMDCompatibilityTracker.isAtFixpoint())
4451 UsedAssumedInformationFromReachingKernels = true;
4452 }
4453 if (SPMD != 0 && Generic != 0)
4454 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4455 }
4456 }
4457 }
4458
4459 // Callback to check a call instruction.
4460 bool AllParallelRegionStatesWereFixed = true;
4461 bool AllSPMDStatesWereFixed = true;
4462 auto CheckCallInst = [&](Instruction &I) {
4463 auto &CB = cast<CallBase>(I);
4464 auto &CBAA = A.getAAFor<AAKernelInfo>(
4465 *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4466 getState() ^= CBAA.getState();
4467 AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint();
4468 AllParallelRegionStatesWereFixed &=
4469 CBAA.ReachedKnownParallelRegions.isAtFixpoint();
4470 AllParallelRegionStatesWereFixed &=
4471 CBAA.ReachedUnknownParallelRegions.isAtFixpoint();
4472 return true;
4473 };
4474
4475 bool UsedAssumedInformationInCheckCallInst = false;
4476 if (!A.checkForAllCallLikeInstructions(
4477 CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) {
4478 LLVM_DEBUG(dbgs() << TAG
4479 << "Failed to visit all call-like instructions!\n";);
4480 return indicatePessimisticFixpoint();
4481 }
4482
4483 // If we haven't used any assumed information for the reached parallel
4484 // region states, we can fix them.
4485 if (!UsedAssumedInformationInCheckCallInst &&
4486 AllParallelRegionStatesWereFixed) {
4487 ReachedKnownParallelRegions.indicateOptimisticFixpoint();
4488 ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
4489 }
4490
4491 // If we haven't used any assumed information for the SPMD state, we can
4492 // fix it.
4493 if (!UsedAssumedInformationInCheckRWInst &&
4494 !UsedAssumedInformationInCheckCallInst &&
4495 !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed)
4496 SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4497
4498 return StateBefore == getState() ? ChangeStatus::UNCHANGED
4499 : ChangeStatus::CHANGED;
4500 }
4501
4502 private:
4503 /// Update info regarding reaching kernels.
4504 void updateReachingKernelEntries(Attributor &A,
4505 bool &AllReachingKernelsKnown) {
4506 auto PredCallSite = [&](AbstractCallSite ACS) {
4507 Function *Caller = ACS.getInstruction()->getFunction();
4508
4509 assert(Caller && "Caller is nullptr");
4510
4511 auto &CAA = A.getOrCreateAAFor<AAKernelInfo>(
4512 IRPosition::function(*Caller), this, DepClassTy::REQUIRED);
4513 if (CAA.ReachingKernelEntries.isValidState()) {
4514 ReachingKernelEntries ^= CAA.ReachingKernelEntries;
4515 return true;
4516 }
4517
4518 // We lost track of the caller of the associated function; any kernel
4519 // could reach it now.
4520 ReachingKernelEntries.indicatePessimisticFixpoint();
4521
4522 return true;
4523 };
4524
4525 if (!A.checkForAllCallSites(PredCallSite, *this,
4526 true /* RequireAllCallSites */,
4527 AllReachingKernelsKnown))
4528 ReachingKernelEntries.indicatePessimisticFixpoint();
4529 }
4530
4531 /// Update info regarding parallel levels.
4532 void updateParallelLevels(Attributor &A) {
4533 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4534 OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI =
4535 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
4536
4537 auto PredCallSite = [&](AbstractCallSite ACS) {
4538 Function *Caller = ACS.getInstruction()->getFunction();
4539
4540 assert(Caller && "Caller is nullptr");
4541
4542 auto &CAA =
4543 A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller));
4544 if (CAA.ParallelLevels.isValidState()) {
4545 // Any function that is called by `__kmpc_parallel_51` will not be
4546 // folded because the parallel level in the function is updated. Getting
4547 // this right would tie the analysis to the runtime implementation.
Any
4548 // future change to that implementation could therefore make the analysis
4549 // wrong. As a consequence, we are just conservative here.
4550 if (Caller == Parallel51RFI.Declaration) {
4551 ParallelLevels.indicatePessimisticFixpoint();
4552 return true;
4553 }
4554
4555 ParallelLevels ^= CAA.ParallelLevels;
4556
4557 return true;
4558 }
4559
4560 // We lost track of the caller of the associated function; any kernel
4561 // could reach it now.
4562 ParallelLevels.indicatePessimisticFixpoint();
4563
4564 return true;
4565 };
4566
4567 bool AllCallSitesKnown = true;
4568 if (!A.checkForAllCallSites(PredCallSite, *this,
4569 true /* RequireAllCallSites */,
4570 AllCallSitesKnown))
4571 ParallelLevels.indicatePessimisticFixpoint();
4572 }
4573 };
4574
4575 /// The call site kernel info abstract attribute, basically, what can we say
4576 /// about a call site with regard to the KernelInfoState. For now this simply
4577 /// forwards the information from the callee.
4578 struct AAKernelInfoCallSite : AAKernelInfo {
4579 AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A)
4580 : AAKernelInfo(IRP, A) {}
4581
4582 /// See AbstractAttribute::initialize(...).
4583 void initialize(Attributor &A) override {
4584 AAKernelInfo::initialize(A);
4585
4586 CallBase &CB = cast<CallBase>(getAssociatedValue());
4587 Function *Callee = getAssociatedFunction();
4588
4589 auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
4590 *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4591
4592 // Check for SPMD-mode assumptions.
4593 if (AssumptionAA.hasAssumption("ompx_spmd_amenable")) {
4594 SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4595 indicateOptimisticFixpoint();
4596 }
4597
4598 // First weed out calls we do not care about, that is, readonly/readnone
4599 // calls, intrinsics, and "no_openmp" calls. None of these can reach a
4600 // parallel region or anything else we are looking for.
4601 if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) {
4602 indicateOptimisticFixpoint();
4603 return;
4604 }
4605
4606 // Next we check if we know the callee. If it is a known OpenMP function
4607 // we will handle it explicitly in the switch below. If it is not, we
4608 // will use an AAKernelInfo object on the callee to gather information and
4609 // merge that into the current state. The latter happens in the updateImpl.
4610 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4611 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4612 if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4613 // Unknown callees or declarations are not analyzable; we give up.
4614 if (!Callee || !A.isFunctionIPOAmendable(*Callee)) {
4615
4616 // Unknown callees might contain parallel regions, except if they have
4617 // an appropriate assumption attached.
4618 if (!(AssumptionAA.hasAssumption("omp_no_openmp") ||
4619 AssumptionAA.hasAssumption("omp_no_parallelism")))
4620 ReachedUnknownParallelRegions.insert(&CB);
4621
4622 // If SPMDCompatibilityTracker is not fixed, we need to give up on the
4623 // idea we can run something unknown in SPMD-mode.
4624 if (!SPMDCompatibilityTracker.isAtFixpoint()) {
4625 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4626 SPMDCompatibilityTracker.insert(&CB);
4627 }
4628
4629 // We have updated the state for this unknown call properly; there won't
4630 // be any change, so we indicate a fixpoint.
4631 indicateOptimisticFixpoint();
4632 }
4633 // If the callee is known and can be used in IPO, we will update the state
4634 // based on the callee state in updateImpl.
4635 return;
4636 }
4637
4638 const unsigned int WrapperFunctionArgNo = 6;
4639 RuntimeFunction RF = It->getSecond();
4640 switch (RF) {
4641 // All the functions we know are compatible with SPMD mode.
4642 case OMPRTL___kmpc_is_spmd_exec_mode:
4643 case OMPRTL___kmpc_distribute_static_fini:
4644 case OMPRTL___kmpc_for_static_fini:
4645 case OMPRTL___kmpc_global_thread_num:
4646 case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4647 case OMPRTL___kmpc_get_hardware_num_blocks:
4648 case OMPRTL___kmpc_single:
4649 case OMPRTL___kmpc_end_single:
4650 case OMPRTL___kmpc_master:
4651 case OMPRTL___kmpc_end_master:
4652 case OMPRTL___kmpc_barrier:
4653 case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2:
4654 case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2:
4655 case OMPRTL___kmpc_nvptx_end_reduce_nowait:
4656 break;
4657 case OMPRTL___kmpc_distribute_static_init_4:
4658 case OMPRTL___kmpc_distribute_static_init_4u:
4659 case OMPRTL___kmpc_distribute_static_init_8:
4660 case OMPRTL___kmpc_distribute_static_init_8u:
4661 case OMPRTL___kmpc_for_static_init_4:
4662 case OMPRTL___kmpc_for_static_init_4u:
4663 case OMPRTL___kmpc_for_static_init_8:
4664 case OMPRTL___kmpc_for_static_init_8u: {
4665 // Check the schedule and allow static schedule in SPMD mode.
4666 unsigned ScheduleArgOpNo = 2;
4667 auto *ScheduleTypeCI =
4668 dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo));
4669 unsigned ScheduleTypeVal =
4670 ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0;
4671 switch (OMPScheduleType(ScheduleTypeVal)) {
4672 case OMPScheduleType::UnorderedStatic:
4673 case OMPScheduleType::UnorderedStaticChunked:
4674 case OMPScheduleType::OrderedDistribute:
4675 case OMPScheduleType::OrderedDistributeChunked:
4676 break;
4677 default:
4678 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4679 SPMDCompatibilityTracker.insert(&CB);
4680 break;
4681 };
4682 } break;
4683 case OMPRTL___kmpc_target_init:
4684 KernelInitCB = &CB;
4685 break;
4686 case OMPRTL___kmpc_target_deinit:
4687 KernelDeinitCB = &CB;
4688 break;
4689 case OMPRTL___kmpc_parallel_51:
4690 if (auto *ParallelRegion = dyn_cast<Function>(
4691 CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) {
4692 ReachedKnownParallelRegions.insert(ParallelRegion);
4693 // Check nested parallelism.
4694 auto &FnAA = A.getAAFor<AAKernelInfo>(
4695 *this, IRPosition::function(*ParallelRegion), DepClassTy::OPTIONAL);
4696 NestedParallelism |= !FnAA.getState().isValidState() ||
4697 !FnAA.ReachedKnownParallelRegions.empty() ||
4698 !FnAA.ReachedUnknownParallelRegions.empty();
4699 break;
4700 }
4701 // The condition above should usually get the parallel region function
4702 // pointer and record it. On the off chance it doesn't, we assume the
4703 // worst.
4704 ReachedUnknownParallelRegions.insert(&CB);
4705 break;
4706 case OMPRTL___kmpc_omp_task:
4707 // We do not look into tasks right now; just give up.
4708 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4709 SPMDCompatibilityTracker.insert(&CB);
4710 ReachedUnknownParallelRegions.insert(&CB);
4711 break;
4712 case OMPRTL___kmpc_alloc_shared:
4713 case OMPRTL___kmpc_free_shared:
4714 // Return without setting a fixpoint, to be resolved in updateImpl.
4715 return;
4716 default:
4717 // Generally, unknown OpenMP runtime calls cannot be executed in
4718 // SPMD-mode. However, they do not hide parallel regions.
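// (Illustrative example, not an exhaustive rule: a recognized entry point
// that is not handled above, say __kmpc_omp_taskwait, ends up in this
// default case; we then give up on SPMD compatibility for the call but
// still do not record an unknown parallel region for it.)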
4719     SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4720     SPMDCompatibilityTracker.insert(&CB);
4721     break;
4722   }
4723   // All other OpenMP runtime calls will not reach parallel regions, so they
4724   // can be safely ignored for now. Since it is a known OpenMP runtime call, we
4725   // have now modeled all effects and there is no need for any update.
4726   indicateOptimisticFixpoint();
4727 }
4728
4729 ChangeStatus updateImpl(Attributor &A) override {
4730   // TODO: Once we have call site specific value information we can provide
4731   // call site specific liveness information and then it makes
4732   // sense to specialize attributes for call site arguments instead of
4733   // redirecting requests to the callee argument.
4734   Function *F = getAssociatedFunction();
4735
4736   auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4737   const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F);
4738
4739   // If F is not a runtime function, propagate the AAKernelInfo of the callee.
4740   if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4741     const IRPosition &FnPos = IRPosition::function(*F);
4742     auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED);
4743     if (getState() == FnAA.getState())
4744       return ChangeStatus::UNCHANGED;
4745     getState() = FnAA.getState();
4746     return ChangeStatus::CHANGED;
4747   }
4748
4749   // F is a runtime function that allocates or frees memory; check
4750   // AAHeapToStack and AAHeapToShared.
4751   KernelInfoState StateBefore = getState();
4752   assert((It->getSecond() == OMPRTL___kmpc_alloc_shared ||
4753           It->getSecond() == OMPRTL___kmpc_free_shared) &&
4754          "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call");
4755
4756   CallBase &CB = cast<CallBase>(getAssociatedValue());
4757
4758   auto &HeapToStackAA = A.getAAFor<AAHeapToStack>(
4759       *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4760   auto &HeapToSharedAA = A.getAAFor<AAHeapToShared>(
4761       *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4762
4763   RuntimeFunction RF = It->getSecond();
4764
4765   switch (RF) {
4766   // If neither HeapToStack nor HeapToShared assumes the call is removed,
4767   // assume SPMD incompatibility.
4768   case OMPRTL___kmpc_alloc_shared:
4769     if (!HeapToStackAA.isAssumedHeapToStack(CB) &&
4770         !HeapToSharedAA.isAssumedHeapToShared(CB))
4771       SPMDCompatibilityTracker.insert(&CB);
4772     break;
4773   case OMPRTL___kmpc_free_shared:
4774     if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) &&
4775         !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB))
4776       SPMDCompatibilityTracker.insert(&CB);
4777     break;
4778   default:
4779     SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4780     SPMDCompatibilityTracker.insert(&CB);
4781   }
4782
4783   return StateBefore == getState() ? ChangeStatus::UNCHANGED
4784                                    : ChangeStatus::CHANGED;
4785 }
4786 };
4787
4788 struct AAFoldRuntimeCall
4789     : public StateWrapper<BooleanState, AbstractAttribute> {
4790   using Base = StateWrapper<BooleanState, AbstractAttribute>;
4791
4792   AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
4793
4794   /// Statistics are tracked as part of manifest for now.
4795   void trackStatistics() const override {}
4796
4797   /// Create an abstract attribute view for the position \p IRP.
4798 static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP, 4799 Attributor &A); 4800 4801 /// See AbstractAttribute::getName() 4802 const std::string getName() const override { return "AAFoldRuntimeCall"; } 4803 4804 /// See AbstractAttribute::getIdAddr() 4805 const char *getIdAddr() const override { return &ID; } 4806 4807 /// This function should return true if the type of the \p AA is 4808 /// AAFoldRuntimeCall 4809 static bool classof(const AbstractAttribute *AA) { 4810 return (AA->getIdAddr() == &ID); 4811 } 4812 4813 static const char ID; 4814 }; 4815 4816 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { 4817 AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A) 4818 : AAFoldRuntimeCall(IRP, A) {} 4819 4820 /// See AbstractAttribute::getAsStr() 4821 const std::string getAsStr() const override { 4822 if (!isValidState()) 4823 return "<invalid>"; 4824 4825 std::string Str("simplified value: "); 4826 4827 if (!SimplifiedValue) 4828 return Str + std::string("none"); 4829 4830 if (!*SimplifiedValue) 4831 return Str + std::string("nullptr"); 4832 4833 if (ConstantInt *CI = dyn_cast<ConstantInt>(*SimplifiedValue)) 4834 return Str + std::to_string(CI->getSExtValue()); 4835 4836 return Str + std::string("unknown"); 4837 } 4838 4839 void initialize(Attributor &A) override { 4840 if (DisableOpenMPOptFolding) 4841 indicatePessimisticFixpoint(); 4842 4843 Function *Callee = getAssociatedFunction(); 4844 4845 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4846 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); 4847 assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() && 4848 "Expected a known OpenMP runtime function"); 4849 4850 RFKind = It->getSecond(); 4851 4852 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4853 A.registerSimplificationCallback( 4854 IRPosition::callsite_returned(CB), 4855 [&](const IRPosition &IRP, const AbstractAttribute *AA, 4856 bool &UsedAssumedInformation) -> std::optional<Value *> { 4857 assert((isValidState() || 4858 (SimplifiedValue && *SimplifiedValue == nullptr)) && 4859 "Unexpected invalid state!"); 4860 4861 if (!isAtFixpoint()) { 4862 UsedAssumedInformation = true; 4863 if (AA) 4864 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 4865 } 4866 return SimplifiedValue; 4867 }); 4868 } 4869 4870 ChangeStatus updateImpl(Attributor &A) override { 4871 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4872 switch (RFKind) { 4873 case OMPRTL___kmpc_is_spmd_exec_mode: 4874 Changed |= foldIsSPMDExecMode(A); 4875 break; 4876 case OMPRTL___kmpc_parallel_level: 4877 Changed |= foldParallelLevel(A); 4878 break; 4879 case OMPRTL___kmpc_get_hardware_num_threads_in_block: 4880 Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit"); 4881 break; 4882 case OMPRTL___kmpc_get_hardware_num_blocks: 4883 Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams"); 4884 break; 4885 default: 4886 llvm_unreachable("Unhandled OpenMP runtime function!"); 4887 } 4888 4889 return Changed; 4890 } 4891 4892 ChangeStatus manifest(Attributor &A) override { 4893 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4894 4895 if (SimplifiedValue && *SimplifiedValue) { 4896 Instruction &I = *getCtxI(); 4897 A.changeAfterManifest(IRPosition::inst(I), **SimplifiedValue); 4898 A.deleteAfterManifest(I); 4899 4900 CallBase *CB = dyn_cast<CallBase>(&I); 4901 auto Remark = [&](OptimizationRemark OR) { 4902 if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue)) 4903 return OR << "Replacing OpenMP 
runtime call " 4904 << CB->getCalledFunction()->getName() << " with " 4905 << ore::NV("FoldedValue", C->getZExtValue()) << "."; 4906 return OR << "Replacing OpenMP runtime call " 4907 << CB->getCalledFunction()->getName() << "."; 4908 }; 4909 4910 if (CB && EnableVerboseRemarks) 4911 A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark); 4912 4913 LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with " 4914 << **SimplifiedValue << "\n"); 4915 4916 Changed = ChangeStatus::CHANGED; 4917 } 4918 4919 return Changed; 4920 } 4921 4922 ChangeStatus indicatePessimisticFixpoint() override { 4923 SimplifiedValue = nullptr; 4924 return AAFoldRuntimeCall::indicatePessimisticFixpoint(); 4925 } 4926 4927 private: 4928 /// Fold __kmpc_is_spmd_exec_mode into a constant if possible. 4929 ChangeStatus foldIsSPMDExecMode(Attributor &A) { 4930 std::optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4931 4932 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 4933 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 4934 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4935 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4936 4937 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4938 return indicatePessimisticFixpoint(); 4939 4940 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4941 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 4942 DepClassTy::REQUIRED); 4943 4944 if (!AA.isValidState()) { 4945 SimplifiedValue = nullptr; 4946 return indicatePessimisticFixpoint(); 4947 } 4948 4949 if (AA.SPMDCompatibilityTracker.isAssumed()) { 4950 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4951 ++KnownSPMDCount; 4952 else 4953 ++AssumedSPMDCount; 4954 } else { 4955 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4956 ++KnownNonSPMDCount; 4957 else 4958 ++AssumedNonSPMDCount; 4959 } 4960 } 4961 4962 if ((AssumedSPMDCount + KnownSPMDCount) && 4963 (AssumedNonSPMDCount + KnownNonSPMDCount)) 4964 return indicatePessimisticFixpoint(); 4965 4966 auto &Ctx = getAnchorValue().getContext(); 4967 if (KnownSPMDCount || AssumedSPMDCount) { 4968 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 4969 "Expected only SPMD kernels!"); 4970 // All reaching kernels are in SPMD mode. Update all function calls to 4971 // __kmpc_is_spmd_exec_mode to 1. 4972 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); 4973 } else if (KnownNonSPMDCount || AssumedNonSPMDCount) { 4974 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 4975 "Expected only non-SPMD kernels!"); 4976 // All reaching kernels are in non-SPMD mode. Update all function 4977 // calls to __kmpc_is_spmd_exec_mode to 0. 4978 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false); 4979 } else { 4980 // We have empty reaching kernels, therefore we cannot tell if the 4981 // associated call site can be folded. At this moment, SimplifiedValue 4982 // must be none. 4983 assert(!SimplifiedValue && "SimplifiedValue should be none"); 4984 } 4985 4986 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4987 : ChangeStatus::CHANGED; 4988 } 4989 4990 /// Fold __kmpc_parallel_level into a constant if possible. 
4991 ChangeStatus foldParallelLevel(Attributor &A) { 4992 std::optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4993 4994 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4995 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4996 4997 if (!CallerKernelInfoAA.ParallelLevels.isValidState()) 4998 return indicatePessimisticFixpoint(); 4999 5000 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 5001 return indicatePessimisticFixpoint(); 5002 5003 if (CallerKernelInfoAA.ReachingKernelEntries.empty()) { 5004 assert(!SimplifiedValue && 5005 "SimplifiedValue should keep none at this point"); 5006 return ChangeStatus::UNCHANGED; 5007 } 5008 5009 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 5010 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 5011 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 5012 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 5013 DepClassTy::REQUIRED); 5014 if (!AA.SPMDCompatibilityTracker.isValidState()) 5015 return indicatePessimisticFixpoint(); 5016 5017 if (AA.SPMDCompatibilityTracker.isAssumed()) { 5018 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 5019 ++KnownSPMDCount; 5020 else 5021 ++AssumedSPMDCount; 5022 } else { 5023 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 5024 ++KnownNonSPMDCount; 5025 else 5026 ++AssumedNonSPMDCount; 5027 } 5028 } 5029 5030 if ((AssumedSPMDCount + KnownSPMDCount) && 5031 (AssumedNonSPMDCount + KnownNonSPMDCount)) 5032 return indicatePessimisticFixpoint(); 5033 5034 auto &Ctx = getAnchorValue().getContext(); 5035 // If the caller can only be reached by SPMD kernel entries, the parallel 5036 // level is 1. Similarly, if the caller can only be reached by non-SPMD 5037 // kernel entries, it is 0. 5038 if (AssumedSPMDCount || KnownSPMDCount) { 5039 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 5040 "Expected only SPMD kernels!"); 5041 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1); 5042 } else { 5043 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 5044 "Expected only non-SPMD kernels!"); 5045 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0); 5046 } 5047 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 5048 : ChangeStatus::CHANGED; 5049 } 5050 5051 ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) { 5052 // Specialize only if all the calls agree with the attribute constant value 5053 int32_t CurrentAttrValue = -1; 5054 std::optional<Value *> SimplifiedValueBefore = SimplifiedValue; 5055 5056 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 5057 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 5058 5059 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 5060 return indicatePessimisticFixpoint(); 5061 5062 // Iterate over the kernels that reach this function 5063 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 5064 int32_t NextAttrVal = K->getFnAttributeAsParsedInteger(Attr, -1); 5065 5066 if (NextAttrVal == -1 || 5067 (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal)) 5068 return indicatePessimisticFixpoint(); 5069 CurrentAttrValue = NextAttrVal; 5070 } 5071 5072 if (CurrentAttrValue != -1) { 5073 auto &Ctx = getAnchorValue().getContext(); 5074 SimplifiedValue = 5075 ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue); 5076 } 5077 return SimplifiedValue == SimplifiedValueBefore ? 
ChangeStatus::UNCHANGED 5078 : ChangeStatus::CHANGED; 5079 } 5080 5081 /// An optional value the associated value is assumed to fold to. That is, we 5082 /// assume the associated value (which is a call) can be replaced by this 5083 /// simplified value. 5084 std::optional<Value *> SimplifiedValue; 5085 5086 /// The runtime function kind of the callee of the associated call site. 5087 RuntimeFunction RFKind; 5088 }; 5089 5090 } // namespace 5091 5092 /// Register folding callsite 5093 void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) { 5094 auto &RFI = OMPInfoCache.RFIs[RF]; 5095 RFI.foreachUse(SCC, [&](Use &U, Function &F) { 5096 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI); 5097 if (!CI) 5098 return false; 5099 A.getOrCreateAAFor<AAFoldRuntimeCall>( 5100 IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr, 5101 DepClassTy::NONE, /* ForceUpdate */ false, 5102 /* UpdateAfterInit */ false); 5103 return false; 5104 }); 5105 } 5106 5107 void OpenMPOpt::registerAAs(bool IsModulePass) { 5108 if (SCC.empty()) 5109 return; 5110 5111 if (IsModulePass) { 5112 // Ensure we create the AAKernelInfo AAs first and without triggering an 5113 // update. This will make sure we register all value simplification 5114 // callbacks before any other AA has the chance to create an AAValueSimplify 5115 // or similar. 5116 auto CreateKernelInfoCB = [&](Use &, Function &Kernel) { 5117 A.getOrCreateAAFor<AAKernelInfo>( 5118 IRPosition::function(Kernel), /* QueryingAA */ nullptr, 5119 DepClassTy::NONE, /* ForceUpdate */ false, 5120 /* UpdateAfterInit */ false); 5121 return false; 5122 }; 5123 OMPInformationCache::RuntimeFunctionInfo &InitRFI = 5124 OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 5125 InitRFI.foreachUse(SCC, CreateKernelInfoCB); 5126 5127 registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode); 5128 registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level); 5129 registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block); 5130 registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks); 5131 } 5132 5133 // Create CallSite AA for all Getters. 5134 if (DeduceICVValues) { 5135 for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) { 5136 auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)]; 5137 5138 auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter]; 5139 5140 auto CreateAA = [&](Use &U, Function &Caller) { 5141 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI); 5142 if (!CI) 5143 return false; 5144 5145 auto &CB = cast<CallBase>(*CI); 5146 5147 IRPosition CBPos = IRPosition::callsite_function(CB); 5148 A.getOrCreateAAFor<AAICVTracker>(CBPos); 5149 return false; 5150 }; 5151 5152 GetterRFI.foreachUse(SCC, CreateAA); 5153 } 5154 } 5155 5156 // Create an ExecutionDomain AA for every function and a HeapToStack AA for 5157 // every function if there is a device kernel. 5158 if (!isOpenMPDevice(M)) 5159 return; 5160 5161 for (auto *F : SCC) { 5162 if (F->isDeclaration()) 5163 continue; 5164 5165 // We look at internal functions only on-demand but if any use is not a 5166 // direct call or outside the current set of analyzed functions, we have 5167 // to do it eagerly. 
5168 if (F->hasLocalLinkage()) { 5169 if (llvm::all_of(F->uses(), [this](const Use &U) { 5170 const auto *CB = dyn_cast<CallBase>(U.getUser()); 5171 return CB && CB->isCallee(&U) && 5172 A.isRunOn(const_cast<Function *>(CB->getCaller())); 5173 })) 5174 continue; 5175 } 5176 registerAAsForFunction(A, *F); 5177 } 5178 } 5179 5180 void OpenMPOpt::registerAAsForFunction(Attributor &A, const Function &F) { 5181 if (!DisableOpenMPOptDeglobalization) 5182 A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F)); 5183 A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(F)); 5184 if (!DisableOpenMPOptDeglobalization) 5185 A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(F)); 5186 5187 for (auto &I : instructions(F)) { 5188 if (auto *LI = dyn_cast<LoadInst>(&I)) { 5189 bool UsedAssumedInformation = false; 5190 A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr, 5191 UsedAssumedInformation, AA::Interprocedural); 5192 continue; 5193 } 5194 if (auto *SI = dyn_cast<StoreInst>(&I)) { 5195 A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI)); 5196 continue; 5197 } 5198 if (auto *II = dyn_cast<IntrinsicInst>(&I)) { 5199 if (II->getIntrinsicID() == Intrinsic::assume) { 5200 A.getOrCreateAAFor<AAPotentialValues>( 5201 IRPosition::value(*II->getArgOperand(0))); 5202 continue; 5203 } 5204 } 5205 } 5206 } 5207 5208 const char AAICVTracker::ID = 0; 5209 const char AAKernelInfo::ID = 0; 5210 const char AAExecutionDomain::ID = 0; 5211 const char AAHeapToShared::ID = 0; 5212 const char AAFoldRuntimeCall::ID = 0; 5213 5214 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP, 5215 Attributor &A) { 5216 AAICVTracker *AA = nullptr; 5217 switch (IRP.getPositionKind()) { 5218 case IRPosition::IRP_INVALID: 5219 case IRPosition::IRP_FLOAT: 5220 case IRPosition::IRP_ARGUMENT: 5221 case IRPosition::IRP_CALL_SITE_ARGUMENT: 5222 llvm_unreachable("ICVTracker can only be created for function position!"); 5223 case IRPosition::IRP_RETURNED: 5224 AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A); 5225 break; 5226 case IRPosition::IRP_CALL_SITE_RETURNED: 5227 AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A); 5228 break; 5229 case IRPosition::IRP_CALL_SITE: 5230 AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A); 5231 break; 5232 case IRPosition::IRP_FUNCTION: 5233 AA = new (A.Allocator) AAICVTrackerFunction(IRP, A); 5234 break; 5235 } 5236 5237 return *AA; 5238 } 5239 5240 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP, 5241 Attributor &A) { 5242 AAExecutionDomainFunction *AA = nullptr; 5243 switch (IRP.getPositionKind()) { 5244 case IRPosition::IRP_INVALID: 5245 case IRPosition::IRP_FLOAT: 5246 case IRPosition::IRP_ARGUMENT: 5247 case IRPosition::IRP_CALL_SITE_ARGUMENT: 5248 case IRPosition::IRP_RETURNED: 5249 case IRPosition::IRP_CALL_SITE_RETURNED: 5250 case IRPosition::IRP_CALL_SITE: 5251 llvm_unreachable( 5252 "AAExecutionDomain can only be created for function position!"); 5253 case IRPosition::IRP_FUNCTION: 5254 AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A); 5255 break; 5256 } 5257 5258 return *AA; 5259 } 5260 5261 AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP, 5262 Attributor &A) { 5263 AAHeapToSharedFunction *AA = nullptr; 5264 switch (IRP.getPositionKind()) { 5265 case IRPosition::IRP_INVALID: 5266 case IRPosition::IRP_FLOAT: 5267 case IRPosition::IRP_ARGUMENT: 5268 case IRPosition::IRP_CALL_SITE_ARGUMENT: 5269 case IRPosition::IRP_RETURNED: 5270 case IRPosition::IRP_CALL_SITE_RETURNED: 
5271   case IRPosition::IRP_CALL_SITE:
5272     llvm_unreachable(
5273         "AAHeapToShared can only be created for function position!");
5274   case IRPosition::IRP_FUNCTION:
5275     AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A);
5276     break;
5277   }
5278
5279   return *AA;
5280 }
5281
5282 AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP,
5283                                               Attributor &A) {
5284   AAKernelInfo *AA = nullptr;
5285   switch (IRP.getPositionKind()) {
5286   case IRPosition::IRP_INVALID:
5287   case IRPosition::IRP_FLOAT:
5288   case IRPosition::IRP_ARGUMENT:
5289   case IRPosition::IRP_RETURNED:
5290   case IRPosition::IRP_CALL_SITE_RETURNED:
5291   case IRPosition::IRP_CALL_SITE_ARGUMENT:
5292     llvm_unreachable("KernelInfo can only be created for function position!");
5293   case IRPosition::IRP_CALL_SITE:
5294     AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A);
5295     break;
5296   case IRPosition::IRP_FUNCTION:
5297     AA = new (A.Allocator) AAKernelInfoFunction(IRP, A);
5298     break;
5299   }
5300
5301   return *AA;
5302 }
5303
5304 AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP,
5305                                                          Attributor &A) {
5306   AAFoldRuntimeCall *AA = nullptr;
5307   switch (IRP.getPositionKind()) {
5308   case IRPosition::IRP_INVALID:
5309   case IRPosition::IRP_FLOAT:
5310   case IRPosition::IRP_ARGUMENT:
5311   case IRPosition::IRP_RETURNED:
5312   case IRPosition::IRP_FUNCTION:
5313   case IRPosition::IRP_CALL_SITE:
5314   case IRPosition::IRP_CALL_SITE_ARGUMENT:
5315     llvm_unreachable("AAFoldRuntimeCall can only be created for call site returned position!");
5316   case IRPosition::IRP_CALL_SITE_RETURNED:
5317     AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A);
5318     break;
5319   }
5320
5321   return *AA;
5322 }
5323
5324 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
5325   if (!containsOpenMP(M))
5326     return PreservedAnalyses::all();
5327   if (DisableOpenMPOptimizations)
5328     return PreservedAnalyses::all();
5329
5330   FunctionAnalysisManager &FAM =
5331       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
5332   KernelSet Kernels = getDeviceKernels(M);
5333
5334   if (PrintModuleBeforeOptimizations)
5335     LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt Module Pass:\n" << M);
5336
5337   auto IsCalled = [&](Function &F) {
5338     if (Kernels.contains(&F))
5339       return true;
5340     for (const User *U : F.users())
5341       if (!isa<BlockAddress>(U))
5342         return true;
5343     return false;
5344   };
5345
5346   auto EmitRemark = [&](Function &F) {
5347     auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5348     ORE.emit([&]() {
5349       OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F);
5350       return ORA << "Could not internalize function. "
5351                  << "Some optimizations may not be possible. [OMP140]";
5352     });
5353   };
5354
5355   // Create internal copies of each function if this is a device (kernel)
5356   // module. This allows interprocedural passes to see every call edge.
5357   DenseMap<Function *, Function *> InternalizedMap;
5358   if (isOpenMPDevice(M)) {
5359     SmallPtrSet<Function *, 16> InternalizeFns;
5360     for (Function &F : M)
5361       if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) &&
5362           !DisableInternalization) {
5363         if (Attributor::isInternalizable(F)) {
5364           InternalizeFns.insert(&F);
5365         } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) {
5366           EmitRemark(F);
5367         }
5368       }
5369
5370     Attributor::internalizeFunctions(InternalizeFns, InternalizedMap);
5371   }
5372
5373   // Look at every function in the Module unless it was internalized.
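  // Functions for which an internal copy was created above are analyzed through
  // that copy; InternalizedMap maps each original to its copy, so the original,
  // externally visible definitions are skipped below.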
5374 SetVector<Function *> Functions; 5375 SmallVector<Function *, 16> SCC; 5376 for (Function &F : M) 5377 if (!F.isDeclaration() && !InternalizedMap.lookup(&F)) { 5378 SCC.push_back(&F); 5379 Functions.insert(&F); 5380 } 5381 5382 if (SCC.empty()) 5383 return PreservedAnalyses::all(); 5384 5385 AnalysisGetter AG(FAM); 5386 5387 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 5388 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 5389 }; 5390 5391 BumpPtrAllocator Allocator; 5392 CallGraphUpdater CGUpdater; 5393 5394 bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink || 5395 LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink; 5396 OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ nullptr, Kernels, 5397 PostLink); 5398 5399 unsigned MaxFixpointIterations = 5400 (isOpenMPDevice(M)) ? SetFixpointIterations : 32; 5401 5402 AttributorConfig AC(CGUpdater); 5403 AC.DefaultInitializeLiveInternals = false; 5404 AC.IsModulePass = true; 5405 AC.RewriteSignatures = false; 5406 AC.MaxFixpointIterations = MaxFixpointIterations; 5407 AC.OREGetter = OREGetter; 5408 AC.PassName = DEBUG_TYPE; 5409 AC.InitializationCallback = OpenMPOpt::registerAAsForFunction; 5410 5411 Attributor A(Functions, InfoCache, AC); 5412 5413 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 5414 bool Changed = OMPOpt.run(true); 5415 5416 // Optionally inline device functions for potentially better performance. 5417 if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M)) 5418 for (Function &F : M) 5419 if (!F.isDeclaration() && !Kernels.contains(&F) && 5420 !F.hasFnAttribute(Attribute::NoInline)) 5421 F.addFnAttr(Attribute::AlwaysInline); 5422 5423 if (PrintModuleAfterOptimizations) 5424 LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M); 5425 5426 if (Changed) 5427 return PreservedAnalyses::none(); 5428 5429 return PreservedAnalyses::all(); 5430 } 5431 5432 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C, 5433 CGSCCAnalysisManager &AM, 5434 LazyCallGraph &CG, 5435 CGSCCUpdateResult &UR) { 5436 if (!containsOpenMP(*C.begin()->getFunction().getParent())) 5437 return PreservedAnalyses::all(); 5438 if (DisableOpenMPOptimizations) 5439 return PreservedAnalyses::all(); 5440 5441 SmallVector<Function *, 16> SCC; 5442 // If there are kernels in the module, we have to run on all SCC's. 
5443 for (LazyCallGraph::Node &N : C) { 5444 Function *Fn = &N.getFunction(); 5445 SCC.push_back(Fn); 5446 } 5447 5448 if (SCC.empty()) 5449 return PreservedAnalyses::all(); 5450 5451 Module &M = *C.begin()->getFunction().getParent(); 5452 5453 if (PrintModuleBeforeOptimizations) 5454 LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt CGSCC Pass:\n" << M); 5455 5456 KernelSet Kernels = getDeviceKernels(M); 5457 5458 FunctionAnalysisManager &FAM = 5459 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager(); 5460 5461 AnalysisGetter AG(FAM); 5462 5463 auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { 5464 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); 5465 }; 5466 5467 BumpPtrAllocator Allocator; 5468 CallGraphUpdater CGUpdater; 5469 CGUpdater.initialize(CG, C, AM, UR); 5470 5471 bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink || 5472 LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink; 5473 SetVector<Function *> Functions(SCC.begin(), SCC.end()); 5474 OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, 5475 /*CGSCC*/ &Functions, Kernels, PostLink); 5476 5477 unsigned MaxFixpointIterations = 5478 (isOpenMPDevice(M)) ? SetFixpointIterations : 32; 5479 5480 AttributorConfig AC(CGUpdater); 5481 AC.DefaultInitializeLiveInternals = false; 5482 AC.IsModulePass = false; 5483 AC.RewriteSignatures = false; 5484 AC.MaxFixpointIterations = MaxFixpointIterations; 5485 AC.OREGetter = OREGetter; 5486 AC.PassName = DEBUG_TYPE; 5487 AC.InitializationCallback = OpenMPOpt::registerAAsForFunction; 5488 5489 Attributor A(Functions, InfoCache, AC); 5490 5491 OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); 5492 bool Changed = OMPOpt.run(false); 5493 5494 if (PrintModuleAfterOptimizations) 5495 LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M); 5496 5497 if (Changed) 5498 return PreservedAnalyses::none(); 5499 5500 return PreservedAnalyses::all(); 5501 } 5502 5503 KernelSet llvm::omp::getDeviceKernels(Module &M) { 5504 // TODO: Create a more cross-platform way of determining device kernels. 5505 NamedMDNode *MD = M.getNamedMetadata("nvvm.annotations"); 5506 KernelSet Kernels; 5507 5508 if (!MD) 5509 return Kernels; 5510 5511 for (auto *Op : MD->operands()) { 5512 if (Op->getNumOperands() < 2) 5513 continue; 5514 MDString *KindID = dyn_cast<MDString>(Op->getOperand(1)); 5515 if (!KindID || KindID->getString() != "kernel") 5516 continue; 5517 5518 Function *KernelFn = 5519 mdconst::dyn_extract_or_null<Function>(Op->getOperand(0)); 5520 if (!KernelFn) 5521 continue; 5522 5523 ++NumOpenMPTargetRegionKernels; 5524 5525 Kernels.insert(KernelFn); 5526 } 5527 5528 return Kernels; 5529 } 5530 5531 bool llvm::omp::containsOpenMP(Module &M) { 5532 Metadata *MD = M.getModuleFlag("openmp"); 5533 if (!MD) 5534 return false; 5535 5536 return true; 5537 } 5538 5539 bool llvm::omp::isOpenMPDevice(Module &M) { 5540 Metadata *MD = M.getModuleFlag("openmp-device"); 5541 if (!MD) 5542 return false; 5543 5544 return true; 5545 } 5546