//===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpenMP specific optimizations:
//
// - Deduplication of runtime calls, e.g., omp_get_thread_num.
// - Replacing globalized device memory with stack memory.
// - Replacing globalized device memory with shared memory.
// - Parallel region merging.
// - Transforming generic-mode device kernels to SPMD mode.
// - Specializing the state machine for generic-mode device kernels.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/OpenMPOpt.h"

#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"

#include <algorithm>

using namespace llvm;
using namespace omp;

#define DEBUG_TYPE "openmp-opt"

static cl::opt<bool> DisableOpenMPOptimizations(
    "openmp-opt-disable", cl::desc("Disable OpenMP specific optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> EnableParallelRegionMerging(
    "openmp-opt-enable-merging",
    cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    DisableInternalization("openmp-opt-disable-internalization",
                           cl::desc("Disable function internalization."),
                           cl::Hidden, cl::init(false));

static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
                                    cl::Hidden);
static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> HideMemoryTransferLatency(
    "openmp-hide-memory-transfer-latency",
    cl::desc("[WIP] Tries to hide the latency of host to device memory"
             " transfers"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptDeglobalization(
    "openmp-opt-disable-deglobalization",
    cl::desc("Disable OpenMP optimizations involving deglobalization."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptSPMDization(
    "openmp-opt-disable-spmdization",
    cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
    cl::Hidden, cl::init(false));

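// Most of these flags exist for testing and debugging. For example, a
// hypothetical invocation that disables only SPMD-ization might look like:
//   opt -passes=openmp-opt -openmp-opt-disable-spmdization in.ll -S -o out.ll
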
static cl::opt<bool> DisableOpenMPOptFolding(
    "openmp-opt-disable-folding",
    cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
    cl::init(false));

static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
    "openmp-opt-disable-state-machine-rewrite",
    cl::desc("Disable OpenMP optimizations that replace the state machine."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> DisableOpenMPOptBarrierElimination(
    "openmp-opt-disable-barrier-elimination",
    cl::desc("Disable OpenMP optimizations that eliminate barriers."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleAfterOptimizations(
    "openmp-opt-print-module-after",
    cl::desc("Print the current module after OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> PrintModuleBeforeOptimizations(
    "openmp-opt-print-module-before",
    cl::desc("Print the current module before OpenMP optimizations."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> AlwaysInlineDeviceFunctions(
    "openmp-opt-inline-device",
    cl::desc("Inline all applicable functions on the device."), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    EnableVerboseRemarks("openmp-opt-verbose-remarks",
                         cl::desc("Enables more verbose remarks."), cl::Hidden,
                         cl::init(false));

static cl::opt<unsigned>
    SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of attributor iterations."),
                          cl::init(256));

static cl::opt<unsigned>
    SharedMemoryLimit("openmp-opt-shared-limit", cl::Hidden,
                      cl::desc("Maximum amount of shared memory to use."),
                      cl::init(std::numeric_limits<unsigned>::max()));

STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
          "Number of OpenMP runtime calls deduplicated");
STATISTIC(NumOpenMPParallelRegionsDeleted,
          "Number of OpenMP parallel regions deleted");
STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
          "Number of OpenMP runtime functions identified");
STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
          "Number of OpenMP runtime function uses identified");
STATISTIC(NumOpenMPTargetRegionKernels,
          "Number of OpenMP target region entry points (=kernels) identified");
STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "SPMD-mode instead of generic-mode");
STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode without a state machine");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines with fallback");
STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
          "Number of OpenMP target region entry points (=kernels) executed in "
          "generic-mode with customized state machines without fallback");
STATISTIC(
    NumOpenMPParallelRegionsReplacedInGPUStateMachine,
    "Number of OpenMP parallel regions replaced with ID in GPU state machines");
STATISTIC(NumOpenMPParallelRegionsMerged,
          "Number of OpenMP parallel regions merged");
STATISTIC(NumBytesMovedToSharedMemory,
          "Amount of memory pushed to shared memory");
STATISTIC(NumBarriersEliminated, "Number of redundant barriers eliminated");

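// Note: these STATISTIC counters are reported when statistics are enabled,
// e.g. by passing -stats to opt (assuming a build with statistics enabled).
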
#if !defined(NDEBUG)
static constexpr auto TAG = "[" DEBUG_TYPE "]";
#endif

namespace {

struct AAHeapToShared;

struct AAICVTracker;

/// OpenMP specific information. For now, stores RFIs and ICVs also needed for
/// Attributor runs.
struct OMPInformationCache : public InformationCache {
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
                      KernelSet &Kernels)
      : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
        Kernels(Kernels) {

    OMPBuilder.initialize();
    initializeRuntimeFunctions();
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by the InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL function corresponding to the override clause of this ICV.
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function.
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector<Type *, 8> ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector<Use *, 16>;

    /// Clear UsesMap for runtime function.
    void clearUsesMap() { UsesMap.clear(); }

    /// Boolean conversion that is true if the runtime function was found.
    operator bool() const { return Declaration; }

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      std::shared_ptr<UseVector> &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared<UseVector>();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
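    /// A usage sketch (the callback body is hypothetical):
    ///   RFI.foreachUse(SCC, [](Use &U, Function &F) {
    ///     /* inspect the use U in function F */
    ///     return false; // return true to forget this use
    ///   });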
    void foreachUse(SmallVectorImpl<Function *> &SCC,
                    function_ref<bool(Use &, Function &)> CB) {
      for (Function *F : SCC)
        foreachUse(CB, F);
    }

    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
      SmallVector<unsigned, 8> ToBeDeleted;
      ToBeDeleted.clear();

      unsigned Idx = 0;
      UseVector &UV = getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }

  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;

  public:
    /// Iterators for the uses of this runtime function.
    decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
    decltype(UsesMap)::iterator end() { return UsesMap.end(); }
  };

  /// An OpenMP-IR-Builder instance.
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
                  RuntimeFunction::OMPRTL___last>
      RFIs;

  /// Map from function declarations/definitions to their runtime enum type.
  DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray<InternalControlVarInfo, InternalControlVar,
                  InternalControlVar::ICV___last>
      ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue = ConstantInt::get(                                        \
          Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector<Type *, 8> &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    // and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto *RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  // Helper to collect all uses of the declaration in the UsesMap.
  unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
    unsigned NumUses = 0;
    if (!RFI.Declaration)
      return NumUses;
    OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

    if (CollectStats) {
      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
    }

    // TODO: We directly convert uses into proper calls and unknown uses.
    for (Use &U : RFI.Declaration->uses()) {
      if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
        if (ModuleSlice.count(UserI->getFunction())) {
          RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
          ++NumUses;
        }
      } else {
        RFI.getOrCreateUseVector(nullptr).push_back(&U);
        ++NumUses;
      }
    }
    return NumUses;
  }

  // Helper function to recollect uses of a runtime function.
  void recollectUsesForFunction(RuntimeFunction RTF) {
    auto &RFI = RFIs[RTF];
    RFI.clearUsesMap();
    collectUses(RFI, /*CollectStats*/ false);
  }

  // Helper function to recollect uses of all runtime functions.
  void recollectUses() {
    for (int Idx = 0; Idx < RFIs.size(); ++Idx)
      recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
  }

  // Helper function to inherit the calling convention of the function callee.
  void setCallingConvention(FunctionCallee Callee, CallInst *CI) {
    if (Function *Fn = dyn_cast<Function>(Callee.getCallee()))
      CI->setCallingConv(Fn->getCallingConv());
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OpenMPKinds.def.
  void initializeRuntimeFunctions() {
    Module &M = *((*ModuleSlice.begin())->getParent());

    // Helper macros for handling __VA_ARGS__ in OMP_RTL
#define OMP_TYPE(VarName, ...)                                                 \
  Type *VarName = OMPBuilder.VarName;                                          \
  (void)VarName;

#define OMP_ARRAY_TYPE(VarName, ...)                                           \
  ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
  (void)VarName##Ty;                                                           \
  PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
  (void)VarName##PtrTy;

#define OMP_FUNCTION_TYPE(VarName, ...)                                        \
  FunctionType *VarName = OMPBuilder.VarName;                                  \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_STRUCT_TYPE(VarName, ...)                                          \
  StructType *VarName = OMPBuilder.VarName;                                    \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

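// For reference, the OMP_RTL entries expanded below come from OMPKinds.def
// and look roughly like (illustrative):
//   __OMP_RTL(__kmpc_barrier, false, Void, IdentPtr, Int32)
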
#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
    Function *F = M.getFunction(_Name);                                        \
    RTLFunctions.insert(F);                                                    \
    if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
      RuntimeFunctionIDMap[F] = _Enum;                                         \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = OMPBuilder._ReturnType;                                 \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = collectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }

  /// Collection of known kernels (\see Kernel) in the module.
  KernelSet &Kernels;

  /// Collection of known OpenMP runtime functions.
  DenseSet<const Function *> RTLFunctions;
};

template <typename Ty, bool InsertInvalidates = true>
struct BooleanStateWithSetVector : public BooleanState {
  bool contains(const Ty &Elem) const { return Set.contains(Elem); }
  bool insert(const Ty &Elem) {
    if (InsertInvalidates)
      BooleanState::indicatePessimisticFixpoint();
    return Set.insert(Elem);
  }

  const Ty &operator[](int Idx) const { return Set[Idx]; }
  bool operator==(const BooleanStateWithSetVector &RHS) const {
    return BooleanState::operator==(RHS) && Set == RHS.Set;
  }
  bool operator!=(const BooleanStateWithSetVector &RHS) const {
    return !(*this == RHS);
  }

  bool empty() const { return Set.empty(); }
  size_t size() const { return Set.size(); }

  /// "Clamp" this state with \p RHS.
  BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
    BooleanState::operator^=(RHS);
    Set.insert(RHS.Set.begin(), RHS.Set.end());
    return *this;
  }

private:
  /// A set to keep track of elements.
  SetVector<Ty> Set;

public:
  typename decltype(Set)::iterator begin() { return Set.begin(); }
  typename decltype(Set)::iterator end() { return Set.end(); }
  typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
  typename decltype(Set)::const_iterator end() const { return Set.end(); }
};

template <typename Ty, bool InsertInvalidates = true>
using BooleanStateWithPtrSetVector =
    BooleanStateWithSetVector<Ty *, InsertInvalidates>;

struct KernelInfoState : AbstractState {
  /// Flag to track if we reached a fixpoint.
  bool IsAtFixpoint = false;

  /// The parallel regions (identified by the outlined parallel functions) that
  /// can be reached from the associated function.
  BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
      ReachedKnownParallelRegions;

  /// State to track what parallel region we might reach.
  BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;

  /// State to track if we are in SPMD-mode, assumed or known, and why we
  /// decided we cannot be. If it is assumed, then RequiresFullRuntime should
  /// also be false.
  BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;

  /// The __kmpc_target_init call in this kernel, if any. If we find more than
  /// one we abort as the kernel is malformed.
  CallBase *KernelInitCB = nullptr;

  /// The __kmpc_target_deinit call in this kernel, if any. If we find more
  /// than one we abort as the kernel is malformed.
  CallBase *KernelDeinitCB = nullptr;

  /// Flag to indicate if the associated function is a kernel entry.
  bool IsKernelEntry = false;

  /// State to track what kernel entries can reach the associated function.
  BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;

  /// State to indicate if we can track the parallel level of the associated
  /// function. We will give up tracking if we encounter an unknown caller or
  /// the caller is __kmpc_parallel_51.
  BooleanStateWithSetVector<uint8_t> ParallelLevels;

  /// Abstract State interface
  ///{

  KernelInfoState() = default;
  KernelInfoState(bool BestState) {
    if (!BestState)
      indicatePessimisticFixpoint();
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return true; }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return IsAtFixpoint; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixpoint = true;
    ReachingKernelEntries.indicatePessimisticFixpoint();
    SPMDCompatibilityTracker.indicatePessimisticFixpoint();
    ReachedKnownParallelRegions.indicatePessimisticFixpoint();
    ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    IsAtFixpoint = true;
    ReachingKernelEntries.indicateOptimisticFixpoint();
    SPMDCompatibilityTracker.indicateOptimisticFixpoint();
    ReachedKnownParallelRegions.indicateOptimisticFixpoint();
    ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// Return the assumed state.
  KernelInfoState &getAssumed() { return *this; }
  const KernelInfoState &getAssumed() const { return *this; }

  bool operator==(const KernelInfoState &RHS) const {
    if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
      return false;
    if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
      return false;
    if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
      return false;
    if (ReachingKernelEntries != RHS.ReachingKernelEntries)
      return false;
    return true;
  }

  /// Returns true if this kernel contains any OpenMP parallel regions.
  bool mayContainParallelRegion() {
    return !ReachedKnownParallelRegions.empty() ||
           !ReachedUnknownParallelRegions.empty();
  }

  /// Return empty set as the best state of potential values.
  static KernelInfoState getBestState() { return KernelInfoState(true); }

  static KernelInfoState getBestState(KernelInfoState &KIS) {
    return getBestState();
  }

  /// Return full set as the worst state of potential values.
  static KernelInfoState getWorstState() { return KernelInfoState(false); }

  /// "Clamp" this state with \p KIS.
  KernelInfoState operator^=(const KernelInfoState &KIS) {
    // Do not merge two different _init and _deinit call sites.
    if (KIS.KernelInitCB) {
      if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
        llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
                         "assumptions.");
      KernelInitCB = KIS.KernelInitCB;
    }
    if (KIS.KernelDeinitCB) {
      if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
        llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
                         "assumptions.");
      KernelDeinitCB = KIS.KernelDeinitCB;
    }
    SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
    ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
    ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
    return *this;
  }

  KernelInfoState operator&=(const KernelInfoState &KIS) {
    return (*this ^= KIS);
  }

  ///}
};

/// Used to map the values physically (in the IR) stored in an offload
/// array, to a vector in memory.
struct OffloadArray {
  /// Physical array (in the IR).
  AllocaInst *Array = nullptr;
  /// Mapped values.
  SmallVector<Value *, 8> StoredValues;
  /// Last stores made in the offload array.
  SmallVector<StoreInst *, 8> LastAccesses;

  OffloadArray() = default;

  /// Initializes the OffloadArray with the values stored in \p Array before
  /// instruction \p Before is reached. Returns false if the initialization
  /// fails.
  /// This MUST be used immediately after the construction of the object.
  bool initialize(AllocaInst &Array, Instruction &Before) {
    if (!Array.getAllocatedType()->isArrayTy())
      return false;

    if (!getValues(Array, Before))
      return false;

    this->Array = &Array;
    return true;
  }

  static const unsigned DeviceIDArgNum = 1;
  static const unsigned BasePtrsArgNum = 3;
  static const unsigned PtrsArgNum = 4;
  static const unsigned SizesArgNum = 5;

private:
  /// Traverses the BasicBlock where \p Array is, collecting the stores made to
  /// \p Array, leaving StoredValues with the values stored before the
  /// instruction \p Before is reached.
  bool getValues(AllocaInst &Array, Instruction &Before) {
    // Initialize container.
    const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
    StoredValues.assign(NumValues, nullptr);
    LastAccesses.assign(NumValues, nullptr);

    // TODO: This assumes the instruction \p Before is in the same
    // BasicBlock as Array. Make it general, for any control flow graph.
    BasicBlock *BB = Array.getParent();
    if (BB != Before.getParent())
      return false;

    const DataLayout &DL = Array.getModule()->getDataLayout();
    const unsigned int PointerSize = DL.getPointerSize();

    for (Instruction &I : *BB) {
      if (&I == &Before)
        break;

      if (!isa<StoreInst>(&I))
        continue;

      auto *S = cast<StoreInst>(&I);
      int64_t Offset = -1;
      auto *Dst =
          GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
      if (Dst == &Array) {
        int64_t Idx = Offset / PointerSize;
        StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
        LastAccesses[Idx] = S;
      }
    }

    return isFilled();
  }

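  // getValues() above matches the typical lowering of offload arrays, which
  // looks roughly like (illustrative IR):
  //   %offload_baseptrs = alloca [2 x ptr]
  //   %gep = getelementptr [2 x ptr], ptr %offload_baseptrs, i64 0, i64 0
  //   store ptr %base, ptr %gep
  //   ...
  //   call void @__tgt_target_data_begin_mapper(..., ptr %offload_baseptrs, ...)
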
  /// Returns true if all values in StoredValues and
  /// LastAccesses are not nullptrs.
  bool isFilled() {
    const unsigned NumValues = StoredValues.size();
    for (unsigned I = 0; I < NumValues; ++I) {
      if (!StoredValues[I] || !LastAccesses[I])
        return false;
    }

    return true;
  }
};

struct OpenMPOpt {

  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;

  OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Check if any remarks are enabled for openmp-opt.
  bool remarksEnabled() {
    auto &Ctx = M.getContext();
    return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
  }

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
  bool run(bool IsModulePass) {
    if (SCC.empty())
      return false;

    bool Changed = false;

    LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                      << " functions in a slice with "
                      << OMPInfoCache.ModuleSlice.size() << " functions\n");

    if (IsModulePass) {
      Changed |= runAttributor(IsModulePass);

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      // TODO: This should be folded into buildCustomStateMachine.
      Changed |= rewriteDeviceCodeStateMachine();

      if (remarksEnabled())
        analysisGlobalization();

      Changed |= eliminateBarriers();
    } else {
      if (PrintICVValues)
        printICVs();
      if (PrintOpenMPKernels)
        printKernels();

      Changed |= runAttributor(IsModulePass);

      // Recollect uses, in case Attributor deleted any.
      OMPInfoCache.recollectUses();

      Changed |= deleteParallelRegions();

      if (HideMemoryTransferLatency)
        Changed |= hideMemTransfersLatency();
      Changed |= deduplicateRuntimeCalls();
      if (EnableParallelRegionMerging) {
        if (mergeParallelRegions()) {
          deduplicateRuntimeCalls();
          Changed = true;
        }
      }

      Changed |= eliminateBarriers();
    }

    return Changed;
  }

  /// Print initial ICV values for testing.
  /// FIXME: This should be done from the Attributor once it is added.
  void printICVs() const {
    InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
                                 ICV_proc_bind};

    for (Function *F : OMPInfoCache.ModuleSlice) {
      for (auto ICV : ICVs) {
        auto ICVInfo = OMPInfoCache.ICVs[ICV];
        auto Remark = [&](OptimizationRemarkAnalysis ORA) {
          return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                     << " Value: "
                     << (ICVInfo.InitValue
                             ? toString(ICVInfo.InitValue->getValue(), 10, true)
                             : "IMPLEMENTATION_DEFINED");
        };

        emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark);
      }
    }
  }

  /// Print OpenMP GPU kernels for testing.
  void printKernels() const {
    for (Function *F : SCC) {
      if (!OMPInfoCache.Kernels.count(F))
        continue;

      auto Remark = [&](OptimizationRemarkAnalysis ORA) {
        return ORA << "OpenMP GPU kernel "
                   << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
      };

      emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark);
    }
  }

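  // A "regular" call use, as matched by the helpers below, is the callee
  // position of a plain call without operand bundles, e.g. (illustrative IR):
  //   %tid = call i32 @__kmpc_global_thread_num(ptr @loc)
  // A use as a call argument or inside an operand bundle is not regular.
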
  /// Return the call if \p U is a callee use in a regular call. If \p RFI is
  /// given it has to be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
        (!RFI ||
         (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
      return CI;
    return nullptr;
  }

  /// Return the call if \p V is a regular call. If \p RFI is given it has to
  /// be the callee or a nullptr is returned.
  static CallInst *getCallIfRegularCall(
      Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && !CI->hasOperandBundles() &&
        (!RFI ||
         (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
      return CI;
    return nullptr;
  }

private:
  /// Merge parallel regions when it is safe.
  bool mergeParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;
    const unsigned CallbackFirstArgOperand = 3;
    using InsertPointTy = OpenMPIRBuilder::InsertPointTy;

    // Check if there are any __kmpc_fork_call calls to merge.
    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    // Unmergable calls that prevent merging a parallel region.
    OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
        OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
    };

    bool Changed = false;
    LoopInfo *LI = nullptr;
    DominatorTree *DT = nullptr;

    SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;

    BasicBlock *StartBB = nullptr, *EndBB = nullptr;
    auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
      BasicBlock *CGStartBB = CodeGenIP.getBlock();
      BasicBlock *CGEndBB =
          SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
      assert(StartBB != nullptr && "StartBB should not be null");
      CGStartBB->getTerminator()->setSuccessor(0, StartBB);
      assert(EndBB != nullptr && "EndBB should not be null");
      EndBB->getTerminator()->setSuccessor(0, CGEndBB);
    };

    auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
                      Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
      ReplacementValue = &Inner;
      return CodeGenIP;
    };

    auto FiniCB = [&](InsertPointTy CodeGenIP) {};

    /// Create a sequential execution region within a merged parallel region,
    /// encapsulated in a master construct with a barrier for synchronization.
    auto CreateSequentialRegion = [&](Function *OuterFn,
                                      BasicBlock *OuterPredBB,
                                      Instruction *SeqStartI,
                                      Instruction *SeqEndI) {
      // Isolate the instructions of the sequential region to a separate
      // block.
      BasicBlock *ParentBB = SeqStartI->getParent();
      BasicBlock *SeqEndBB =
          SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
      BasicBlock *SeqAfterBB =
          SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
      BasicBlock *SeqStartBB =
          SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");

      assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
             "Expected a different CFG");
      const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
      ParentBB->getTerminator()->eraseFromParent();

      auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
        BasicBlock *CGStartBB = CodeGenIP.getBlock();
        BasicBlock *CGEndBB =
            SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
        assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
        CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
        assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
        SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
      };
      auto FiniCB = [&](InsertPointTy CodeGenIP) {};

      // Find outputs from the sequential region to outside users and
      // broadcast their values to them.
      for (Instruction &I : *SeqStartBB) {
        SmallPtrSet<Instruction *, 4> OutsideUsers;
        for (User *Usr : I.users()) {
          Instruction &UsrI = *cast<Instruction>(Usr);
          // Ignore outputs to lifetime intrinsics, code extraction for the
          // merged parallel region will fix them.
          if (UsrI.isLifetimeStartOrEnd())
            continue;

          if (UsrI.getParent() != SeqStartBB)
            OutsideUsers.insert(&UsrI);
        }

        if (OutsideUsers.empty())
          continue;

        // Emit an alloca in the outer region to store the broadcasted
        // value.
        const DataLayout &DL = M.getDataLayout();
        AllocaInst *AllocaI = new AllocaInst(
            I.getType(), DL.getAllocaAddrSpace(), nullptr,
            I.getName() + ".seq.output.alloc", &OuterFn->front().front());

        // Emit a store instruction in the sequential BB to update the
        // value.
        new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());

        // Emit a load instruction and replace the use of the output value
        // with it.
        for (Instruction *UsrI : OutsideUsers) {
          LoadInst *LoadI = new LoadInst(
              I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
          UsrI->replaceUsesOfWith(&I, LoadI);
        }
      }

      OpenMPIRBuilder::LocationDescription Loc(
          InsertPointTy(ParentBB, ParentBB->end()), DL);
      InsertPointTy SeqAfterIP =
          OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);

      OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);

      BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());

      LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
                        << "\n");
    };

    // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
    // contained in BB and only separated by instructions that can be
    // redundantly executed in parallel. The block BB is split before the first
    // call (in MergableCIs) and after the last so the entire region we merge
    // into a single parallel region is contained in a single basic block
    // without any other instructions. We use the OpenMPIRBuilder to outline
    // that block and call the resulting function via __kmpc_fork_call.
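    //
    // Conceptually, at the source level, the transformation turns (sketch,
    // not the actual IR):
    //   #pragma omp parallel { A(); }  // fork_call 1
    //   seq();                         // sequential in-between code
    //   #pragma omp parallel { B(); }  // fork_call 2
    // into a single merged region:
    //   #pragma omp parallel
    //   { A(); master { seq(); } barrier; B(); }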
    auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs,
                     BasicBlock *BB) {
      // TODO: Change the interface to allow single CIs expanded, e.g., to
      // include an outer loop.
      assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");

      auto Remark = [&](OptimizationRemark OR) {
        OR << "Parallel region merged with parallel region"
           << (MergableCIs.size() > 2 ? "s" : "") << " at ";
        for (auto *CI : llvm::drop_begin(MergableCIs)) {
          OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
          if (CI != MergableCIs.back())
            OR << ", ";
        }
        return OR << ".";
      };

      emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark);

      Function *OriginalFn = BB->getParent();
      LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
                        << " parallel regions in " << OriginalFn->getName()
                        << "\n");

      // Isolate the calls to merge in a separate block.
      EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
      BasicBlock *AfterBB =
          SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
      StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
                           "omp.par.merged");

      assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
      const DebugLoc DL = BB->getTerminator()->getDebugLoc();
      BB->getTerminator()->eraseFromParent();

      // Create sequential regions for sequential instructions that are
      // in-between mergable parallel regions.
      for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
           It != End; ++It) {
        Instruction *ForkCI = *It;
        Instruction *NextForkCI = *(It + 1);

        // Continue if there are no in-between instructions.
        if (ForkCI->getNextNode() == NextForkCI)
          continue;

        CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
                               NextForkCI->getPrevNode());
      }

      OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
                                               DL);
      IRBuilder<>::InsertPoint AllocaIP(
          &OriginalFn->getEntryBlock(),
          OriginalFn->getEntryBlock().getFirstInsertionPt());
      // Create the merged parallel region with default proc binding, to
      // avoid overriding binding settings, and without explicit cancellation.
      InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
          Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
          OMP_PROC_BIND_default, /* IsCancellable */ false);
      BranchInst::Create(AfterBB, AfterIP.getBlock());

      // Perform the actual outlining.
      OMPInfoCache.OMPBuilder.finalize(OriginalFn);

      Function *OutlinedFn = MergableCIs.front()->getCaller();

      // Replace the __kmpc_fork_call calls with direct calls to the outlined
      // callbacks.
      SmallVector<Value *, 8> Args;
      for (auto *CI : MergableCIs) {
        Value *Callee = CI->getArgOperand(CallbackCalleeOperand);
        FunctionType *FT = OMPInfoCache.OMPBuilder.ParallelTask;
        Args.clear();
        Args.push_back(OutlinedFn->getArg(0));
        Args.push_back(OutlinedFn->getArg(1));
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          Args.push_back(CI->getArgOperand(U));

        CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
        if (CI->getDebugLoc())
          NewCI->setDebugLoc(CI->getDebugLoc());

        // Forward parameter attributes from the callback to the callee.
        for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
             ++U)
          for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
            NewCI->addParamAttr(
                U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);

        // Emit an explicit barrier to replace the implicit fork-join barrier.
        if (CI != MergableCIs.back()) {
          // TODO: Remove barrier if the merged parallel region includes the
          // 'nowait' clause.
          OMPInfoCache.OMPBuilder.createBarrier(
              InsertPointTy(NewCI->getParent(),
                            NewCI->getNextNode()->getIterator()),
              OMPD_parallel);
        }

        CI->eraseFromParent();
      }

      assert(OutlinedFn != OriginalFn && "Outlining failed");
      CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
      CGUpdater.reanalyzeFunction(*OriginalFn);

      NumOpenMPParallelRegionsMerged += MergableCIs.size();

      return true;
    };

    // Helper function that identifies sequences of
    // __kmpc_fork_call uses in a basic block.
    auto DetectPRsCB = [&](Use &U, Function &F) {
      CallInst *CI = getCallIfRegularCall(U, &RFI);
      BB2PRMap[CI->getParent()].insert(CI);

      return false;
    };

    BB2PRMap.clear();
    RFI.foreachUse(SCC, DetectPRsCB);
    SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
    // Find mergable parallel regions within a basic block that are
    // safe to merge, that is any in-between instructions can safely
    // execute in parallel after merging.
    // TODO: support merging across basic-blocks.
    for (auto &It : BB2PRMap) {
      auto &CIs = It.getSecond();
      if (CIs.size() < 2)
        continue;

      BasicBlock *BB = It.getFirst();
      SmallVector<CallInst *, 4> MergableCIs;

      /// Returns true if the instruction is mergable, false otherwise.
      /// A terminator instruction is unmergable by definition since merging
      /// works within a BB. Instructions before the mergable region are
      /// mergable if they are not calls to OpenMP runtime functions that may
      /// set different execution parameters for subsequent parallel regions.
      /// Instructions in-between parallel regions are mergable if they are not
      /// calls to any non-intrinsic function since that may call a non-mergable
      /// OpenMP runtime function.
      auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
        // We do not merge across BBs, hence return false (unmergable) if the
        // instruction is a terminator.
        if (I.isTerminator())
          return false;

        if (!isa<CallInst>(&I))
          return true;

        CallInst *CI = cast<CallInst>(&I);
        if (IsBeforeMergableRegion) {
          Function *CalledFunction = CI->getCalledFunction();
          if (!CalledFunction)
            return false;
          // Return false (unmergable) if the call before the parallel
          // region calls an explicit affinity (proc_bind) or number of
          // threads (num_threads) compiler-generated function. Those settings
          // may be incompatible with following parallel regions.
          // TODO: ICV tracking to detect compatibility.
          for (const auto &RFI : UnmergableCallsInfo) {
            if (CalledFunction == RFI.Declaration)
              return false;
          }
        } else {
          // Return false (unmergable) if there is a call instruction
          // in-between parallel regions when it is not an intrinsic. It
          // may call an unmergable OpenMP runtime function in its callpath.
          // TODO: Keep track of possible OpenMP calls in the callpath.
          if (!isa<IntrinsicInst>(CI))
            return false;
        }

        return true;
      };
      // Find the maximal number of parallel region CIs that are safe to merge.
      for (auto It = BB->begin(), End = BB->end(); It != End;) {
        Instruction &I = *It;
        ++It;

        if (CIs.count(&I)) {
          MergableCIs.push_back(cast<CallInst>(&I));
          continue;
        }

        // Continue expanding if the instruction is mergable.
        if (IsMergable(I, MergableCIs.empty()))
          continue;

        // Forward the instruction iterator to skip the next parallel region
        // since there is an unmergable instruction which can affect it.
        for (; It != End; ++It) {
          Instruction &SkipI = *It;
          if (CIs.count(&SkipI)) {
            LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
                              << " due to " << I << "\n");
            ++It;
            break;
          }
        }

        // Store mergable regions found.
        if (MergableCIs.size() > 1) {
          MergableCIsVector.push_back(MergableCIs);
          LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
                            << " parallel regions in block " << BB->getName()
                            << " of function " << BB->getParent()->getName()
                            << "\n";);
        }

        MergableCIs.clear();
      }

      if (!MergableCIsVector.empty()) {
        Changed = true;

        for (auto &MergableCIs : MergableCIsVector)
          Merge(MergableCIs, BB);
        MergableCIsVector.clear();
      }
    }

    if (Changed) {
      /// Re-collect use for fork calls, emitted barrier calls, and
      /// any emitted master/end_master calls.
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
      OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
    }

    return Changed;
  }

  /// Try to delete parallel regions if possible.
  bool deleteParallelRegions() {
    const unsigned CallbackCalleeOperand = 2;

    OMPInformationCache::RuntimeFunctionInfo &RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

    if (!RFI.Declaration)
      return false;

    bool Changed = false;
    auto DeleteCallCB = [&](Use &U, Function &) {
      CallInst *CI = getCallIfRegularCall(U);
      if (!CI)
        return false;
      auto *Fn = dyn_cast<Function>(
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
      if (!Fn)
        return false;
      if (!Fn->onlyReadsMemory())
        return false;
      if (!Fn->hasFnAttribute(Attribute::WillReturn))
        return false;

      LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                        << CI->getCaller()->getName() << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        return OR << "Removing parallel region with no side-effects.";
      };
      emitRemark<OptimizationRemark>(CI, "OMP160", Remark);

      CGUpdater.removeCallSite(*CI);
      CI->eraseFromParent();
      Changed = true;
      ++NumOpenMPParallelRegionsDeleted;
      return true;
    };

    RFI.foreachUse(SCC, DeleteCallCB);

    return Changed;
  }

  /// Try to eliminate runtime calls by reusing existing ones.
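  /// For example (illustrative IR), the second call below can reuse the
  /// result of the first when the value cannot change in-between:
  ///   %a = call i32 @omp_get_thread_num()
  ///   ...
  ///   %b = call i32 @omp_get_thread_num()  ; replaced by %a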
  bool deduplicateRuntimeCalls() {
    bool Changed = false;

    RuntimeFunction DeduplicableRuntimeCallIDs[] = {
        OMPRTL_omp_get_num_threads,
        OMPRTL_omp_in_parallel,
        OMPRTL_omp_get_cancellation,
        OMPRTL_omp_get_thread_limit,
        OMPRTL_omp_get_supported_active_levels,
        OMPRTL_omp_get_level,
        OMPRTL_omp_get_ancestor_thread_num,
        OMPRTL_omp_get_team_size,
        OMPRTL_omp_get_active_level,
        OMPRTL_omp_in_final,
        OMPRTL_omp_get_proc_bind,
        OMPRTL_omp_get_num_places,
        OMPRTL_omp_get_num_procs,
        OMPRTL_omp_get_place_num,
        OMPRTL_omp_get_partition_num_places,
        OMPRTL_omp_get_partition_place_nums};

    // Global-tid is handled separately.
    SmallSetVector<Value *, 16> GTIdArgs;
    collectGlobalThreadIdArguments(GTIdArgs);
    LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                      << " global thread ID arguments\n");

    for (Function *F : SCC) {
      for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
        Changed |= deduplicateRuntimeCalls(
            *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

      // __kmpc_global_thread_num is special as we can replace it with an
      // argument in enough cases to make it worth trying.
      Value *GTIdArg = nullptr;
      for (Argument &Arg : F->args())
        if (GTIdArgs.count(&Arg)) {
          GTIdArg = &Arg;
          break;
        }
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
    }

    return Changed;
  }

  /// Tries to hide the latency of runtime calls that involve host to
  /// device memory transfers by splitting them into their "issue" and "wait"
  /// versions. The "issue" is moved upwards as much as possible. The "wait" is
  /// moved downwards as much as possible. The "issue" issues the memory
  /// transfer asynchronously, returning a handle. The "wait" waits on the
  /// returned handle for the memory transfer to finish.
  bool hideMemTransfersLatency() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
    bool Changed = false;
    auto SplitMemTransfers = [&](Use &U, Function &Decl) {
      auto *RTCall = getCallIfRegularCall(U, &RFI);
      if (!RTCall)
        return false;

      OffloadArray OffloadArrays[3];
      if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
        return false;

      LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));

      // TODO: Check if can be moved upwards.
      bool WasSplit = false;
      Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
      if (WaitMovementPoint)
        WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);

      Changed |= WasSplit;
      return WasSplit;
    };
    RFI.foreachUse(SCC, SplitMemTransfers);

    return Changed;
  }

  /// Eliminates redundant, aligned barriers in OpenMP offloaded kernels.
  /// TODO: Make this an AA and expand it to work across blocks and functions.
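  /// For example (illustrative), with only side-effect-free or thread-local
  /// accesses in-between, one of two adjacent aligned barriers is redundant:
  ///   call void @llvm.nvvm.barrier0()
  ///   ; ... accesses to allocas / constant globals only ...
  ///   call void @llvm.nvvm.barrier0()   ; one of the two can be removed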
  bool eliminateBarriers() {
    bool Changed = false;

    if (DisableOpenMPOptBarrierElimination)
      return /*Changed=*/false;

    if (OMPInfoCache.Kernels.empty())
      return /*Changed=*/false;

    enum ImplicitBarrierType { IBT_ENTRY, IBT_EXIT };

    class BarrierInfo {
      Instruction *I;
      enum ImplicitBarrierType Type;

    public:
      BarrierInfo(enum ImplicitBarrierType Type) : I(nullptr), Type(Type) {}
      BarrierInfo(Instruction &I) : I(&I) {}

      bool isImplicit() { return !I; }

      bool isImplicitEntry() { return isImplicit() && Type == IBT_ENTRY; }

      bool isImplicitExit() { return isImplicit() && Type == IBT_EXIT; }

      Instruction *getInstruction() { return I; }
    };

    for (Function *Kernel : OMPInfoCache.Kernels) {
      for (BasicBlock &BB : *Kernel) {
        SmallVector<BarrierInfo, 8> BarriersInBlock;
        SmallPtrSet<Instruction *, 8> BarriersToBeDeleted;

        // Add the kernel entry implicit barrier.
        if (&Kernel->getEntryBlock() == &BB)
          BarriersInBlock.push_back(IBT_ENTRY);

        // Find implicit and explicit aligned barriers in the same basic block.
        for (Instruction &I : BB) {
          if (isa<ReturnInst>(I)) {
            // Add the implicit barrier when exiting the kernel.
            BarriersInBlock.push_back(IBT_EXIT);
            continue;
          }
          CallBase *CB = dyn_cast<CallBase>(&I);
          if (!CB)
            continue;

          auto IsAlignBarrierCB = [&](CallBase &CB) {
            switch (CB.getIntrinsicID()) {
            case Intrinsic::nvvm_barrier0:
            case Intrinsic::nvvm_barrier0_and:
            case Intrinsic::nvvm_barrier0_or:
            case Intrinsic::nvvm_barrier0_popc:
              return true;
            default:
              break;
            }
            return hasAssumption(CB,
                                 KnownAssumptionString("ompx_aligned_barrier"));
          };

          if (IsAlignBarrierCB(*CB)) {
            // Add an explicit aligned barrier.
            BarriersInBlock.push_back(I);
          }
        }

        if (BarriersInBlock.size() <= 1)
          continue;

        // A barrier in a barrier pair is removable if all instructions
        // between the barriers in the pair are side-effect free modulo the
        // barrier operation.
        auto IsBarrierRemoveable = [&Kernel](BarrierInfo *StartBI,
                                             BarrierInfo *EndBI) {
          assert(
              !StartBI->isImplicitExit() &&
              "Expected start barrier to be other than a kernel exit barrier");
          assert(
              !EndBI->isImplicitEntry() &&
              "Expected end barrier to be other than a kernel entry barrier");
          // If the StartBI instruction is null then this is the implicit
          // kernel entry barrier, so iterate from the first instruction in the
          // entry block.
          Instruction *I = (StartBI->isImplicitEntry())
                               ? &Kernel->getEntryBlock().front()
                               : StartBI->getInstruction()->getNextNode();
          assert(I && "Expected non-null start instruction");
          Instruction *E = (EndBI->isImplicitExit())
                               ? I->getParent()->getTerminator()
                               : EndBI->getInstruction();
          assert(E && "Expected non-null end instruction");

          for (; I != E; I = I->getNextNode()) {
            if (!I->mayHaveSideEffects() && !I->mayReadFromMemory())
              continue;

            auto IsPotentiallyAffectedByBarrier =
                [](Optional<MemoryLocation> Loc) {
                  const Value *Obj = (Loc && Loc->Ptr)
                                         ? getUnderlyingObject(Loc->Ptr)
                                         : nullptr;
                  if (!Obj) {
                    LLVM_DEBUG(
                        dbgs()
                        << "Access to unknown location requires barriers\n");
                    return true;
                  }
                  if (isa<UndefValue>(Obj))
                    return false;
                  if (isa<AllocaInst>(Obj))
                    return false;
                  if (auto *GV = dyn_cast<GlobalVariable>(Obj)) {
                    if (GV->isConstant())
                      return false;
                    if (GV->isThreadLocal())
                      return false;
                    if (GV->getAddressSpace() == (int)AddressSpace::Local)
                      return false;
                    if (GV->getAddressSpace() == (int)AddressSpace::Constant)
                      return false;
                  }
                  LLVM_DEBUG(dbgs() << "Access to '" << *Obj
                                    << "' requires barriers\n");
                  return true;
                };

            if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
              Optional<MemoryLocation> Loc = MemoryLocation::getForDest(MI);
              if (IsPotentiallyAffectedByBarrier(Loc))
                return false;
              if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
                Optional<MemoryLocation> Loc =
                    MemoryLocation::getForSource(MTI);
                if (IsPotentiallyAffectedByBarrier(Loc))
                  return false;
              }
              continue;
            }

            if (auto *LI = dyn_cast<LoadInst>(I))
              if (LI->hasMetadata(LLVMContext::MD_invariant_load))
                continue;

            Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
            if (IsPotentiallyAffectedByBarrier(Loc))
              return false;
          }

          return true;
        };

        // Iterate barrier pairs and remove an explicit barrier if analysis
        // deems it removable.
        for (auto *It = BarriersInBlock.begin(),
                  *End = BarriersInBlock.end() - 1;
             It != End; ++It) {

          BarrierInfo *StartBI = It;
          BarrierInfo *EndBI = (It + 1);

          // Cannot remove when both are implicit barriers, continue.
          if (StartBI->isImplicit() && EndBI->isImplicit())
            continue;

          if (!IsBarrierRemoveable(StartBI, EndBI))
            continue;

          assert(!(StartBI->isImplicit() && EndBI->isImplicit()) &&
                 "Expected at least one explicit barrier to remove.");

          // Remove an explicit barrier, check first, then second.
          if (!StartBI->isImplicit()) {
            LLVM_DEBUG(dbgs() << "Remove start barrier "
                              << *StartBI->getInstruction() << "\n");
            BarriersToBeDeleted.insert(StartBI->getInstruction());
          } else {
            LLVM_DEBUG(dbgs() << "Remove end barrier "
                              << *EndBI->getInstruction() << "\n");
            BarriersToBeDeleted.insert(EndBI->getInstruction());
          }
        }

        if (BarriersToBeDeleted.empty())
          continue;

        Changed = true;
        for (Instruction *I : BarriersToBeDeleted) {
          ++NumBarriersEliminated;
          auto Remark = [&](OptimizationRemark OR) {
            return OR << "Redundant barrier eliminated.";
          };

          if (EnableVerboseRemarks)
            emitRemark<OptimizationRemark>(I, "OMP190", Remark);
          I->eraseFromParent();
        }
      }
    }

    return Changed;
  }

  void analysisGlobalization() {
    auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];

    auto CheckGlobalization = [&](Use &U, Function &Decl) {
      if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
        auto Remark = [&](OptimizationRemarkMissed ORM) {
          return ORM << "Found thread data sharing on the GPU. "
                     << "Expect degraded performance due to data "
                        "globalization.";
        };
        emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark);
      }

      return false;
    };

    RFI.foreachUse(SCC, CheckGlobalization);
  }

  /// Maps the values stored in the offload arrays passed as arguments to
  /// \p RuntimeCall into the offload arrays in \p OAs.
  bool getValuesInOffloadArrays(CallInst &RuntimeCall,
                                MutableArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "Need space for three offload arrays!");

    // A runtime call that involves memory offloading looks something like:
    // call void @__tgt_target_data_begin_mapper(arg0, arg1,
    //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
    // ...)
    // So, the idea is to access the allocas that allocate space for these
    // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
    // Therefore:
    // i8** %offload_baseptrs.
    Value *BasePtrsArg =
        RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
    // i8** %offload_ptrs.
    Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
    // i8** %offload_sizes.
    Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);

    // Get values stored in **offload_baseptrs.
    auto *V = getUnderlyingObject(BasePtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *BasePtrsArray = cast<AllocaInst>(V);
    if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_ptrs.
    V = getUnderlyingObject(PtrsArg);
    if (!isa<AllocaInst>(V))
      return false;
    auto *PtrsArray = cast<AllocaInst>(V);
    if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
      return false;

    // Get values stored in **offload_sizes.
    V = getUnderlyingObject(SizesArg);
    // If it's a [constant] global array don't analyze it.
    if (isa<GlobalValue>(V))
      return isa<Constant>(V);
    if (!isa<AllocaInst>(V))
      return false;

    auto *SizesArray = cast<AllocaInst>(V);
    if (!OAs[2].initialize(*SizesArray, RuntimeCall))
      return false;

    return true;
  }

  /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
  /// For now this is a way to test that the function getValuesInOffloadArrays
  /// is working properly.
  /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
  void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
    assert(OAs.size() == 3 && "There are three offload arrays to debug!");

    LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
    std::string ValuesStr;
    raw_string_ostream Printer(ValuesStr);
    std::string Separator = " --- ";

    for (auto *BP : OAs[0].StoredValues) {
      BP->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *P : OAs[1].StoredValues) {
      P->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
    ValuesStr.clear();

    for (auto *S : OAs[2].StoredValues) {
      S->print(Printer);
      Printer << Separator;
    }
    LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
  }

1712 Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
1713 // FIXME: This traverses only the BasicBlock where RuntimeCall is.
1714 // Make it traverse the CFG.
1715
1716 Instruction *CurrentI = &RuntimeCall;
1717 bool IsWorthIt = false;
1718 while ((CurrentI = CurrentI->getNextNode())) {
1719
1720 // TODO: Once we detect the regions to be offloaded we should use the
1721 // alias analysis manager to check if CurrentI may modify one of
1722 // the offloaded regions.
1723 if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
1724 if (IsWorthIt)
1725 return CurrentI;
1726
1727 return nullptr;
1728 }
1729
1730 // FIXME: For now, moving the call over anything without side effects
1731 // is considered worth it.
1732 IsWorthIt = true;
1733 }
1734
1735 // Return end of BasicBlock.
1736 return RuntimeCall.getParent()->getTerminator();
1737 }
1738
1739 /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
1740 bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
1741 Instruction &WaitMovementPoint) {
1742 // Create a stack-allocated handle (__tgt_async_info) at the beginning of
1743 // the function. It is used to store information about the async transfer,
1744 // allowing us to wait on it later.
1745 auto &IRBuilder = OMPInfoCache.OMPBuilder;
1746 auto *F = RuntimeCall.getCaller();
1747 Instruction *FirstInst = &(F->getEntryBlock().front());
1748 AllocaInst *Handle = new AllocaInst(
1749 IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);
1750
1751 // Add "issue" runtime call declaration:
1752 // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
1753 // i8**, i8**, i64*, i64*)
1754 FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
1755 M, OMPRTL___tgt_target_data_begin_mapper_issue);
1756
1757 // Replace the RuntimeCall call site with its asynchronous version.
1758 SmallVector<Value *, 16> Args;
1759 for (auto &Arg : RuntimeCall.args())
1760 Args.push_back(Arg.get());
1761 Args.push_back(Handle);
1762
1763 CallInst *IssueCallsite =
1764 CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
1765 OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite);
1766 RuntimeCall.eraseFromParent();
1767
1768 // Add "wait" runtime call declaration:
1769 // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
1770 FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
1771 M, OMPRTL___tgt_target_data_begin_mapper_wait);
1772
1773 Value *WaitParams[2] = {
1774 IssueCallsite->getArgOperand(
1775 OffloadArray::DeviceIDArgNum), // device_id.
1776 Handle // handle to wait on.
1777 };
1778 CallInst *WaitCallsite = CallInst::Create(
1779 WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
1780 OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite);
1781
1782 return true;
1783 }
1784
1785 static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
1786 bool GlobalOnly, bool &SingleChoice) {
1787 if (CurrentIdent == NextIdent)
1788 return CurrentIdent;
1789
1790 // TODO: Figure out how to actually combine multiple debug locations. For
1791 // now we just keep an existing one if there is a single choice.
1792 if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
1793 SingleChoice = !CurrentIdent;
1794 return NextIdent;
1795 }
1796 return nullptr;
1797 }
1798
1799 /// Return a `struct ident_t*` value that represents the ones used in the
1800 /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
1801 /// return a local `struct ident_t*`. For now, if we cannot find a suitable
1802 /// return value we create one from scratch. We also do not yet combine
1803 /// information, e.g., the source locations, see combinedIdentStruct.
1804 Value *
1805 getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
1806 Function &F, bool GlobalOnly) {
1807 bool SingleChoice = true;
1808 Value *Ident = nullptr;
1809 auto CombineIdentStruct = [&](Use &U, Function &Caller) {
1810 CallInst *CI = getCallIfRegularCall(U, &RFI);
1811 if (!CI || &F != &Caller)
1812 return false;
1813 Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
1814 /* GlobalOnly */ true, SingleChoice);
1815 return false;
1816 };
1817 RFI.foreachUse(SCC, CombineIdentStruct);
1818
1819 if (!Ident || !SingleChoice) {
1820 // The IRBuilder uses the insertion block to get to the module; this is
1821 // unfortunate, but we work around it for now.
1822 if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
1823 OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
1824 &F.getEntryBlock(), F.getEntryBlock().begin()));
1825 // Create a fallback location if none was found.
1826 // TODO: Use the debug locations of the calls instead.
1827 uint32_t SrcLocStrSize;
1828 Constant *Loc =
1829 OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
1830 Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize);
1831 }
1832 return Ident;
1833 }
1834
1835 /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
1836 /// \p ReplVal if given.
1837 bool deduplicateRuntimeCalls(Function &F,
1838 OMPInformationCache::RuntimeFunctionInfo &RFI,
1839 Value *ReplVal = nullptr) {
1840 auto *UV = RFI.getUseVector(F);
1841 if (!UV || UV->size() + (ReplVal != nullptr) < 2)
1842 return false;
1843
1844 LLVM_DEBUG(
1845 dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
1846 << (ReplVal ? " with an existing value\n" : "\n"));
1847
1848 assert((!ReplVal || (isa<Argument>(ReplVal) &&
1849 cast<Argument>(ReplVal)->getParent() == &F)) &&
1850 "Unexpected replacement value!");
1851
1852 // TODO: Use dominance to find a good position instead.
1853 auto CanBeMoved = [this](CallBase &CB) {
1854 unsigned NumArgs = CB.arg_size();
1855 if (NumArgs == 0)
1856 return true;
1857 if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
1858 return false;
1859 for (unsigned U = 1; U < NumArgs; ++U)
1860 if (isa<Instruction>(CB.getArgOperand(U)))
1861 return false;
1862 return true;
1863 };
1864
1865 if (!ReplVal) {
1866 for (Use *U : *UV)
1867 if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
1868 if (!CanBeMoved(*CI))
1869 continue;
1870
1871 // If the function is a kernel, dedup will move
1872 // the runtime call right after the kernel init callsite. Otherwise,
1873 // it will move it to the beginning of the caller function.
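// An illustrative sketch of the kernel case (IR value names invented):
//   %0    = call i32 @__kmpc_target_init(...)
//   %gtid = call i32 @__kmpc_global_thread_num(%struct.ident_t* @loc)
// The surviving call is placed right after the init call so it dominates
// all former call sites, which are then rewritten to reuse its result.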
1874 if (isKernel(F)) {
1875 auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
1876 auto *KernelInitUV = KernelInitRFI.getUseVector(F);
1877
1878 if (KernelInitUV->empty())
1879 continue;
1880
1881 assert(KernelInitUV->size() == 1 &&
1882 "Expected a single __kmpc_target_init in kernel\n");
1883
1884 CallInst *KernelInitCI =
1885 getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI);
1886 assert(KernelInitCI &&
1887 "Expected a call to __kmpc_target_init in kernel\n");
1888
1889 CI->moveAfter(KernelInitCI);
1890 } else
1891 CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
1892 ReplVal = CI;
1893 break;
1894 }
1895 if (!ReplVal)
1896 return false;
1897 }
1898
1899 // If we use a call as a replacement value we need to make sure the ident is
1900 // valid at the new location. For now we just pick a global one, either
1901 // existing and used by one of the calls, or created from scratch.
1902 if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
1903 if (!CI->arg_empty() &&
1904 CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
1905 Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
1906 /* GlobalOnly */ true);
1907 CI->setArgOperand(0, Ident);
1908 }
1909 }
1910
1911 bool Changed = false;
1912 auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1913 CallInst *CI = getCallIfRegularCall(U, &RFI);
1914 if (!CI || CI == ReplVal || &F != &Caller)
1915 return false;
1916 assert(CI->getCaller() == &F && "Unexpected call!");
1917
1918 auto Remark = [&](OptimizationRemark OR) {
1919 return OR << "OpenMP runtime call "
1920 << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated.";
1921 };
1922 if (CI->getDebugLoc())
1923 emitRemark<OptimizationRemark>(CI, "OMP170", Remark);
1924 else
1925 emitRemark<OptimizationRemark>(&F, "OMP170", Remark);
1926
1927 CGUpdater.removeCallSite(*CI);
1928 CI->replaceAllUsesWith(ReplVal);
1929 CI->eraseFromParent();
1930 ++NumOpenMPRuntimeCallsDeduplicated;
1931 Changed = true;
1932 return true;
1933 };
1934 RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1935
1936 return Changed;
1937 }
1938
1939 /// Collect arguments that represent the global thread id in \p GTIdArgs.
1940 void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1941 // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1942 // initialization. We could define an AbstractAttribute instead and
1943 // run the Attributor here once it can be run as an SCC pass.
1944
1945 // Helper to check the argument \p ArgNo at all call sites of \p F for
1946 // a GTId.
1947 auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1948 if (!F.hasLocalLinkage())
1949 return false;
1950 for (Use &U : F.uses()) {
1951 if (CallInst *CI = getCallIfRegularCall(U)) {
1952 Value *ArgOp = CI->getArgOperand(ArgNo);
1953 if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1954 getCallIfRegularCall(
1955 *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1956 continue;
1957 }
1958 return false;
1959 }
1960 return true;
1961 };
1962
1963 // Helper to identify uses of a GTId as GTId arguments.
1964 auto AddUserArgs = [&](Value &GTId) {
1965 for (Use &U : GTId.uses())
1966 if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1967 if (CI->isArgOperand(&U))
1968 if (Function *Callee = CI->getCalledFunction())
1969 if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1970 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1971 };
1972
1973 // The argument users of __kmpc_global_thread_num calls are GTIds.
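// Illustrative propagation (IR names invented):
//   %gtid = call i32 @__kmpc_global_thread_num(%struct.ident_t* @loc)
//   call void @body(i32 %gtid)
// If every call site of the local function @body passes a known GTId at
// that position, @body's corresponding argument is recorded as a GTId too.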
1974 OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1975 OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1976
1977 GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1978 if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1979 AddUserArgs(*CI);
1980 return false;
1981 });
1982
1983 // Transitively search for more arguments by looking at the users of the
1984 // ones we know already. During the search the GTIdArgs vector is extended,
1985 // so we cannot cache the size nor can we use a range-based for.
1986 for (unsigned U = 0; U < GTIdArgs.size(); ++U)
1987 AddUserArgs(*GTIdArgs[U]);
1988 }
1989
1990 /// Kernel (=GPU) optimizations and utility functions
1991 ///
1992 ///{{
1993
1994 /// Check if \p F is a kernel, hence an entry point for target offloading.
1995 bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }
1996
1997 /// Cache to remember the unique kernel for a function.
1998 DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;
1999
2000 /// Find the unique kernel that will execute \p F, if any.
2001 Kernel getUniqueKernelFor(Function &F);
2002
2003 /// Find the unique kernel that will execute \p I, if any.
2004 Kernel getUniqueKernelFor(Instruction &I) {
2005 return getUniqueKernelFor(*I.getFunction());
2006 }
2007
2008 /// Rewrite the device (=GPU) code state machine created in non-SPMD mode in
2009 /// the cases where we can avoid taking the address of a function.
2010 bool rewriteDeviceCodeStateMachine();
2011
2012 ///
2013 ///}}
2014
2015 /// Emit a remark generically.
2016 ///
2017 /// This template function can be used to generically emit a remark. The
2018 /// RemarkKind should be one of the following:
2019 /// - OptimizationRemark to indicate a successful optimization attempt
2020 /// - OptimizationRemarkMissed to report a failed optimization attempt
2021 /// - OptimizationRemarkAnalysis to provide additional information about an
2022 /// optimization attempt
2023 ///
2024 /// The remark is built using a callback function provided by the caller that
2025 /// takes a RemarkKind as input and returns a RemarkKind.
2026 template <typename RemarkKind, typename RemarkCallBack>
2027 void emitRemark(Instruction *I, StringRef RemarkName,
2028 RemarkCallBack &&RemarkCB) const {
2029 Function *F = I->getParent()->getParent();
2030 auto &ORE = OREGetter(F);
2031
2032 if (RemarkName.startswith("OMP"))
2033 ORE.emit([&]() {
2034 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I))
2035 << " [" << RemarkName << "]";
2036 });
2037 else
2038 ORE.emit(
2039 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); });
2040 }
2041
2042 /// Emit a remark on a function.
2043 template <typename RemarkKind, typename RemarkCallBack>
2044 void emitRemark(Function *F, StringRef RemarkName,
2045 RemarkCallBack &&RemarkCB) const {
2046 auto &ORE = OREGetter(F);
2047
2048 if (RemarkName.startswith("OMP"))
2049 ORE.emit([&]() {
2050 return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F))
2051 << " [" << RemarkName << "]";
2052 });
2053 else
2054 ORE.emit(
2055 [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); });
2056 }
2057
2058 /// RAII struct to temporarily change an RTL function's linkage to external.
2059 /// This prevents it from being mistakenly removed by other optimizations.
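/// A minimal usage sketch (illustrative; mirrors runAttributor below):
/// \code
///   ExternalizationRAII Keep(OMPInfoCache, OMPRTL___kmpc_kernel_parallel);
///   // ... run the Attributor; the original linkage is restored when the
///   // RAII object goes out of scope.
/// \endcode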
2060 struct ExternalizationRAII {
2061 ExternalizationRAII(OMPInformationCache &OMPInfoCache,
2062 RuntimeFunction RFKind)
2063 : Declaration(OMPInfoCache.RFIs[RFKind].Declaration) {
2064 if (!Declaration)
2065 return;
2066
2067 LinkageType = Declaration->getLinkage();
2068 Declaration->setLinkage(GlobalValue::ExternalLinkage);
2069 }
2070
2071 ~ExternalizationRAII() {
2072 if (!Declaration)
2073 return;
2074
2075 Declaration->setLinkage(LinkageType);
2076 }
2077
2078 Function *Declaration;
2079 GlobalValue::LinkageTypes LinkageType;
2080 };
2081
2082 /// The underlying module.
2083 Module &M;
2084
2085 /// The SCC we are operating on.
2086 SmallVectorImpl<Function *> &SCC;
2087
2088 /// Callback to update the call graph, the first argument is a removed call,
2089 /// the second an optional replacement call.
2090 CallGraphUpdater &CGUpdater;
2091
2092 /// Callback to get an OptimizationRemarkEmitter from a Function *.
2093 OptimizationRemarkGetter OREGetter;
2094
2095 /// OpenMP-specific information cache. Also used for Attributor runs.
2096 OMPInformationCache &OMPInfoCache;
2097
2098 /// Attributor instance.
2099 Attributor &A;
2100
2101 /// Helper function to run the Attributor on the SCC.
2102 bool runAttributor(bool IsModulePass) {
2103 if (SCC.empty())
2104 return false;
2105
2106 // Temporarily make these functions have external linkage so the Attributor
2107 // doesn't remove them when we try to look them up later.
2108 ExternalizationRAII Parallel(OMPInfoCache, OMPRTL___kmpc_kernel_parallel);
2109 ExternalizationRAII EndParallel(OMPInfoCache,
2110 OMPRTL___kmpc_kernel_end_parallel);
2111 ExternalizationRAII BarrierSPMD(OMPInfoCache,
2112 OMPRTL___kmpc_barrier_simple_spmd);
2113 ExternalizationRAII BarrierGeneric(OMPInfoCache,
2114 OMPRTL___kmpc_barrier_simple_generic);
2115 ExternalizationRAII ThreadId(OMPInfoCache,
2116 OMPRTL___kmpc_get_hardware_thread_id_in_block);
2117 ExternalizationRAII NumThreads(
2118 OMPInfoCache, OMPRTL___kmpc_get_hardware_num_threads_in_block);
2119 ExternalizationRAII WarpSize(OMPInfoCache, OMPRTL___kmpc_get_warp_size);
2120
2121 registerAAs(IsModulePass);
2122
2123 ChangeStatus Changed = A.run();
2124
2125 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
2126 << " functions, result: " << Changed << ".\n");
2127
2128 return Changed == ChangeStatus::CHANGED;
2129 }
2130
2131 void registerFoldRuntimeCall(RuntimeFunction RF);
2132
2133 /// Populate the Attributor with abstract attribute opportunities in the
2134 /// function.
2135 void registerAAs(bool IsModulePass);
2136 };
2137
2138 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
2139 if (!OMPInfoCache.ModuleSlice.count(&F))
2140 return nullptr;
2141
2142 // Use a scope to keep the lifetime of the CachedKernel short.
2143 {
2144 Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
2145 if (CachedKernel)
2146 return *CachedKernel;
2147
2148 // TODO: We should use an AA to create an (optimistic and callback
2149 // call-aware) call graph. For now we stick to simple patterns that
2150 // are less powerful, basically the worst fixpoint.
2151 if (isKernel(F)) { 2152 CachedKernel = Kernel(&F); 2153 return *CachedKernel; 2154 } 2155 2156 CachedKernel = nullptr; 2157 if (!F.hasLocalLinkage()) { 2158 2159 // See https://openmp.llvm.org/remarks/OptimizationRemarks.html 2160 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 2161 return ORA << "Potentially unknown OpenMP target region caller."; 2162 }; 2163 emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark); 2164 2165 return nullptr; 2166 } 2167 } 2168 2169 auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel { 2170 if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 2171 // Allow use in equality comparisons. 2172 if (Cmp->isEquality()) 2173 return getUniqueKernelFor(*Cmp); 2174 return nullptr; 2175 } 2176 if (auto *CB = dyn_cast<CallBase>(U.getUser())) { 2177 // Allow direct calls. 2178 if (CB->isCallee(&U)) 2179 return getUniqueKernelFor(*CB); 2180 2181 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 2182 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 2183 // Allow the use in __kmpc_parallel_51 calls. 2184 if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI)) 2185 return getUniqueKernelFor(*CB); 2186 return nullptr; 2187 } 2188 // Disallow every other use. 2189 return nullptr; 2190 }; 2191 2192 // TODO: In the future we want to track more than just a unique kernel. 2193 SmallPtrSet<Kernel, 2> PotentialKernels; 2194 OMPInformationCache::foreachUse(F, [&](const Use &U) { 2195 PotentialKernels.insert(GetUniqueKernelForUse(U)); 2196 }); 2197 2198 Kernel K = nullptr; 2199 if (PotentialKernels.size() == 1) 2200 K = *PotentialKernels.begin(); 2201 2202 // Cache the result. 2203 UniqueKernelMap[&F] = K; 2204 2205 return K; 2206 } 2207 2208 bool OpenMPOpt::rewriteDeviceCodeStateMachine() { 2209 OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = 2210 OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; 2211 2212 bool Changed = false; 2213 if (!KernelParallelRFI) 2214 return Changed; 2215 2216 // If we have disabled state machine changes, exit 2217 if (DisableOpenMPOptStateMachineRewrite) 2218 return Changed; 2219 2220 for (Function *F : SCC) { 2221 2222 // Check if the function is a use in a __kmpc_parallel_51 call at 2223 // all. 2224 bool UnknownUse = false; 2225 bool KernelParallelUse = false; 2226 unsigned NumDirectCalls = 0; 2227 2228 SmallVector<Use *, 2> ToBeReplacedStateMachineUses; 2229 OMPInformationCache::foreachUse(*F, [&](Use &U) { 2230 if (auto *CB = dyn_cast<CallBase>(U.getUser())) 2231 if (CB->isCallee(&U)) { 2232 ++NumDirectCalls; 2233 return; 2234 } 2235 2236 if (isa<ICmpInst>(U.getUser())) { 2237 ToBeReplacedStateMachineUses.push_back(&U); 2238 return; 2239 } 2240 2241 // Find wrapper functions that represent parallel kernels. 2242 CallInst *CI = 2243 OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI); 2244 const unsigned int WrapperFunctionArgNo = 6; 2245 if (!KernelParallelUse && CI && 2246 CI->getArgOperandNo(&U) == WrapperFunctionArgNo) { 2247 KernelParallelUse = true; 2248 ToBeReplacedStateMachineUses.push_back(&U); 2249 return; 2250 } 2251 UnknownUse = true; 2252 }); 2253 2254 // Do not emit a remark if we haven't seen a __kmpc_parallel_51 2255 // use. 2256 if (!KernelParallelUse) 2257 continue; 2258 2259 // If this ever hits, we should investigate. 2260 // TODO: Checking the number of uses is not a necessary restriction and 2261 // should be lifted. 
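// For reference, the expected (rewriteable) shape is, illustratively, one
// direct call to the parallel region function plus its appearance as the
// wrapper-function operand of a single __kmpc_parallel_51 call.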
2262 if (UnknownUse || NumDirectCalls != 1 ||
2263 ToBeReplacedStateMachineUses.size() > 2) {
2264 auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2265 return ORA << "Parallel region is used in "
2266 << (UnknownUse ? "unknown" : "unexpected")
2267 << " ways. Will not attempt to rewrite the state machine.";
2268 };
2269 emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark);
2270 continue;
2271 }
2272
2273 // Even if we have __kmpc_parallel_51 calls, we (for now) give
2274 // up if the function is not called from a unique kernel.
2275 Kernel K = getUniqueKernelFor(*F);
2276 if (!K) {
2277 auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2278 return ORA << "Parallel region is not called from a unique kernel. "
2279 "Will not attempt to rewrite the state machine.";
2280 };
2281 emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark);
2282 continue;
2283 }
2284
2285 // We now know F is a parallel body function called only from the kernel K.
2286 // We also identified the state machine uses in which we replace the
2287 // function pointer by a new global symbol for identification purposes. This
2288 // ensures only direct calls to the function are left.
2289
2290 Module &M = *F->getParent();
2291 Type *Int8Ty = Type::getInt8Ty(M.getContext());
2292
2293 auto *ID = new GlobalVariable(
2294 M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
2295 UndefValue::get(Int8Ty), F->getName() + ".ID");
2296
2297 for (Use *U : ToBeReplacedStateMachineUses)
2298 U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2299 ID, U->get()->getType()));
2300
2301 ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
2302
2303 Changed = true;
2304 }
2305
2306 return Changed;
2307 }
2308
2309 /// Abstract Attribute for tracking ICV values.
2310 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
2311 using Base = StateWrapper<BooleanState, AbstractAttribute>;
2312 AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2313
2314 void initialize(Attributor &A) override {
2315 Function *F = getAnchorScope();
2316 if (!F || !A.isFunctionIPOAmendable(*F))
2317 indicatePessimisticFixpoint();
2318 }
2319
2320 /// Returns true if the value is assumed to be tracked.
2321 bool isAssumedTracked() const { return getAssumed(); }
2322
2323 /// Returns true if the value is known to be tracked.
2324 bool isKnownTracked() const { return getAssumed(); }
2325
2326 /// Create an abstract attribute view for the position \p IRP.
2327 static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
2328
2329 /// Return the value with which \p I can be replaced for the specific \p ICV.
2330 virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
2331 const Instruction *I,
2332 Attributor &A) const {
2333 return None;
2334 }
2335
2336 /// Return an assumed unique ICV value if a single candidate is found. If
2337 /// there cannot be one, return a nullptr. If it is not yet clear, return
2338 /// Optional::NoneType.
2339 virtual Optional<Value *>
2340 getUniqueReplacementValue(InternalControlVar ICV) const = 0;
2341
2342 // Currently only nthreads is being tracked.
2343 // This array will only grow with time.
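// For example (illustrative): after `call void @omp_set_num_threads(i32 4)`
// the tracked nthreads value is the constant 4, until the next setter or an
// unknown call is encountered.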
2344 InternalControlVar TrackableICVs[1] = {ICV_nthreads};
2345
2346 /// See AbstractAttribute::getName()
2347 const std::string getName() const override { return "AAICVTracker"; }
2348
2349 /// See AbstractAttribute::getIdAddr()
2350 const char *getIdAddr() const override { return &ID; }
2351
2352 /// This function should return true if the type of the \p AA is AAICVTracker.
2353 static bool classof(const AbstractAttribute *AA) {
2354 return (AA->getIdAddr() == &ID);
2355 }
2356
2357 static const char ID;
2358 };
2359
2360 struct AAICVTrackerFunction : public AAICVTracker {
2361 AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
2362 : AAICVTracker(IRP, A) {}
2363
2364 // FIXME: come up with better string.
2365 const std::string getAsStr() const override { return "ICVTrackerFunction"; }
2366
2367 // FIXME: come up with some stats.
2368 void trackStatistics() const override {}
2369
2370 /// We don't manifest anything for this AA.
2371 ChangeStatus manifest(Attributor &A) override {
2372 return ChangeStatus::UNCHANGED;
2373 }
2374
2375 // Map of ICV to their values at specific program point.
2376 EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
2377 InternalControlVar::ICV___last>
2378 ICVReplacementValuesMap;
2379
2380 ChangeStatus updateImpl(Attributor &A) override {
2381 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2382
2383 Function *F = getAnchorScope();
2384
2385 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2386
2387 for (InternalControlVar ICV : TrackableICVs) {
2388 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2389
2390 auto &ValuesMap = ICVReplacementValuesMap[ICV];
2391 auto TrackValues = [&](Use &U, Function &) {
2392 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
2393 if (!CI)
2394 return false;
2395
2396 // FIXME: handle setters with more than one argument.
2397 // Track the new value.
2398 if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
2399 HasChanged = ChangeStatus::CHANGED;
2400
2401 return false;
2402 };
2403
2404 auto CallCheck = [&](Instruction &I) {
2405 Optional<Value *> ReplVal = getValueForCall(A, I, ICV);
2406 if (ReplVal && ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
2407 HasChanged = ChangeStatus::CHANGED;
2408
2409 return true;
2410 };
2411
2412 // Track all changes of an ICV.
2413 SetterRFI.foreachUse(TrackValues, F);
2414
2415 bool UsedAssumedInformation = false;
2416 A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
2417 UsedAssumedInformation,
2418 /* CheckBBLivenessOnly */ true);
2419
2420 /// TODO: Figure out a way to avoid adding an entry to
2421 /// ICVReplacementValuesMap
2422 Instruction *Entry = &F->getEntryBlock().front();
2423 if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
2424 ValuesMap.insert(std::make_pair(Entry, nullptr));
2425 }
2426
2427 return HasChanged;
2428 }
2429
2430 /// Helper to check if \p I is a call and get the value for it if it is
2431 /// unique.
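/// The tri-state result is used as follows (per the logic below): None means
/// the call does not affect the ICV, a nullptr value means "assume the ICV
/// is changed", and a concrete Value* is the unique replacement, e.g. the
/// argument of a tracked setter call.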
2432 Optional<Value *> getValueForCall(Attributor &A, const Instruction &I,
2433 InternalControlVar &ICV) const {
2434
2435 const auto *CB = dyn_cast<CallBase>(&I);
2436 if (!CB || CB->hasFnAttr("no_openmp") ||
2437 CB->hasFnAttr("no_openmp_routines"))
2438 return None;
2439
2440 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2441 auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
2442 auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2443 Function *CalledFunction = CB->getCalledFunction();
2444
2445 // Indirect call, assume ICV changes.
2446 if (CalledFunction == nullptr)
2447 return nullptr;
2448 if (CalledFunction == GetterRFI.Declaration)
2449 return None;
2450 if (CalledFunction == SetterRFI.Declaration) {
2451 if (ICVReplacementValuesMap[ICV].count(&I))
2452 return ICVReplacementValuesMap[ICV].lookup(&I);
2453
2454 return nullptr;
2455 }
2456
2457 // Since we don't know, assume it changes the ICV.
2458 if (CalledFunction->isDeclaration())
2459 return nullptr;
2460
2461 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2462 *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);
2463
2464 if (ICVTrackingAA.isAssumedTracked()) {
2465 Optional<Value *> URV = ICVTrackingAA.getUniqueReplacementValue(ICV);
2466 if (!URV || (*URV && AA::isValidAtPosition(AA::ValueAndContext(**URV, I),
2467 OMPInfoCache)))
2468 return URV;
2469 }
2470
2471 // If we don't know, assume it changes.
2472 return nullptr;
2473 }
2474
2475 // We don't check the unique value for a function, so return None.
2476 Optional<Value *>
2477 getUniqueReplacementValue(InternalControlVar ICV) const override {
2478 return None;
2479 }
2480
2481 /// Return the value with which \p I can be replaced for the specific \p ICV.
2482 Optional<Value *> getReplacementValue(InternalControlVar ICV,
2483 const Instruction *I,
2484 Attributor &A) const override {
2485 const auto &ValuesMap = ICVReplacementValuesMap[ICV];
2486 if (ValuesMap.count(I))
2487 return ValuesMap.lookup(I);
2488
2489 SmallVector<const Instruction *, 16> Worklist;
2490 SmallPtrSet<const Instruction *, 16> Visited;
2491 Worklist.push_back(I);
2492
2493 Optional<Value *> ReplVal;
2494
2495 while (!Worklist.empty()) {
2496 const Instruction *CurrInst = Worklist.pop_back_val();
2497 if (!Visited.insert(CurrInst).second)
2498 continue;
2499
2500 const BasicBlock *CurrBB = CurrInst->getParent();
2501
2502 // Go up and look for all potential setters/calls that might change the
2503 // ICV.
2504 while ((CurrInst = CurrInst->getPrevNode())) {
2505 if (ValuesMap.count(CurrInst)) {
2506 Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
2507 // Unknown value, track new.
2508 if (!ReplVal) {
2509 ReplVal = NewReplVal;
2510 break;
2511 }
2512
2513 // If we found a new value, we can't know the ICV value anymore.
2514 if (NewReplVal)
2515 if (ReplVal != NewReplVal)
2516 return nullptr;
2517
2518 break;
2519 }
2520
2521 Optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV);
2522 if (!NewReplVal)
2523 continue;
2524
2525 // Unknown value, track new.
2526 if (!ReplVal) {
2527 ReplVal = NewReplVal;
2528 break;
2529 }
2530
2531 // We found a new value, so we
2532 // can't know the ICV value anymore.
2533 if (ReplVal != NewReplVal)
2534 return nullptr;
2535 }
2536
2537 // If we are in the same BB and we have a value, we are done.
2538 if (CurrBB == I->getParent() && ReplVal)
2539 return ReplVal;
2540
2541 // Go through all predecessors and add terminators for analysis.
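// (Illustrative: for a diamond CFG, both predecessors' terminators are
// enqueued, so a setter on either incoming path is still considered before
// a value is reported for the join block.)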
2542 for (const BasicBlock *Pred : predecessors(CurrBB)) 2543 if (const Instruction *Terminator = Pred->getTerminator()) 2544 Worklist.push_back(Terminator); 2545 } 2546 2547 return ReplVal; 2548 } 2549 }; 2550 2551 struct AAICVTrackerFunctionReturned : AAICVTracker { 2552 AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A) 2553 : AAICVTracker(IRP, A) {} 2554 2555 // FIXME: come up with better string. 2556 const std::string getAsStr() const override { 2557 return "ICVTrackerFunctionReturned"; 2558 } 2559 2560 // FIXME: come up with some stats. 2561 void trackStatistics() const override {} 2562 2563 /// We don't manifest anything for this AA. 2564 ChangeStatus manifest(Attributor &A) override { 2565 return ChangeStatus::UNCHANGED; 2566 } 2567 2568 // Map of ICV to their values at specific program point. 2569 EnumeratedArray<Optional<Value *>, InternalControlVar, 2570 InternalControlVar::ICV___last> 2571 ICVReplacementValuesMap; 2572 2573 /// Return the value with which \p I can be replaced for specific \p ICV. 2574 Optional<Value *> 2575 getUniqueReplacementValue(InternalControlVar ICV) const override { 2576 return ICVReplacementValuesMap[ICV]; 2577 } 2578 2579 ChangeStatus updateImpl(Attributor &A) override { 2580 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2581 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2582 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2583 2584 if (!ICVTrackingAA.isAssumedTracked()) 2585 return indicatePessimisticFixpoint(); 2586 2587 for (InternalControlVar ICV : TrackableICVs) { 2588 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2589 Optional<Value *> UniqueICVValue; 2590 2591 auto CheckReturnInst = [&](Instruction &I) { 2592 Optional<Value *> NewReplVal = 2593 ICVTrackingAA.getReplacementValue(ICV, &I, A); 2594 2595 // If we found a second ICV value there is no unique returned value. 2596 if (UniqueICVValue && UniqueICVValue != NewReplVal) 2597 return false; 2598 2599 UniqueICVValue = NewReplVal; 2600 2601 return true; 2602 }; 2603 2604 bool UsedAssumedInformation = false; 2605 if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, 2606 UsedAssumedInformation, 2607 /* CheckBBLivenessOnly */ true)) 2608 UniqueICVValue = nullptr; 2609 2610 if (UniqueICVValue == ReplVal) 2611 continue; 2612 2613 ReplVal = UniqueICVValue; 2614 Changed = ChangeStatus::CHANGED; 2615 } 2616 2617 return Changed; 2618 } 2619 }; 2620 2621 struct AAICVTrackerCallSite : AAICVTracker { 2622 AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) 2623 : AAICVTracker(IRP, A) {} 2624 2625 void initialize(Attributor &A) override { 2626 Function *F = getAnchorScope(); 2627 if (!F || !A.isFunctionIPOAmendable(*F)) 2628 indicatePessimisticFixpoint(); 2629 2630 // We only initialize this AA for getters, so we need to know which ICV it 2631 // gets. 2632 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2633 for (InternalControlVar ICV : TrackableICVs) { 2634 auto ICVInfo = OMPInfoCache.ICVs[ICV]; 2635 auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; 2636 if (Getter.Declaration == getAssociatedFunction()) { 2637 AssociatedICV = ICVInfo.Kind; 2638 return; 2639 } 2640 } 2641 2642 /// Unknown ICV. 
2643 indicatePessimisticFixpoint(); 2644 } 2645 2646 ChangeStatus manifest(Attributor &A) override { 2647 if (!ReplVal || !*ReplVal) 2648 return ChangeStatus::UNCHANGED; 2649 2650 A.changeAfterManifest(IRPosition::inst(*getCtxI()), **ReplVal); 2651 A.deleteAfterManifest(*getCtxI()); 2652 2653 return ChangeStatus::CHANGED; 2654 } 2655 2656 // FIXME: come up with better string. 2657 const std::string getAsStr() const override { return "ICVTrackerCallSite"; } 2658 2659 // FIXME: come up with some stats. 2660 void trackStatistics() const override {} 2661 2662 InternalControlVar AssociatedICV; 2663 Optional<Value *> ReplVal; 2664 2665 ChangeStatus updateImpl(Attributor &A) override { 2666 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2667 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 2668 2669 // We don't have any information, so we assume it changes the ICV. 2670 if (!ICVTrackingAA.isAssumedTracked()) 2671 return indicatePessimisticFixpoint(); 2672 2673 Optional<Value *> NewReplVal = 2674 ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A); 2675 2676 if (ReplVal == NewReplVal) 2677 return ChangeStatus::UNCHANGED; 2678 2679 ReplVal = NewReplVal; 2680 return ChangeStatus::CHANGED; 2681 } 2682 2683 // Return the value with which associated value can be replaced for specific 2684 // \p ICV. 2685 Optional<Value *> 2686 getUniqueReplacementValue(InternalControlVar ICV) const override { 2687 return ReplVal; 2688 } 2689 }; 2690 2691 struct AAICVTrackerCallSiteReturned : AAICVTracker { 2692 AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) 2693 : AAICVTracker(IRP, A) {} 2694 2695 // FIXME: come up with better string. 2696 const std::string getAsStr() const override { 2697 return "ICVTrackerCallSiteReturned"; 2698 } 2699 2700 // FIXME: come up with some stats. 2701 void trackStatistics() const override {} 2702 2703 /// We don't manifest anything for this AA. 2704 ChangeStatus manifest(Attributor &A) override { 2705 return ChangeStatus::UNCHANGED; 2706 } 2707 2708 // Map of ICV to their values at specific program point. 2709 EnumeratedArray<Optional<Value *>, InternalControlVar, 2710 InternalControlVar::ICV___last> 2711 ICVReplacementValuesMap; 2712 2713 /// Return the value with which associated value can be replaced for specific 2714 /// \p ICV. 2715 Optional<Value *> 2716 getUniqueReplacementValue(InternalControlVar ICV) const override { 2717 return ICVReplacementValuesMap[ICV]; 2718 } 2719 2720 ChangeStatus updateImpl(Attributor &A) override { 2721 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2722 const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>( 2723 *this, IRPosition::returned(*getAssociatedFunction()), 2724 DepClassTy::REQUIRED); 2725 2726 // We don't have any information, so we assume it changes the ICV. 
2727 if (!ICVTrackingAA.isAssumedTracked()) 2728 return indicatePessimisticFixpoint(); 2729 2730 for (InternalControlVar ICV : TrackableICVs) { 2731 Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV]; 2732 Optional<Value *> NewReplVal = 2733 ICVTrackingAA.getUniqueReplacementValue(ICV); 2734 2735 if (ReplVal == NewReplVal) 2736 continue; 2737 2738 ReplVal = NewReplVal; 2739 Changed = ChangeStatus::CHANGED; 2740 } 2741 return Changed; 2742 } 2743 }; 2744 2745 struct AAExecutionDomainFunction : public AAExecutionDomain { 2746 AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A) 2747 : AAExecutionDomain(IRP, A) {} 2748 2749 const std::string getAsStr() const override { 2750 return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) + 2751 "/" + std::to_string(NumBBs) + " BBs thread 0 only."; 2752 } 2753 2754 /// See AbstractAttribute::trackStatistics(). 2755 void trackStatistics() const override {} 2756 2757 void initialize(Attributor &A) override { 2758 Function *F = getAnchorScope(); 2759 for (const auto &BB : *F) 2760 SingleThreadedBBs.insert(&BB); 2761 NumBBs = SingleThreadedBBs.size(); 2762 } 2763 2764 ChangeStatus manifest(Attributor &A) override { 2765 LLVM_DEBUG({ 2766 for (const BasicBlock *BB : SingleThreadedBBs) 2767 dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " " 2768 << BB->getName() << " is executed by a single thread.\n"; 2769 }); 2770 return ChangeStatus::UNCHANGED; 2771 } 2772 2773 ChangeStatus updateImpl(Attributor &A) override; 2774 2775 /// Check if an instruction is executed by a single thread. 2776 bool isExecutedByInitialThreadOnly(const Instruction &I) const override { 2777 return isExecutedByInitialThreadOnly(*I.getParent()); 2778 } 2779 2780 bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override { 2781 return isValidState() && SingleThreadedBBs.contains(&BB); 2782 } 2783 2784 /// Set of basic blocks that are executed by a single thread. 2785 SmallSetVector<const BasicBlock *, 16> SingleThreadedBBs; 2786 2787 /// Total number of basic blocks in this function. 2788 long unsigned NumBBs = 0; 2789 }; 2790 2791 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) { 2792 Function *F = getAnchorScope(); 2793 ReversePostOrderTraversal<Function *> RPOT(F); 2794 auto NumSingleThreadedBBs = SingleThreadedBBs.size(); 2795 2796 bool AllCallSitesKnown; 2797 auto PredForCallSite = [&](AbstractCallSite ACS) { 2798 const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>( 2799 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2800 DepClassTy::REQUIRED); 2801 return ACS.isDirectCall() && 2802 ExecutionDomainAA.isExecutedByInitialThreadOnly( 2803 *ACS.getInstruction()); 2804 }; 2805 2806 if (!A.checkForAllCallSites(PredForCallSite, *this, 2807 /* RequiresAllCallSites */ true, 2808 AllCallSitesKnown)) 2809 SingleThreadedBBs.remove(&F->getEntryBlock()); 2810 2811 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 2812 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 2813 2814 // Check if the edge into the successor block contains a condition that only 2815 // lets the main thread execute it. 
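// For example (illustrative IR):
//   %tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
//   %c   = icmp eq i32 %tid, 0
//   br i1 %c, label %initial.only, label %rest
// Here only the true successor is known to be executed by thread 0 alone.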
2816 auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) { 2817 if (!Edge || !Edge->isConditional()) 2818 return false; 2819 if (Edge->getSuccessor(0) != SuccessorBB) 2820 return false; 2821 2822 auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition()); 2823 if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality()) 2824 return false; 2825 2826 ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1)); 2827 if (!C) 2828 return false; 2829 2830 // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!) 2831 if (C->isAllOnesValue()) { 2832 auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0)); 2833 CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr; 2834 if (!CB) 2835 return false; 2836 const int InitModeArgNo = 1; 2837 auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo)); 2838 return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC); 2839 } 2840 2841 if (C->isZero()) { 2842 // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x() 2843 if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0))) 2844 if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x) 2845 return true; 2846 2847 // Match: 0 == llvm.amdgcn.workitem.id.x() 2848 if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0))) 2849 if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x) 2850 return true; 2851 } 2852 2853 return false; 2854 }; 2855 2856 // Merge all the predecessor states into the current basic block. A basic 2857 // block is executed by a single thread if all of its predecessors are. 2858 auto MergePredecessorStates = [&](BasicBlock *BB) { 2859 if (pred_empty(BB)) 2860 return SingleThreadedBBs.contains(BB); 2861 2862 bool IsInitialThread = true; 2863 for (BasicBlock *PredBB : predecessors(BB)) { 2864 if (!IsInitialThreadOnly(dyn_cast<BranchInst>(PredBB->getTerminator()), 2865 BB)) 2866 IsInitialThread &= SingleThreadedBBs.contains(PredBB); 2867 } 2868 2869 return IsInitialThread; 2870 }; 2871 2872 for (auto *BB : RPOT) { 2873 if (!MergePredecessorStates(BB)) 2874 SingleThreadedBBs.remove(BB); 2875 } 2876 2877 return (NumSingleThreadedBBs == SingleThreadedBBs.size()) 2878 ? ChangeStatus::UNCHANGED 2879 : ChangeStatus::CHANGED; 2880 } 2881 2882 /// Try to replace memory allocation calls called by a single thread with a 2883 /// static buffer of shared memory. 2884 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> { 2885 using Base = StateWrapper<BooleanState, AbstractAttribute>; 2886 AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 2887 2888 /// Create an abstract attribute view for the position \p IRP. 2889 static AAHeapToShared &createForPosition(const IRPosition &IRP, 2890 Attributor &A); 2891 2892 /// Returns true if HeapToShared conversion is assumed to be possible. 2893 virtual bool isAssumedHeapToShared(CallBase &CB) const = 0; 2894 2895 /// Returns true if HeapToShared conversion is assumed and the CB is a 2896 /// callsite to a free operation to be removed. 2897 virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0; 2898 2899 /// See AbstractAttribute::getName(). 2900 const std::string getName() const override { return "AAHeapToShared"; } 2901 2902 /// See AbstractAttribute::getIdAddr(). 2903 const char *getIdAddr() const override { return &ID; } 2904 2905 /// This function should return true if the type of the \p AA is 2906 /// AAHeapToShared. 
2907 static bool classof(const AbstractAttribute *AA) {
2908 return (AA->getIdAddr() == &ID);
2909 }
2910
2911 /// Unique ID (due to the unique address)
2912 static const char ID;
2913 };
2914
2915 struct AAHeapToSharedFunction : public AAHeapToShared {
2916 AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A)
2917 : AAHeapToShared(IRP, A) {}
2918
2919 const std::string getAsStr() const override {
2920 return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) +
2921 " malloc calls eligible.";
2922 }
2923
2924 /// See AbstractAttribute::trackStatistics().
2925 void trackStatistics() const override {}
2926
2927 /// This function finds the free calls that will be removed by the
2928 /// HeapToShared transformation.
2929 void findPotentialRemovedFreeCalls(Attributor &A) {
2930 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2931 auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2932
2933 PotentialRemovedFreeCalls.clear();
2934 // Update free call users of found malloc calls.
2935 for (CallBase *CB : MallocCalls) {
2936 SmallVector<CallBase *, 4> FreeCalls;
2937 for (auto *U : CB->users()) {
2938 CallBase *C = dyn_cast<CallBase>(U);
2939 if (C && C->getCalledFunction() == FreeRFI.Declaration)
2940 FreeCalls.push_back(C);
2941 }
2942
2943 if (FreeCalls.size() != 1)
2944 continue;
2945
2946 PotentialRemovedFreeCalls.insert(FreeCalls.front());
2947 }
2948 }
2949
2950 void initialize(Attributor &A) override {
2951 if (DisableOpenMPOptDeglobalization) {
2952 indicatePessimisticFixpoint();
2953 return;
2954 }
2955
2956 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2957 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
2958
2959 Attributor::SimplifictionCallbackTy SCB =
2960 [](const IRPosition &, const AbstractAttribute *,
2961 bool &) -> Optional<Value *> { return nullptr; };
2962 for (User *U : RFI.Declaration->users())
2963 if (CallBase *CB = dyn_cast<CallBase>(U)) {
2964 MallocCalls.insert(CB);
2965 A.registerSimplificationCallback(IRPosition::callsite_returned(*CB),
2966 SCB);
2967 }
2968
2969 findPotentialRemovedFreeCalls(A);
2970 }
2971
2972 bool isAssumedHeapToShared(CallBase &CB) const override {
2973 return isValidState() && MallocCalls.count(&CB);
2974 }
2975
2976 bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override {
2977 return isValidState() && PotentialRemovedFreeCalls.count(&CB);
2978 }
2979
2980 ChangeStatus manifest(Attributor &A) override {
2981 if (MallocCalls.empty())
2982 return ChangeStatus::UNCHANGED;
2983
2984 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2985 auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2986
2987 Function *F = getAnchorScope();
2988 auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this,
2989 DepClassTy::OPTIONAL);
2990
2991 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2992 for (CallBase *CB : MallocCalls) {
2993 // Skip replacing this if HeapToStack has already claimed it.
2994 if (HS && HS->isAssumedHeapToStack(*CB))
2995 continue;
2996
2997 // Find the unique free call to remove it.
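// (Illustrative pairing this loop matches, with made-up value names:
//   %p = call i8* @__kmpc_alloc_shared(i64 24)
//   ...
//   call void @__kmpc_free_shared(i8* %p, i64 24)
// only allocations with exactly one such free stay eligible.)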
2998 SmallVector<CallBase *, 4> FreeCalls;
2999 for (auto *U : CB->users()) {
3000 CallBase *C = dyn_cast<CallBase>(U);
3001 if (C && C->getCalledFunction() == FreeCall.Declaration)
3002 FreeCalls.push_back(C);
3003 }
3004 if (FreeCalls.size() != 1)
3005 continue;
3006
3007 auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0));
3008
3009 if (AllocSize->getZExtValue() + SharedMemoryUsed > SharedMemoryLimit) {
3010 LLVM_DEBUG(dbgs() << TAG << "Cannot replace call " << *CB
3011 << " with shared memory."
3012 << " Shared memory usage is limited to "
3013 << SharedMemoryLimit << " bytes\n");
3014 continue;
3015 }
3016
3017 LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB
3018 << " with " << AllocSize->getZExtValue()
3019 << " bytes of shared memory\n");
3020
3021 // Create a new shared memory buffer of the same size as the allocation
3022 // and replace all the uses of the original allocation with it.
3023 Module *M = CB->getModule();
3024 Type *Int8Ty = Type::getInt8Ty(M->getContext());
3025 Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue());
3026 auto *SharedMem = new GlobalVariable(
3027 *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage,
3028 UndefValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr,
3029 GlobalValue::NotThreadLocal,
3030 static_cast<unsigned>(AddressSpace::Shared));
3031 auto *NewBuffer =
3032 ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo());
3033
3034 auto Remark = [&](OptimizationRemark OR) {
3035 return OR << "Replaced globalized variable with "
3036 << ore::NV("SharedMemory", AllocSize->getZExtValue())
3037 << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ")
3038 << "of shared memory.";
3039 };
3040 A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark);
3041
3042 MaybeAlign Alignment = CB->getRetAlign();
3043 assert(Alignment &&
3044 "HeapToShared on allocation without alignment attribute");
3045 SharedMem->setAlignment(MaybeAlign(Alignment));
3046
3047 A.changeAfterManifest(IRPosition::callsite_returned(*CB), *NewBuffer);
3048 A.deleteAfterManifest(*CB);
3049 A.deleteAfterManifest(*FreeCalls.front());
3050
3051 SharedMemoryUsed += AllocSize->getZExtValue();
3052 NumBytesMovedToSharedMemory = SharedMemoryUsed;
3053 Changed = ChangeStatus::CHANGED;
3054 }
3055
3056 return Changed;
3057 }
3058
3059 ChangeStatus updateImpl(Attributor &A) override {
3060 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3061 auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3062 Function *F = getAnchorScope();
3063
3064 auto NumMallocCalls = MallocCalls.size();
3065
3066 // Only consider malloc calls executed by a single thread with a constant size.
3067 for (User *U : RFI.Declaration->users()) {
3068 const auto &ED = A.getAAFor<AAExecutionDomain>(
3069 *this, IRPosition::function(*F), DepClassTy::REQUIRED);
3070 if (CallBase *CB = dyn_cast<CallBase>(U))
3071 if (!isa<ConstantInt>(CB->getArgOperand(0)) ||
3072 !ED.isExecutedByInitialThreadOnly(*CB))
3073 MallocCalls.remove(CB);
3074 }
3075
3076 findPotentialRemovedFreeCalls(A);
3077
3078 if (NumMallocCalls != MallocCalls.size())
3079 return ChangeStatus::CHANGED;
3080
3081 return ChangeStatus::UNCHANGED;
3082 }
3083
3084 /// Collection of all malloc calls in a function.
3085 SmallSetVector<CallBase *, 4> MallocCalls;
3086 /// Collection of potentially removed free calls in a function.
3087 SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls;
3088 /// The total amount of shared memory that has been used for HeapToShared.
3089 unsigned SharedMemoryUsed = 0;
3090 };
3091
3092 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> {
3093 using Base = StateWrapper<KernelInfoState, AbstractAttribute>;
3094 AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
3095
3096 /// Statistics are tracked as part of manifest for now.
3097 void trackStatistics() const override {}
3098
3099 /// See AbstractAttribute::getAsStr()
3100 const std::string getAsStr() const override {
3101 if (!isValidState())
3102 return "<invalid>";
3103 return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD"
3104 : "generic") +
3105 std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]"
3106 : "") +
3107 std::string(" #PRs: ") +
3108 (ReachedKnownParallelRegions.isValidState()
3109 ? std::to_string(ReachedKnownParallelRegions.size())
3110 : "<invalid>") +
3111 ", #Unknown PRs: " +
3112 (ReachedUnknownParallelRegions.isValidState()
3113 ? std::to_string(ReachedUnknownParallelRegions.size())
3114 : "<invalid>") +
3115 ", #Reaching Kernels: " +
3116 (ReachingKernelEntries.isValidState()
3117 ? std::to_string(ReachingKernelEntries.size())
3118 : "<invalid>");
3119 }
3120
3121 /// Create an abstract attribute view for the position \p IRP.
3122 static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A);
3123
3124 /// See AbstractAttribute::getName()
3125 const std::string getName() const override { return "AAKernelInfo"; }
3126
3127 /// See AbstractAttribute::getIdAddr()
3128 const char *getIdAddr() const override { return &ID; }
3129
3130 /// This function should return true if the type of the \p AA is AAKernelInfo.
3131 static bool classof(const AbstractAttribute *AA) {
3132 return (AA->getIdAddr() == &ID);
3133 }
3134
3135 static const char ID;
3136 };
3137
3138 /// The function kernel info abstract attribute, basically, what can we say
3139 /// about a function with regard to the KernelInfoState.
3140 struct AAKernelInfoFunction : AAKernelInfo {
3141 AAKernelInfoFunction(const IRPosition &IRP, Attributor &A)
3142 : AAKernelInfo(IRP, A) {}
3143
3144 SmallPtrSet<Instruction *, 4> GuardedInstructions;
3145
3146 SmallPtrSetImpl<Instruction *> &getGuardedInstructions() {
3147 return GuardedInstructions;
3148 }
3149
3150 /// See AbstractAttribute::initialize(...).
3151 void initialize(Attributor &A) override {
3152 // This is a high-level transform that might change the constant arguments
3153 // of the init and deinit calls. We need to tell the Attributor about this
3154 // to avoid other parts using the current constant value for simplification.
3155 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3156
3157 Function *Fn = getAnchorScope();
3158
3159 OMPInformationCache::RuntimeFunctionInfo &InitRFI =
3160 OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
3161 OMPInformationCache::RuntimeFunctionInfo &DeinitRFI =
3162 OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit];
3163
3164 // For kernels we perform more initialization work; first we find the init
3165 // and deinit calls.
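// An illustrative kernel skeleton being matched (arguments elided):
//   %exec_mode = call i32 @__kmpc_target_init(...)
//   ; ... kernel body ...
//   call void @__kmpc_target_deinit(...)
// Exactly one init and one deinit call are expected per kernel.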
3166 auto StoreCallBase = [](Use &U, 3167 OMPInformationCache::RuntimeFunctionInfo &RFI, 3168 CallBase *&Storage) { 3169 CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI); 3170 assert(CB && 3171 "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!"); 3172 assert(!Storage && 3173 "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!"); 3174 Storage = CB; 3175 return false; 3176 }; 3177 InitRFI.foreachUse( 3178 [&](Use &U, Function &) { 3179 StoreCallBase(U, InitRFI, KernelInitCB); 3180 return false; 3181 }, 3182 Fn); 3183 DeinitRFI.foreachUse( 3184 [&](Use &U, Function &) { 3185 StoreCallBase(U, DeinitRFI, KernelDeinitCB); 3186 return false; 3187 }, 3188 Fn); 3189 3190 // Ignore kernels without initializers such as global constructors. 3191 if (!KernelInitCB || !KernelDeinitCB) 3192 return; 3193 3194 // Add itself to the reaching kernel and set IsKernelEntry. 3195 ReachingKernelEntries.insert(Fn); 3196 IsKernelEntry = true; 3197 3198 // For kernels we might need to initialize/finalize the IsSPMD state and 3199 // we need to register a simplification callback so that the Attributor 3200 // knows the constant arguments to __kmpc_target_init and 3201 // __kmpc_target_deinit might actually change. 3202 3203 Attributor::SimplifictionCallbackTy StateMachineSimplifyCB = 3204 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3205 bool &UsedAssumedInformation) -> Optional<Value *> { 3206 // IRP represents the "use generic state machine" argument of an 3207 // __kmpc_target_init call. We will answer this one with the internal 3208 // state. As long as we are not in an invalid state, we will create a 3209 // custom state machine so the value should be a `i1 false`. If we are 3210 // in an invalid state, we won't change the value that is in the IR. 3211 if (!ReachedKnownParallelRegions.isValidState()) 3212 return nullptr; 3213 // If we have disabled state machine rewrites, don't make a custom one. 3214 if (DisableOpenMPOptStateMachineRewrite) 3215 return nullptr; 3216 if (AA) 3217 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3218 UsedAssumedInformation = !isAtFixpoint(); 3219 auto *FalseVal = 3220 ConstantInt::getBool(IRP.getAnchorValue().getContext(), false); 3221 return FalseVal; 3222 }; 3223 3224 Attributor::SimplifictionCallbackTy ModeSimplifyCB = 3225 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3226 bool &UsedAssumedInformation) -> Optional<Value *> { 3227 // IRP represents the "SPMDCompatibilityTracker" argument of an 3228 // __kmpc_target_init or 3229 // __kmpc_target_deinit call. We will answer this one with the internal 3230 // state. 3231 if (!SPMDCompatibilityTracker.isValidState()) 3232 return nullptr; 3233 if (!SPMDCompatibilityTracker.isAtFixpoint()) { 3234 if (AA) 3235 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 3236 UsedAssumedInformation = true; 3237 } else { 3238 UsedAssumedInformation = false; 3239 } 3240 auto *Val = ConstantInt::getSigned( 3241 IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()), 3242 SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD 3243 : OMP_TGT_EXEC_MODE_GENERIC); 3244 return Val; 3245 }; 3246 3247 Attributor::SimplifictionCallbackTy IsGenericModeSimplifyCB = 3248 [&](const IRPosition &IRP, const AbstractAttribute *AA, 3249 bool &UsedAssumedInformation) -> Optional<Value *> { 3250 // IRP represents the "RequiresFullRuntime" argument of an 3251 // __kmpc_target_init or __kmpc_target_deinit call. 
We will answer this
3252 // one with the internal state of the SPMDCompatibilityTracker, so if
3253 // generic then true, if SPMD then false.
3254 if (!SPMDCompatibilityTracker.isValidState())
3255 return nullptr;
3256 if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3257 if (AA)
3258 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3259 UsedAssumedInformation = true;
3260 } else {
3261 UsedAssumedInformation = false;
3262 }
3263 auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(),
3264 !SPMDCompatibilityTracker.isAssumed());
3265 return Val;
3266 };
3267
3268 constexpr const int InitModeArgNo = 1;
3269 constexpr const int DeinitModeArgNo = 1;
3270 constexpr const int InitUseStateMachineArgNo = 2;
3271 constexpr const int InitRequiresFullRuntimeArgNo = 3;
3272 constexpr const int DeinitRequiresFullRuntimeArgNo = 2;
3273 A.registerSimplificationCallback(
3274 IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo),
3275 StateMachineSimplifyCB);
3276 A.registerSimplificationCallback(
3277 IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo),
3278 ModeSimplifyCB);
3279 A.registerSimplificationCallback(
3280 IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo),
3281 ModeSimplifyCB);
3282 A.registerSimplificationCallback(
3283 IRPosition::callsite_argument(*KernelInitCB,
3284 InitRequiresFullRuntimeArgNo),
3285 IsGenericModeSimplifyCB);
3286 A.registerSimplificationCallback(
3287 IRPosition::callsite_argument(*KernelDeinitCB,
3288 DeinitRequiresFullRuntimeArgNo),
3289 IsGenericModeSimplifyCB);
3290
3291 // Check if we know we are in SPMD-mode already.
3292 ConstantInt *ModeArg =
3293 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3294 if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3295 SPMDCompatibilityTracker.indicateOptimisticFixpoint();
3296 // This is a generic region but SPMDization is disabled so stop tracking.
3297 else if (DisableOpenMPOptSPMDization)
3298 SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3299 }
3300
3301 /// Sanitize the string \p S such that it is a suitable global symbol name.
3302 static std::string sanitizeForGlobalName(std::string S) {
3303 std::replace_if(
3304 S.begin(), S.end(),
3305 [](const char C) {
3306 return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
3307 (C >= '0' && C <= '9') || C == '_');
3308 },
3309 '.');
3310 return S;
3311 }
3312
3313 /// Modify the IR based on the KernelInfoState, now that the fixpoint
3314 /// iteration is finished.
3315 ChangeStatus manifest(Attributor &A) override {
3316 // If we are not looking at a kernel with __kmpc_target_init and
3317 // __kmpc_target_deinit calls we cannot actually manifest the information.
3318 if (!KernelInitCB || !KernelDeinitCB)
3319 return ChangeStatus::UNCHANGED;
3320
3321 // If we can, we change the execution mode to SPMD-mode; otherwise we
3322 // build a custom state machine.
3323 ChangeStatus Changed = ChangeStatus::UNCHANGED;
3324 if (!changeToSPMDMode(A, Changed))
3325 return buildCustomStateMachine(A);
3326
3327 return Changed;
3328 }
3329
3330 bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
3331 if (!mayContainParallelRegion())
3332 return false;
3333
3334 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3335
3336 if (!SPMDCompatibilityTracker.isAssumed()) {
3337 for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
3338 if (!NonCompatibleI)
3339 continue;
3340
3341 // Skip diagnostics on calls to known OpenMP runtime functions for now.
3342 if (auto *CB = dyn_cast<CallBase>(NonCompatibleI)) 3343 if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction())) 3344 continue; 3345 3346 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 3347 ORA << "Value has potential side effects preventing SPMD-mode " 3348 "execution"; 3349 if (isa<CallBase>(NonCompatibleI)) { 3350 ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to " 3351 "the called function to override"; 3352 } 3353 return ORA << "."; 3354 }; 3355 A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121", 3356 Remark); 3357 3358 LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: " 3359 << *NonCompatibleI << "\n"); 3360 } 3361 3362 return false; 3363 } 3364 3365 // Get the actual kernel, could be the caller of the anchor scope if we have 3366 // a debug wrapper. 3367 Function *Kernel = getAnchorScope(); 3368 if (Kernel->hasLocalLinkage()) { 3369 assert(Kernel->hasOneUse() && "Unexpected use of debug kernel wrapper."); 3370 auto *CB = cast<CallBase>(Kernel->user_back()); 3371 Kernel = CB->getCaller(); 3372 } 3373 assert(OMPInfoCache.Kernels.count(Kernel) && "Expected kernel function!"); 3374 3375 // Check if the kernel is already in SPMD mode, if so, return success. 3376 GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable( 3377 (Kernel->getName() + "_exec_mode").str()); 3378 assert(ExecMode && "Kernel without exec mode?"); 3379 assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!"); 3380 3381 // Set the global exec mode flag to indicate SPMD-Generic mode. 3382 assert(isa<ConstantInt>(ExecMode->getInitializer()) && 3383 "ExecMode is not an integer!"); 3384 const int8_t ExecModeVal = 3385 cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue(); 3386 if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC) 3387 return true; 3388 3389 // We will now unconditionally modify the IR, indicate a change. 3390 Changed = ChangeStatus::CHANGED; 3391 3392 auto CreateGuardedRegion = [&](Instruction *RegionStartI, 3393 Instruction *RegionEndI) { 3394 LoopInfo *LI = nullptr; 3395 DominatorTree *DT = nullptr; 3396 MemorySSAUpdater *MSU = nullptr; 3397 using InsertPointTy = OpenMPIRBuilder::InsertPointTy; 3398 3399 BasicBlock *ParentBB = RegionStartI->getParent(); 3400 Function *Fn = ParentBB->getParent(); 3401 Module &M = *Fn->getParent(); 3402 3403 // Create all the blocks and logic. 3404 // ParentBB: 3405 // goto RegionCheckTidBB 3406 // RegionCheckTidBB: 3407 // Tid = __kmpc_hardware_thread_id() 3408 // if (Tid != 0) 3409 // goto RegionBarrierBB 3410 // RegionStartBB: 3411 // <execute instructions guarded> 3412 // goto RegionEndBB 3413 // RegionEndBB: 3414 // <store escaping values to shared mem> 3415 // goto RegionBarrierBB 3416 // RegionBarrierBB: 3417 // __kmpc_simple_barrier_spmd() 3418 // // second barrier is omitted if lacking escaping values. 
3419 // <load escaping values from shared mem> 3420 // __kmpc_simple_barrier_spmd() 3421 // goto RegionExitBB 3422 // RegionExitBB: 3423 // <execute rest of instructions> 3424 3425 BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(), 3426 DT, LI, MSU, "region.guarded.end"); 3427 BasicBlock *RegionBarrierBB = 3428 SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI, 3429 MSU, "region.barrier"); 3430 BasicBlock *RegionExitBB = 3431 SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(), 3432 DT, LI, MSU, "region.exit"); 3433 BasicBlock *RegionStartBB = 3434 SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded"); 3435 3436 assert(ParentBB->getUniqueSuccessor() == RegionStartBB && 3437 "Expected a different CFG"); 3438 3439 BasicBlock *RegionCheckTidBB = SplitBlock( 3440 ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid"); 3441 3442 // Register basic blocks with the Attributor. 3443 A.registerManifestAddedBasicBlock(*RegionEndBB); 3444 A.registerManifestAddedBasicBlock(*RegionBarrierBB); 3445 A.registerManifestAddedBasicBlock(*RegionExitBB); 3446 A.registerManifestAddedBasicBlock(*RegionStartBB); 3447 A.registerManifestAddedBasicBlock(*RegionCheckTidBB); 3448 3449 bool HasBroadcastValues = false; 3450 // Find escaping outputs from the guarded region to outside users and 3451 // broadcast their values to them. 3452 for (Instruction &I : *RegionStartBB) { 3453 SmallPtrSet<Instruction *, 4> OutsideUsers; 3454 for (User *Usr : I.users()) { 3455 Instruction &UsrI = *cast<Instruction>(Usr); 3456 if (UsrI.getParent() != RegionStartBB) 3457 OutsideUsers.insert(&UsrI); 3458 } 3459 3460 if (OutsideUsers.empty()) 3461 continue; 3462 3463 HasBroadcastValues = true; 3464 3465 // Emit a global variable in shared memory to store the broadcasted 3466 // value. 3467 auto *SharedMem = new GlobalVariable( 3468 M, I.getType(), /* IsConstant */ false, 3469 GlobalValue::InternalLinkage, UndefValue::get(I.getType()), 3470 sanitizeForGlobalName( 3471 (I.getName() + ".guarded.output.alloc").str()), 3472 nullptr, GlobalValue::NotThreadLocal, 3473 static_cast<unsigned>(AddressSpace::Shared)); 3474 3475 // Emit a store instruction to update the value. 3476 new StoreInst(&I, SharedMem, RegionEndBB->getTerminator()); 3477 3478 LoadInst *LoadI = new LoadInst(I.getType(), SharedMem, 3479 I.getName() + ".guarded.output.load", 3480 RegionBarrierBB->getTerminator()); 3481 3482 // Emit a load instruction and replace uses of the output value. 3483 for (Instruction *UsrI : OutsideUsers) 3484 UsrI->replaceUsesOfWith(&I, LoadI); 3485 } 3486 3487 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3488 3489 // Go to tid check BB in ParentBB. 
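      // After the rewiring below the guard takes roughly this shape
      // (illustrative IR, value names simplified):
      //
      //   region.check.tid:
      //     %tid = call i32 @__kmpc_get_hardware_thread_id_in_block()
      //     %is.main = icmp eq i32 %tid, 0
      //     br i1 %is.main, label %region.guarded, label %region.barrier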
3490 const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc(); 3491 ParentBB->getTerminator()->eraseFromParent(); 3492 OpenMPIRBuilder::LocationDescription Loc( 3493 InsertPointTy(ParentBB, ParentBB->end()), DL); 3494 OMPInfoCache.OMPBuilder.updateToLocation(Loc); 3495 uint32_t SrcLocStrSize; 3496 auto *SrcLocStr = 3497 OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize); 3498 Value *Ident = 3499 OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize); 3500 BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL); 3501 3502 // Add check for Tid in RegionCheckTidBB 3503 RegionCheckTidBB->getTerminator()->eraseFromParent(); 3504 OpenMPIRBuilder::LocationDescription LocRegionCheckTid( 3505 InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL); 3506 OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid); 3507 FunctionCallee HardwareTidFn = 3508 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3509 M, OMPRTL___kmpc_get_hardware_thread_id_in_block); 3510 CallInst *Tid = 3511 OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {}); 3512 Tid->setDebugLoc(DL); 3513 OMPInfoCache.setCallingConvention(HardwareTidFn, Tid); 3514 Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid); 3515 OMPInfoCache.OMPBuilder.Builder 3516 .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB) 3517 ->setDebugLoc(DL); 3518 3519 // First barrier for synchronization, ensures main thread has updated 3520 // values. 3521 FunctionCallee BarrierFn = 3522 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3523 M, OMPRTL___kmpc_barrier_simple_spmd); 3524 OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy( 3525 RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt())); 3526 CallInst *Barrier = 3527 OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid}); 3528 Barrier->setDebugLoc(DL); 3529 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3530 3531 // Second barrier ensures workers have read broadcast values. 
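    // Without it the main thread could re-enter the guarded region, e.g., in
    // a loop, and overwrite a shared broadcast slot before all workers have
    // loaded the previous value.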
3532 if (HasBroadcastValues) { 3533 CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "", 3534 RegionBarrierBB->getTerminator()); 3535 Barrier->setDebugLoc(DL); 3536 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3537 } 3538 }; 3539 3540 auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 3541 SmallPtrSet<BasicBlock *, 8> Visited; 3542 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3543 BasicBlock *BB = GuardedI->getParent(); 3544 if (!Visited.insert(BB).second) 3545 continue; 3546 3547 SmallVector<std::pair<Instruction *, Instruction *>> Reorders; 3548 Instruction *LastEffect = nullptr; 3549 BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend(); 3550 while (++IP != IPEnd) { 3551 if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory()) 3552 continue; 3553 Instruction *I = &*IP; 3554 if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI)) 3555 continue; 3556 if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) { 3557 LastEffect = nullptr; 3558 continue; 3559 } 3560 if (LastEffect) 3561 Reorders.push_back({I, LastEffect}); 3562 LastEffect = &*IP; 3563 } 3564 for (auto &Reorder : Reorders) 3565 Reorder.first->moveBefore(Reorder.second); 3566 } 3567 3568 SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions; 3569 3570 for (Instruction *GuardedI : SPMDCompatibilityTracker) { 3571 BasicBlock *BB = GuardedI->getParent(); 3572 auto *CalleeAA = A.lookupAAFor<AAKernelInfo>( 3573 IRPosition::function(*GuardedI->getFunction()), nullptr, 3574 DepClassTy::NONE); 3575 assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo"); 3576 auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA); 3577 // Continue if instruction is already guarded. 3578 if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI)) 3579 continue; 3580 3581 Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr; 3582 for (Instruction &I : *BB) { 3583 // If instruction I needs to be guarded update the guarded region 3584 // bounds. 3585 if (SPMDCompatibilityTracker.contains(&I)) { 3586 CalleeAAFunction.getGuardedInstructions().insert(&I); 3587 if (GuardedRegionStart) 3588 GuardedRegionEnd = &I; 3589 else 3590 GuardedRegionStart = GuardedRegionEnd = &I; 3591 3592 continue; 3593 } 3594 3595 // Instruction I does not need guarding, store 3596 // any region found and reset bounds. 3597 if (GuardedRegionStart) { 3598 GuardedRegions.push_back( 3599 std::make_pair(GuardedRegionStart, GuardedRegionEnd)); 3600 GuardedRegionStart = nullptr; 3601 GuardedRegionEnd = nullptr; 3602 } 3603 } 3604 } 3605 3606 for (auto &GR : GuardedRegions) 3607 CreateGuardedRegion(GR.first, GR.second); 3608 3609 // Adjust the global exec mode flag that tells the runtime what mode this 3610 // kernel is executed in. 3611 assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC && 3612 "Initially non-SPMD kernel has SPMD exec mode!"); 3613 ExecMode->setInitializer( 3614 ConstantInt::get(ExecMode->getInitializer()->getType(), 3615 ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD)); 3616 3617 // Next rewrite the init and deinit calls to indicate we use SPMD-mode now. 
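    // Roughly, this rewrites (argument values shown symbolically, all other
    // operands unchanged):
    //
    //   call i32 @__kmpc_target_init(%ident, GENERIC, /*UseStateMachine=*/true,
    //                                /*RequiresFullRuntime=*/true)
    //   call void @__kmpc_target_deinit(%ident, GENERIC,
    //                                   /*RequiresFullRuntime=*/true)
    // into
    //   call i32 @__kmpc_target_init(%ident, SPMD, /*UseStateMachine=*/false,
    //                                /*RequiresFullRuntime=*/false)
    //   call void @__kmpc_target_deinit(%ident, SPMD,
    //                                   /*RequiresFullRuntime=*/false)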
3618 const int InitModeArgNo = 1; 3619 const int DeinitModeArgNo = 1; 3620 const int InitUseStateMachineArgNo = 2; 3621 const int InitRequiresFullRuntimeArgNo = 3; 3622 const int DeinitRequiresFullRuntimeArgNo = 2; 3623 3624 auto &Ctx = getAnchorValue().getContext(); 3625 A.changeUseAfterManifest( 3626 KernelInitCB->getArgOperandUse(InitModeArgNo), 3627 *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx), 3628 OMP_TGT_EXEC_MODE_SPMD)); 3629 A.changeUseAfterManifest( 3630 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), 3631 *ConstantInt::getBool(Ctx, false)); 3632 A.changeUseAfterManifest( 3633 KernelDeinitCB->getArgOperandUse(DeinitModeArgNo), 3634 *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx), 3635 OMP_TGT_EXEC_MODE_SPMD)); 3636 A.changeUseAfterManifest( 3637 KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo), 3638 *ConstantInt::getBool(Ctx, false)); 3639 A.changeUseAfterManifest( 3640 KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo), 3641 *ConstantInt::getBool(Ctx, false)); 3642 3643 ++NumOpenMPTargetRegionKernelsSPMD; 3644 3645 auto Remark = [&](OptimizationRemark OR) { 3646 return OR << "Transformed generic-mode kernel to SPMD-mode."; 3647 }; 3648 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark); 3649 return true; 3650 }; 3651 3652 ChangeStatus buildCustomStateMachine(Attributor &A) { 3653 // If we have disabled state machine rewrites, don't make a custom one 3654 if (DisableOpenMPOptStateMachineRewrite) 3655 return ChangeStatus::UNCHANGED; 3656 3657 // Don't rewrite the state machine if we are not in a valid state. 3658 if (!ReachedKnownParallelRegions.isValidState()) 3659 return ChangeStatus::UNCHANGED; 3660 3661 const int InitModeArgNo = 1; 3662 const int InitUseStateMachineArgNo = 2; 3663 3664 // Check if the current configuration is non-SPMD and generic state machine. 3665 // If we already have SPMD mode or a custom state machine we do not need to 3666 // go any further. If it is anything but a constant something is weird and 3667 // we give up. 3668 ConstantInt *UseStateMachine = dyn_cast<ConstantInt>( 3669 KernelInitCB->getArgOperand(InitUseStateMachineArgNo)); 3670 ConstantInt *Mode = 3671 dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo)); 3672 3673 // If we are stuck with generic mode, try to create a custom device (=GPU) 3674 // state machine which is specialized for the parallel regions that are 3675 // reachable by the kernel. 3676 if (!UseStateMachine || UseStateMachine->isZero() || !Mode || 3677 (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD)) 3678 return ChangeStatus::UNCHANGED; 3679 3680 // If not SPMD mode, indicate we use a custom state machine now. 3681 auto &Ctx = getAnchorValue().getContext(); 3682 auto *FalseVal = ConstantInt::getBool(Ctx, false); 3683 A.changeUseAfterManifest( 3684 KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal); 3685 3686 // If we don't actually need a state machine we are done here. This can 3687 // happen if there simply are no parallel regions. In the resulting kernel 3688 // all worker threads will simply exit right away, leaving the main thread 3689 // to do the work alone. 
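    // Note that flipping the "use generic state machine" argument above is
    // sufficient in that case; no replacement CFG needs to be built because
    // there is no parallel work a worker thread could ever pick up.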
3690 if (!mayContainParallelRegion()) { 3691 ++NumOpenMPTargetRegionKernelsWithoutStateMachine; 3692 3693 auto Remark = [&](OptimizationRemark OR) { 3694 return OR << "Removing unused state machine from generic-mode kernel."; 3695 }; 3696 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark); 3697 3698 return ChangeStatus::CHANGED; 3699 } 3700 3701 // Keep track in the statistics of our new shiny custom state machine. 3702 if (ReachedUnknownParallelRegions.empty()) { 3703 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback; 3704 3705 auto Remark = [&](OptimizationRemark OR) { 3706 return OR << "Rewriting generic-mode kernel with a customized state " 3707 "machine."; 3708 }; 3709 A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark); 3710 } else { 3711 ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback; 3712 3713 auto Remark = [&](OptimizationRemarkAnalysis OR) { 3714 return OR << "Generic-mode kernel is executed with a customized state " 3715 "machine that requires a fallback."; 3716 }; 3717 A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark); 3718 3719 // Tell the user why we ended up with a fallback. 3720 for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) { 3721 if (!UnknownParallelRegionCB) 3722 continue; 3723 auto Remark = [&](OptimizationRemarkAnalysis ORA) { 3724 return ORA << "Call may contain unknown parallel regions. Use " 3725 << "`__attribute__((assume(\"omp_no_parallelism\")))` to " 3726 "override."; 3727 }; 3728 A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB, 3729 "OMP133", Remark); 3730 } 3731 } 3732 3733 // Create all the blocks: 3734 // 3735 // InitCB = __kmpc_target_init(...) 3736 // BlockHwSize = 3737 // __kmpc_get_hardware_num_threads_in_block(); 3738 // WarpSize = __kmpc_get_warp_size(); 3739 // BlockSize = BlockHwSize - WarpSize; 3740 // IsWorkerCheckBB: bool IsWorker = InitCB != -1; 3741 // if (IsWorker) { 3742 // if (InitCB >= BlockSize) return; 3743 // SMBeginBB: __kmpc_barrier_simple_generic(...); 3744 // void *WorkFn; 3745 // bool Active = __kmpc_kernel_parallel(&WorkFn); 3746 // if (!WorkFn) return; 3747 // SMIsActiveCheckBB: if (Active) { 3748 // SMIfCascadeCurrentBB: if (WorkFn == <ParFn0>) 3749 // ParFn0(...); 3750 // SMIfCascadeCurrentBB: else if (WorkFn == <ParFn1>) 3751 // ParFn1(...); 3752 // ... 3753 // SMIfCascadeCurrentBB: else 3754 // ((WorkFnTy*)WorkFn)(...); 3755 // SMEndParallelBB: __kmpc_kernel_end_parallel(...); 3756 // } 3757 // SMDoneBB: __kmpc_barrier_simple_generic(...); 3758 // goto SMBeginBB; 3759 // } 3760 // UserCodeEntryBB: // user code 3761 // __kmpc_target_deinit(...) 
3762 // 3763 Function *Kernel = getAssociatedFunction(); 3764 assert(Kernel && "Expected an associated function!"); 3765 3766 BasicBlock *InitBB = KernelInitCB->getParent(); 3767 BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock( 3768 KernelInitCB->getNextNode(), "thread.user_code.check"); 3769 BasicBlock *IsWorkerCheckBB = 3770 BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB); 3771 BasicBlock *StateMachineBeginBB = BasicBlock::Create( 3772 Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB); 3773 BasicBlock *StateMachineFinishedBB = BasicBlock::Create( 3774 Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB); 3775 BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create( 3776 Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB); 3777 BasicBlock *StateMachineIfCascadeCurrentBB = 3778 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 3779 Kernel, UserCodeEntryBB); 3780 BasicBlock *StateMachineEndParallelBB = 3781 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end", 3782 Kernel, UserCodeEntryBB); 3783 BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create( 3784 Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB); 3785 A.registerManifestAddedBasicBlock(*InitBB); 3786 A.registerManifestAddedBasicBlock(*UserCodeEntryBB); 3787 A.registerManifestAddedBasicBlock(*IsWorkerCheckBB); 3788 A.registerManifestAddedBasicBlock(*StateMachineBeginBB); 3789 A.registerManifestAddedBasicBlock(*StateMachineFinishedBB); 3790 A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB); 3791 A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB); 3792 A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB); 3793 A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB); 3794 3795 const DebugLoc &DLoc = KernelInitCB->getDebugLoc(); 3796 ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc); 3797 InitBB->getTerminator()->eraseFromParent(); 3798 3799 Instruction *IsWorker = 3800 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB, 3801 ConstantInt::get(KernelInitCB->getType(), -1), 3802 "thread.is_worker", InitBB); 3803 IsWorker->setDebugLoc(DLoc); 3804 BranchInst::Create(IsWorkerCheckBB, UserCodeEntryBB, IsWorker, InitBB); 3805 3806 Module &M = *Kernel->getParent(); 3807 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 3808 FunctionCallee BlockHwSizeFn = 3809 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3810 M, OMPRTL___kmpc_get_hardware_num_threads_in_block); 3811 FunctionCallee WarpSizeFn = 3812 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3813 M, OMPRTL___kmpc_get_warp_size); 3814 CallInst *BlockHwSize = 3815 CallInst::Create(BlockHwSizeFn, "block.hw_size", IsWorkerCheckBB); 3816 OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize); 3817 BlockHwSize->setDebugLoc(DLoc); 3818 CallInst *WarpSize = 3819 CallInst::Create(WarpSizeFn, "warp.size", IsWorkerCheckBB); 3820 OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize); 3821 WarpSize->setDebugLoc(DLoc); 3822 Instruction *BlockSize = BinaryOperator::CreateSub( 3823 BlockHwSize, WarpSize, "block.size", IsWorkerCheckBB); 3824 BlockSize->setDebugLoc(DLoc); 3825 Instruction *IsMainOrWorker = ICmpInst::Create( 3826 ICmpInst::ICmp, llvm::CmpInst::ICMP_SLT, KernelInitCB, BlockSize, 3827 "thread.is_main_or_worker", IsWorkerCheckBB); 3828 IsMainOrWorker->setDebugLoc(DLoc); 3829 BranchInst::Create(StateMachineBeginBB, StateMachineFinishedBB, 3830 
IsMainOrWorker, IsWorkerCheckBB); 3831 3832 // Create local storage for the work function pointer. 3833 const DataLayout &DL = M.getDataLayout(); 3834 Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); 3835 Instruction *WorkFnAI = 3836 new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr, 3837 "worker.work_fn.addr", &Kernel->getEntryBlock().front()); 3838 WorkFnAI->setDebugLoc(DLoc); 3839 3840 OMPInfoCache.OMPBuilder.updateToLocation( 3841 OpenMPIRBuilder::LocationDescription( 3842 IRBuilder<>::InsertPoint(StateMachineBeginBB, 3843 StateMachineBeginBB->end()), 3844 DLoc)); 3845 3846 Value *Ident = KernelInitCB->getArgOperand(0); 3847 Value *GTid = KernelInitCB; 3848 3849 FunctionCallee BarrierFn = 3850 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3851 M, OMPRTL___kmpc_barrier_simple_generic); 3852 CallInst *Barrier = 3853 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB); 3854 OMPInfoCache.setCallingConvention(BarrierFn, Barrier); 3855 Barrier->setDebugLoc(DLoc); 3856 3857 if (WorkFnAI->getType()->getPointerAddressSpace() != 3858 (unsigned int)AddressSpace::Generic) { 3859 WorkFnAI = new AddrSpaceCastInst( 3860 WorkFnAI, 3861 PointerType::getWithSamePointeeType( 3862 cast<PointerType>(WorkFnAI->getType()), 3863 (unsigned int)AddressSpace::Generic), 3864 WorkFnAI->getName() + ".generic", StateMachineBeginBB); 3865 WorkFnAI->setDebugLoc(DLoc); 3866 } 3867 3868 FunctionCallee KernelParallelFn = 3869 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3870 M, OMPRTL___kmpc_kernel_parallel); 3871 CallInst *IsActiveWorker = CallInst::Create( 3872 KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB); 3873 OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker); 3874 IsActiveWorker->setDebugLoc(DLoc); 3875 Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn", 3876 StateMachineBeginBB); 3877 WorkFn->setDebugLoc(DLoc); 3878 3879 FunctionType *ParallelRegionFnTy = FunctionType::get( 3880 Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)}, 3881 false); 3882 Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 3883 WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast", 3884 StateMachineBeginBB); 3885 3886 Instruction *IsDone = 3887 ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn, 3888 Constant::getNullValue(VoidPtrTy), "worker.is_done", 3889 StateMachineBeginBB); 3890 IsDone->setDebugLoc(DLoc); 3891 BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB, 3892 IsDone, StateMachineBeginBB) 3893 ->setDebugLoc(DLoc); 3894 3895 BranchInst::Create(StateMachineIfCascadeCurrentBB, 3896 StateMachineDoneBarrierBB, IsActiveWorker, 3897 StateMachineIsActiveCheckBB) 3898 ->setDebugLoc(DLoc); 3899 3900 Value *ZeroArg = 3901 Constant::getNullValue(ParallelRegionFnTy->getParamType(0)); 3902 3903 // Now that we have most of the CFG skeleton it is time for the if-cascade 3904 // that checks the function pointer we got from the runtime against the 3905 // parallel regions we expect, if there are any. 
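    // For two known parallel regions ParFn0 and ParFn1 the cascade built
    // below behaves roughly like (illustrative):
    //
    //   if (WorkFnCast == ParFn0)      ParFn0(0, GTid);
    //   else if (WorkFnCast == ParFn1) ParFn1(0, GTid);
    //   else ((WorkFnTy *)WorkFn)(0, GTid);  // only for unknown regions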
3906 for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) { 3907 auto *ParallelRegion = ReachedKnownParallelRegions[I]; 3908 BasicBlock *PRExecuteBB = BasicBlock::Create( 3909 Ctx, "worker_state_machine.parallel_region.execute", Kernel, 3910 StateMachineEndParallelBB); 3911 CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB) 3912 ->setDebugLoc(DLoc); 3913 BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB) 3914 ->setDebugLoc(DLoc); 3915 3916 BasicBlock *PRNextBB = 3917 BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", 3918 Kernel, StateMachineEndParallelBB); 3919 3920 // Check if we need to compare the pointer at all or if we can just 3921 // call the parallel region function. 3922 Value *IsPR; 3923 if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) { 3924 Instruction *CmpI = ICmpInst::Create( 3925 ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion, 3926 "worker.check_parallel_region", StateMachineIfCascadeCurrentBB); 3927 CmpI->setDebugLoc(DLoc); 3928 IsPR = CmpI; 3929 } else { 3930 IsPR = ConstantInt::getTrue(Ctx); 3931 } 3932 3933 BranchInst::Create(PRExecuteBB, PRNextBB, IsPR, 3934 StateMachineIfCascadeCurrentBB) 3935 ->setDebugLoc(DLoc); 3936 StateMachineIfCascadeCurrentBB = PRNextBB; 3937 } 3938 3939 // At the end of the if-cascade we place the indirect function pointer call 3940 // in case we might need it, that is if there can be parallel regions we 3941 // have not handled in the if-cascade above. 3942 if (!ReachedUnknownParallelRegions.empty()) { 3943 StateMachineIfCascadeCurrentBB->setName( 3944 "worker_state_machine.parallel_region.fallback.execute"); 3945 CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "", 3946 StateMachineIfCascadeCurrentBB) 3947 ->setDebugLoc(DLoc); 3948 } 3949 BranchInst::Create(StateMachineEndParallelBB, 3950 StateMachineIfCascadeCurrentBB) 3951 ->setDebugLoc(DLoc); 3952 3953 FunctionCallee EndParallelFn = 3954 OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( 3955 M, OMPRTL___kmpc_kernel_end_parallel); 3956 CallInst *EndParallel = 3957 CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB); 3958 OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel); 3959 EndParallel->setDebugLoc(DLoc); 3960 BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB) 3961 ->setDebugLoc(DLoc); 3962 3963 CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB) 3964 ->setDebugLoc(DLoc); 3965 BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB) 3966 ->setDebugLoc(DLoc); 3967 3968 return ChangeStatus::CHANGED; 3969 } 3970 3971 /// Fixpoint iteration update function. Will be called every time a dependence 3972 /// changed its state (and in the beginning). 3973 ChangeStatus updateImpl(Attributor &A) override { 3974 KernelInfoState StateBefore = getState(); 3975 3976 // Callback to check a read/write instruction. 3977 auto CheckRWInst = [&](Instruction &I) { 3978 // We handle calls later. 3979 if (isa<CallBase>(I)) 3980 return true; 3981 // We only care about write effects. 3982 if (!I.mayWriteToMemory()) 3983 return true; 3984 if (auto *SI = dyn_cast<StoreInst>(&I)) { 3985 SmallVector<const Value *> Objects; 3986 getUnderlyingObjects(SI->getPointerOperand(), Objects); 3987 if (llvm::all_of(Objects, 3988 [](const Value *Obj) { return isa<AllocaInst>(Obj); })) 3989 return true; 3990 // Check for AAHeapToStack moved objects which must not be guarded. 
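        // A call that AAHeapToStack rewrites into an alloca becomes
        // thread-private stack memory, so stores into it cannot race between
        // threads and need no guard.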
        auto &HS = A.getAAFor<AAHeapToStack>(
            *this, IRPosition::function(*I.getFunction()),
            DepClassTy::OPTIONAL);
        if (llvm::all_of(Objects, [&HS](const Value *Obj) {
              auto *CB = dyn_cast<CallBase>(Obj);
              if (!CB)
                return false;
              return HS.isAssumedHeapToStack(*CB);
            })) {
          return true;
        }
      }

      // Insert instruction that needs guarding.
      SPMDCompatibilityTracker.insert(&I);
      return true;
    };

    bool UsedAssumedInformationInCheckRWInst = false;
    if (!SPMDCompatibilityTracker.isAtFixpoint())
      if (!A.checkForAllReadWriteInstructions(
              CheckRWInst, *this, UsedAssumedInformationInCheckRWInst))
        SPMDCompatibilityTracker.indicatePessimisticFixpoint();

    bool UsedAssumedInformationFromReachingKernels = false;
    if (!IsKernelEntry) {
      updateParallelLevels(A);

      bool AllReachingKernelsKnown = true;
      updateReachingKernelEntries(A, AllReachingKernelsKnown);
      UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown;

      if (!ParallelLevels.isValidState())
        SPMDCompatibilityTracker.indicatePessimisticFixpoint();
      else if (!ReachingKernelEntries.isValidState())
        SPMDCompatibilityTracker.indicatePessimisticFixpoint();
      else if (!SPMDCompatibilityTracker.empty()) {
        // Check if all reaching kernels agree on the mode as we can otherwise
        // not guard instructions. We might not be sure about the mode so we
        // cannot fix the internal SPMD-ization state either.
        int SPMD = 0, Generic = 0;
        for (auto *Kernel : ReachingKernelEntries) {
          auto &CBAA = A.getAAFor<AAKernelInfo>(
              *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL);
          if (CBAA.SPMDCompatibilityTracker.isValidState() &&
              CBAA.SPMDCompatibilityTracker.isAssumed())
            ++SPMD;
          else
            ++Generic;
          if (!CBAA.SPMDCompatibilityTracker.isAtFixpoint())
            UsedAssumedInformationFromReachingKernels = true;
        }
        if (SPMD != 0 && Generic != 0)
          SPMDCompatibilityTracker.indicatePessimisticFixpoint();
      }
    }

    // Callback to check a call instruction.
    bool AllParallelRegionStatesWereFixed = true;
    bool AllSPMDStatesWereFixed = true;
    auto CheckCallInst = [&](Instruction &I) {
      auto &CB = cast<CallBase>(I);
      auto &CBAA = A.getAAFor<AAKernelInfo>(
          *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
      getState() ^= CBAA.getState();
      AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint();
      AllParallelRegionStatesWereFixed &=
          CBAA.ReachedKnownParallelRegions.isAtFixpoint();
      AllParallelRegionStatesWereFixed &=
          CBAA.ReachedUnknownParallelRegions.isAtFixpoint();
      return true;
    };

    bool UsedAssumedInformationInCheckCallInst = false;
    if (!A.checkForAllCallLikeInstructions(
            CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) {
      LLVM_DEBUG(dbgs() << TAG
                        << "Failed to visit all call-like instructions!\n";);
      return indicatePessimisticFixpoint();
    }

    // If we haven't used any assumed information for the reached parallel
    // region states we can fix it.
    if (!UsedAssumedInformationInCheckCallInst &&
        AllParallelRegionStatesWereFixed) {
      ReachedKnownParallelRegions.indicateOptimisticFixpoint();
      ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
    }

    // If we are sure there are no parallel regions in the kernel we do not
    // want SPMD mode.
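    // Executing such a kernel in SPMD mode would merely hide its sequential
    // code behind thread-0 guards; generic mode without a state machine (see
    // buildCustomStateMachine) is the better fit.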
    if (IsKernelEntry && ReachedUnknownParallelRegions.isAtFixpoint() &&
        ReachedKnownParallelRegions.isAtFixpoint() &&
        ReachedUnknownParallelRegions.isValidState() &&
        ReachedKnownParallelRegions.isValidState() &&
        !mayContainParallelRegion())
      SPMDCompatibilityTracker.indicatePessimisticFixpoint();

    // If we haven't used any assumed information for the SPMD state we can
    // fix it.
    if (!UsedAssumedInformationInCheckRWInst &&
        !UsedAssumedInformationInCheckCallInst &&
        !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed)
      SPMDCompatibilityTracker.indicateOptimisticFixpoint();

    return StateBefore == getState() ? ChangeStatus::UNCHANGED
                                     : ChangeStatus::CHANGED;
  }

private:
  /// Update info regarding reaching kernels.
  void updateReachingKernelEntries(Attributor &A,
                                   bool &AllReachingKernelsKnown) {
    auto PredCallSite = [&](AbstractCallSite ACS) {
      Function *Caller = ACS.getInstruction()->getFunction();

      assert(Caller && "Caller is nullptr");

      auto &CAA = A.getOrCreateAAFor<AAKernelInfo>(
          IRPosition::function(*Caller), this, DepClassTy::REQUIRED);
      if (CAA.ReachingKernelEntries.isValidState()) {
        ReachingKernelEntries ^= CAA.ReachingKernelEntries;
        return true;
      }

      // We lost track of the caller of the associated function; any kernel
      // could reach it now.
      ReachingKernelEntries.indicatePessimisticFixpoint();

      return true;
    };

    if (!A.checkForAllCallSites(PredCallSite, *this,
                                true /* RequireAllCallSites */,
                                AllReachingKernelsKnown))
      ReachingKernelEntries.indicatePessimisticFixpoint();
  }

  /// Update info regarding parallel levels.
  void updateParallelLevels(Attributor &A) {
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI =
        OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];

    auto PredCallSite = [&](AbstractCallSite ACS) {
      Function *Caller = ACS.getInstruction()->getFunction();

      assert(Caller && "Caller is nullptr");

      auto &CAA =
          A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller));
      if (CAA.ParallelLevels.isValidState()) {
        // Any function that is called by `__kmpc_parallel_51` will not be
        // folded because the parallel level inside such a function is
        // updated. Getting this right would make the analysis depend on the
        // runtime implementation, and any future change to that
        // implementation could silently invalidate the result. As a
        // consequence, we are simply conservative here.
        if (Caller == Parallel51RFI.Declaration) {
          ParallelLevels.indicatePessimisticFixpoint();
          return true;
        }

        ParallelLevels ^= CAA.ParallelLevels;

        return true;
      }

      // We lost track of the caller of the associated function; any kernel
      // could reach it now.
      ParallelLevels.indicatePessimisticFixpoint();

      return true;
    };

    bool AllCallSitesKnown = true;
    if (!A.checkForAllCallSites(PredCallSite, *this,
                                true /* RequireAllCallSites */,
                                AllCallSitesKnown))
      ParallelLevels.indicatePessimisticFixpoint();
  }
};

/// The call site kernel info abstract attribute, basically, what can we say
/// about a call site with regard to the KernelInfoState. For now this simply
/// forwards the information from the callee.
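/// Calls to known OpenMP runtime functions are instead modeled explicitly in
/// initialize() and usually reach a fixpoint without any update.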
struct AAKernelInfoCallSite : AAKernelInfo {
  AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAKernelInfo(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAKernelInfo::initialize(A);

    CallBase &CB = cast<CallBase>(getAssociatedValue());
    Function *Callee = getAssociatedFunction();

    auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
        *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);

    // Check for SPMD-mode assumptions.
    if (AssumptionAA.hasAssumption("ompx_spmd_amenable")) {
      SPMDCompatibilityTracker.indicateOptimisticFixpoint();
      indicateOptimisticFixpoint();
    }

    // First weed out calls we do not care about, that is readonly/readnone
    // calls, intrinsics, and "no_openmp" calls. None of these can reach a
    // parallel region or anything else we are looking for.
    if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) {
      indicateOptimisticFixpoint();
      return;
    }

    // Next we check if we know the callee. If it is a known OpenMP function
    // we will handle it explicitly in the switch below. If it is not, we
    // will use an AAKernelInfo object on the callee to gather information and
    // merge that into the current state. The latter happens in the updateImpl.
    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
    if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
      // Unknown callees or mere declarations are not analyzable, we give up.
      if (!Callee || !A.isFunctionIPOAmendable(*Callee)) {

        // Unknown callees might contain parallel regions, except if they have
        // an appropriate assumption attached.
        if (!(AssumptionAA.hasAssumption("omp_no_openmp") ||
              AssumptionAA.hasAssumption("omp_no_parallelism")))
          ReachedUnknownParallelRegions.insert(&CB);

        // If SPMDCompatibilityTracker is not fixed, we need to give up on the
        // idea we can run something unknown in SPMD-mode.
        if (!SPMDCompatibilityTracker.isAtFixpoint()) {
          SPMDCompatibilityTracker.indicatePessimisticFixpoint();
          SPMDCompatibilityTracker.insert(&CB);
        }

        // We have updated the state for this unknown call properly, there
        // won't be any change so we indicate a fixpoint.
        indicateOptimisticFixpoint();
      }
      // If the callee is known and can be used in IPO, we will update the
      // state based on the callee state in updateImpl.
      return;
    }

    const unsigned int WrapperFunctionArgNo = 6;
    RuntimeFunction RF = It->getSecond();
    switch (RF) {
    // All the functions we know are compatible with SPMD mode.
4240 case OMPRTL___kmpc_is_spmd_exec_mode: 4241 case OMPRTL___kmpc_distribute_static_fini: 4242 case OMPRTL___kmpc_for_static_fini: 4243 case OMPRTL___kmpc_global_thread_num: 4244 case OMPRTL___kmpc_get_hardware_num_threads_in_block: 4245 case OMPRTL___kmpc_get_hardware_num_blocks: 4246 case OMPRTL___kmpc_single: 4247 case OMPRTL___kmpc_end_single: 4248 case OMPRTL___kmpc_master: 4249 case OMPRTL___kmpc_end_master: 4250 case OMPRTL___kmpc_barrier: 4251 case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2: 4252 case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2: 4253 case OMPRTL___kmpc_nvptx_end_reduce_nowait: 4254 break; 4255 case OMPRTL___kmpc_distribute_static_init_4: 4256 case OMPRTL___kmpc_distribute_static_init_4u: 4257 case OMPRTL___kmpc_distribute_static_init_8: 4258 case OMPRTL___kmpc_distribute_static_init_8u: 4259 case OMPRTL___kmpc_for_static_init_4: 4260 case OMPRTL___kmpc_for_static_init_4u: 4261 case OMPRTL___kmpc_for_static_init_8: 4262 case OMPRTL___kmpc_for_static_init_8u: { 4263 // Check the schedule and allow static schedule in SPMD mode. 4264 unsigned ScheduleArgOpNo = 2; 4265 auto *ScheduleTypeCI = 4266 dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo)); 4267 unsigned ScheduleTypeVal = 4268 ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0; 4269 switch (OMPScheduleType(ScheduleTypeVal)) { 4270 case OMPScheduleType::UnorderedStatic: 4271 case OMPScheduleType::UnorderedStaticChunked: 4272 case OMPScheduleType::OrderedDistribute: 4273 case OMPScheduleType::OrderedDistributeChunked: 4274 break; 4275 default: 4276 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4277 SPMDCompatibilityTracker.insert(&CB); 4278 break; 4279 }; 4280 } break; 4281 case OMPRTL___kmpc_target_init: 4282 KernelInitCB = &CB; 4283 break; 4284 case OMPRTL___kmpc_target_deinit: 4285 KernelDeinitCB = &CB; 4286 break; 4287 case OMPRTL___kmpc_parallel_51: 4288 if (auto *ParallelRegion = dyn_cast<Function>( 4289 CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) { 4290 ReachedKnownParallelRegions.insert(ParallelRegion); 4291 break; 4292 } 4293 // The condition above should usually get the parallel region function 4294 // pointer and record it. In the off chance it doesn't we assume the 4295 // worst. 4296 ReachedUnknownParallelRegions.insert(&CB); 4297 break; 4298 case OMPRTL___kmpc_omp_task: 4299 // We do not look into tasks right now, just give up. 4300 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4301 SPMDCompatibilityTracker.insert(&CB); 4302 ReachedUnknownParallelRegions.insert(&CB); 4303 break; 4304 case OMPRTL___kmpc_alloc_shared: 4305 case OMPRTL___kmpc_free_shared: 4306 // Return without setting a fixpoint, to be resolved in updateImpl. 4307 return; 4308 default: 4309 // Unknown OpenMP runtime calls cannot be executed in SPMD-mode, 4310 // generally. However, they do not hide parallel regions. 4311 SPMDCompatibilityTracker.indicatePessimisticFixpoint(); 4312 SPMDCompatibilityTracker.insert(&CB); 4313 break; 4314 } 4315 // All other OpenMP runtime calls will not reach parallel regions so they 4316 // can be safely ignored for now. Since it is a known OpenMP runtime call we 4317 // have now modeled all effects and there is no need for any update. 
    indicateOptimisticFixpoint();
  }

  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();

    auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
    const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F);

    // If F is not a runtime function, propagate the AAKernelInfo of the
    // callee.
    if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
      const IRPosition &FnPos = IRPosition::function(*F);
      auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED);
      if (getState() == FnAA.getState())
        return ChangeStatus::UNCHANGED;
      getState() = FnAA.getState();
      return ChangeStatus::CHANGED;
    }

    // F is a runtime function that allocates or frees memory, check
    // AAHeapToStack and AAHeapToShared.
    KernelInfoState StateBefore = getState();
    assert((It->getSecond() == OMPRTL___kmpc_alloc_shared ||
            It->getSecond() == OMPRTL___kmpc_free_shared) &&
           "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call");

    CallBase &CB = cast<CallBase>(getAssociatedValue());

    auto &HeapToStackAA = A.getAAFor<AAHeapToStack>(
        *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
    auto &HeapToSharedAA = A.getAAFor<AAHeapToShared>(
        *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);

    RuntimeFunction RF = It->getSecond();

    switch (RF) {
    // If neither HeapToStack nor HeapToShared assume the call is removed,
    // assume SPMD incompatibility.
    case OMPRTL___kmpc_alloc_shared:
      if (!HeapToStackAA.isAssumedHeapToStack(CB) &&
          !HeapToSharedAA.isAssumedHeapToShared(CB))
        SPMDCompatibilityTracker.insert(&CB);
      break;
    case OMPRTL___kmpc_free_shared:
      if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) &&
          !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB))
        SPMDCompatibilityTracker.insert(&CB);
      break;
    default:
      SPMDCompatibilityTracker.indicatePessimisticFixpoint();
      SPMDCompatibilityTracker.insert(&CB);
    }

    return StateBefore == getState() ? ChangeStatus::UNCHANGED
                                     : ChangeStatus::CHANGED;
  }
};

struct AAFoldRuntimeCall
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Statistics are tracked as part of manifest for now.
  void trackStatistics() const override {}

  /// Create an abstract attribute view for the position \p IRP.
4390 static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP, 4391 Attributor &A); 4392 4393 /// See AbstractAttribute::getName() 4394 const std::string getName() const override { return "AAFoldRuntimeCall"; } 4395 4396 /// See AbstractAttribute::getIdAddr() 4397 const char *getIdAddr() const override { return &ID; } 4398 4399 /// This function should return true if the type of the \p AA is 4400 /// AAFoldRuntimeCall 4401 static bool classof(const AbstractAttribute *AA) { 4402 return (AA->getIdAddr() == &ID); 4403 } 4404 4405 static const char ID; 4406 }; 4407 4408 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { 4409 AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A) 4410 : AAFoldRuntimeCall(IRP, A) {} 4411 4412 /// See AbstractAttribute::getAsStr() 4413 const std::string getAsStr() const override { 4414 if (!isValidState()) 4415 return "<invalid>"; 4416 4417 std::string Str("simplified value: "); 4418 4419 if (!SimplifiedValue) 4420 return Str + std::string("none"); 4421 4422 if (!SimplifiedValue.value()) 4423 return Str + std::string("nullptr"); 4424 4425 if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.value())) 4426 return Str + std::to_string(CI->getSExtValue()); 4427 4428 return Str + std::string("unknown"); 4429 } 4430 4431 void initialize(Attributor &A) override { 4432 if (DisableOpenMPOptFolding) 4433 indicatePessimisticFixpoint(); 4434 4435 Function *Callee = getAssociatedFunction(); 4436 4437 auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache()); 4438 const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); 4439 assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() && 4440 "Expected a known OpenMP runtime function"); 4441 4442 RFKind = It->getSecond(); 4443 4444 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4445 A.registerSimplificationCallback( 4446 IRPosition::callsite_returned(CB), 4447 [&](const IRPosition &IRP, const AbstractAttribute *AA, 4448 bool &UsedAssumedInformation) -> Optional<Value *> { 4449 assert((isValidState() || 4450 (SimplifiedValue && SimplifiedValue.value() == nullptr)) && 4451 "Unexpected invalid state!"); 4452 4453 if (!isAtFixpoint()) { 4454 UsedAssumedInformation = true; 4455 if (AA) 4456 A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); 4457 } 4458 return SimplifiedValue; 4459 }); 4460 } 4461 4462 ChangeStatus updateImpl(Attributor &A) override { 4463 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4464 switch (RFKind) { 4465 case OMPRTL___kmpc_is_spmd_exec_mode: 4466 Changed |= foldIsSPMDExecMode(A); 4467 break; 4468 case OMPRTL___kmpc_is_generic_main_thread_id: 4469 Changed |= foldIsGenericMainThread(A); 4470 break; 4471 case OMPRTL___kmpc_parallel_level: 4472 Changed |= foldParallelLevel(A); 4473 break; 4474 case OMPRTL___kmpc_get_hardware_num_threads_in_block: 4475 Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit"); 4476 break; 4477 case OMPRTL___kmpc_get_hardware_num_blocks: 4478 Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams"); 4479 break; 4480 default: 4481 llvm_unreachable("Unhandled OpenMP runtime function!"); 4482 } 4483 4484 return Changed; 4485 } 4486 4487 ChangeStatus manifest(Attributor &A) override { 4488 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4489 4490 if (SimplifiedValue && *SimplifiedValue) { 4491 Instruction &I = *getCtxI(); 4492 A.changeAfterManifest(IRPosition::inst(I), **SimplifiedValue); 4493 A.deleteAfterManifest(I); 4494 4495 CallBase *CB = dyn_cast<CallBase>(&I); 4496 auto Remark = 
[&](OptimizationRemark OR) { 4497 if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue)) 4498 return OR << "Replacing OpenMP runtime call " 4499 << CB->getCalledFunction()->getName() << " with " 4500 << ore::NV("FoldedValue", C->getZExtValue()) << "."; 4501 return OR << "Replacing OpenMP runtime call " 4502 << CB->getCalledFunction()->getName() << "."; 4503 }; 4504 4505 if (CB && EnableVerboseRemarks) 4506 A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark); 4507 4508 LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with " 4509 << **SimplifiedValue << "\n"); 4510 4511 Changed = ChangeStatus::CHANGED; 4512 } 4513 4514 return Changed; 4515 } 4516 4517 ChangeStatus indicatePessimisticFixpoint() override { 4518 SimplifiedValue = nullptr; 4519 return AAFoldRuntimeCall::indicatePessimisticFixpoint(); 4520 } 4521 4522 private: 4523 /// Fold __kmpc_is_spmd_exec_mode into a constant if possible. 4524 ChangeStatus foldIsSPMDExecMode(Attributor &A) { 4525 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4526 4527 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 4528 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 4529 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4530 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4531 4532 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4533 return indicatePessimisticFixpoint(); 4534 4535 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4536 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 4537 DepClassTy::REQUIRED); 4538 4539 if (!AA.isValidState()) { 4540 SimplifiedValue = nullptr; 4541 return indicatePessimisticFixpoint(); 4542 } 4543 4544 if (AA.SPMDCompatibilityTracker.isAssumed()) { 4545 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4546 ++KnownSPMDCount; 4547 else 4548 ++AssumedSPMDCount; 4549 } else { 4550 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4551 ++KnownNonSPMDCount; 4552 else 4553 ++AssumedNonSPMDCount; 4554 } 4555 } 4556 4557 if ((AssumedSPMDCount + KnownSPMDCount) && 4558 (AssumedNonSPMDCount + KnownNonSPMDCount)) 4559 return indicatePessimisticFixpoint(); 4560 4561 auto &Ctx = getAnchorValue().getContext(); 4562 if (KnownSPMDCount || AssumedSPMDCount) { 4563 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 4564 "Expected only SPMD kernels!"); 4565 // All reaching kernels are in SPMD mode. Update all function calls to 4566 // __kmpc_is_spmd_exec_mode to 1. 4567 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); 4568 } else if (KnownNonSPMDCount || AssumedNonSPMDCount) { 4569 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 4570 "Expected only non-SPMD kernels!"); 4571 // All reaching kernels are in non-SPMD mode. Update all function 4572 // calls to __kmpc_is_spmd_exec_mode to 0. 4573 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false); 4574 } else { 4575 // We have empty reaching kernels, therefore we cannot tell if the 4576 // associated call site can be folded. At this moment, SimplifiedValue 4577 // must be none. 4578 assert(!SimplifiedValue && "SimplifiedValue should be none"); 4579 } 4580 4581 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4582 : ChangeStatus::CHANGED; 4583 } 4584 4585 /// Fold __kmpc_is_generic_main_thread_id into a constant if possible. 
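  /// The runtime call returns true only for the initial ("main") thread of a
  /// generic-mode kernel, so it can be folded to true whenever the call is
  /// provably executed by the initial thread alone; otherwise we give up.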
4586 ChangeStatus foldIsGenericMainThread(Attributor &A) { 4587 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4588 4589 CallBase &CB = cast<CallBase>(getAssociatedValue()); 4590 Function *F = CB.getFunction(); 4591 const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>( 4592 *this, IRPosition::function(*F), DepClassTy::REQUIRED); 4593 4594 if (!ExecutionDomainAA.isValidState()) 4595 return indicatePessimisticFixpoint(); 4596 4597 auto &Ctx = getAnchorValue().getContext(); 4598 if (ExecutionDomainAA.isExecutedByInitialThreadOnly(CB)) 4599 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); 4600 else 4601 return indicatePessimisticFixpoint(); 4602 4603 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4604 : ChangeStatus::CHANGED; 4605 } 4606 4607 /// Fold __kmpc_parallel_level into a constant if possible. 4608 ChangeStatus foldParallelLevel(Attributor &A) { 4609 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4610 4611 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4612 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4613 4614 if (!CallerKernelInfoAA.ParallelLevels.isValidState()) 4615 return indicatePessimisticFixpoint(); 4616 4617 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4618 return indicatePessimisticFixpoint(); 4619 4620 if (CallerKernelInfoAA.ReachingKernelEntries.empty()) { 4621 assert(!SimplifiedValue && 4622 "SimplifiedValue should keep none at this point"); 4623 return ChangeStatus::UNCHANGED; 4624 } 4625 4626 unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; 4627 unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; 4628 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4629 auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K), 4630 DepClassTy::REQUIRED); 4631 if (!AA.SPMDCompatibilityTracker.isValidState()) 4632 return indicatePessimisticFixpoint(); 4633 4634 if (AA.SPMDCompatibilityTracker.isAssumed()) { 4635 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4636 ++KnownSPMDCount; 4637 else 4638 ++AssumedSPMDCount; 4639 } else { 4640 if (AA.SPMDCompatibilityTracker.isAtFixpoint()) 4641 ++KnownNonSPMDCount; 4642 else 4643 ++AssumedNonSPMDCount; 4644 } 4645 } 4646 4647 if ((AssumedSPMDCount + KnownSPMDCount) && 4648 (AssumedNonSPMDCount + KnownNonSPMDCount)) 4649 return indicatePessimisticFixpoint(); 4650 4651 auto &Ctx = getAnchorValue().getContext(); 4652 // If the caller can only be reached by SPMD kernel entries, the parallel 4653 // level is 1. Similarly, if the caller can only be reached by non-SPMD 4654 // kernel entries, it is 0. 4655 if (AssumedSPMDCount || KnownSPMDCount) { 4656 assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && 4657 "Expected only SPMD kernels!"); 4658 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1); 4659 } else { 4660 assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && 4661 "Expected only non-SPMD kernels!"); 4662 SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0); 4663 } 4664 return SimplifiedValue == SimplifiedValueBefore ? 
ChangeStatus::UNCHANGED 4665 : ChangeStatus::CHANGED; 4666 } 4667 4668 ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) { 4669 // Specialize only if all the calls agree with the attribute constant value 4670 int32_t CurrentAttrValue = -1; 4671 Optional<Value *> SimplifiedValueBefore = SimplifiedValue; 4672 4673 auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>( 4674 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 4675 4676 if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) 4677 return indicatePessimisticFixpoint(); 4678 4679 // Iterate over the kernels that reach this function 4680 for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { 4681 int32_t NextAttrVal = -1; 4682 if (K->hasFnAttribute(Attr)) 4683 NextAttrVal = 4684 std::stoi(K->getFnAttribute(Attr).getValueAsString().str()); 4685 4686 if (NextAttrVal == -1 || 4687 (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal)) 4688 return indicatePessimisticFixpoint(); 4689 CurrentAttrValue = NextAttrVal; 4690 } 4691 4692 if (CurrentAttrValue != -1) { 4693 auto &Ctx = getAnchorValue().getContext(); 4694 SimplifiedValue = 4695 ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue); 4696 } 4697 return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED 4698 : ChangeStatus::CHANGED; 4699 } 4700 4701 /// An optional value the associated value is assumed to fold to. That is, we 4702 /// assume the associated value (which is a call) can be replaced by this 4703 /// simplified value. 4704 Optional<Value *> SimplifiedValue; 4705 4706 /// The runtime function kind of the callee of the associated call site. 4707 RuntimeFunction RFKind; 4708 }; 4709 4710 } // namespace 4711 4712 /// Register folding callsite 4713 void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) { 4714 auto &RFI = OMPInfoCache.RFIs[RF]; 4715 RFI.foreachUse(SCC, [&](Use &U, Function &F) { 4716 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI); 4717 if (!CI) 4718 return false; 4719 A.getOrCreateAAFor<AAFoldRuntimeCall>( 4720 IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr, 4721 DepClassTy::NONE, /* ForceUpdate */ false, 4722 /* UpdateAfterInit */ false); 4723 return false; 4724 }); 4725 } 4726 4727 void OpenMPOpt::registerAAs(bool IsModulePass) { 4728 if (SCC.empty()) 4729 return; 4730 4731 if (IsModulePass) { 4732 // Ensure we create the AAKernelInfo AAs first and without triggering an 4733 // update. This will make sure we register all value simplification 4734 // callbacks before any other AA has the chance to create an AAValueSimplify 4735 // or similar. 4736 auto CreateKernelInfoCB = [&](Use &, Function &Kernel) { 4737 A.getOrCreateAAFor<AAKernelInfo>( 4738 IRPosition::function(Kernel), /* QueryingAA */ nullptr, 4739 DepClassTy::NONE, /* ForceUpdate */ false, 4740 /* UpdateAfterInit */ false); 4741 return false; 4742 }; 4743 OMPInformationCache::RuntimeFunctionInfo &InitRFI = 4744 OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; 4745 InitRFI.foreachUse(SCC, CreateKernelInfoCB); 4746 4747 registerFoldRuntimeCall(OMPRTL___kmpc_is_generic_main_thread_id); 4748 registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode); 4749 registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level); 4750 registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block); 4751 registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks); 4752 } 4753 4754 // Create CallSite AA for all Getters. 
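  // Each ICV getter call site gets an AAICVTracker so the tracked ICV value
  // can later be used to fold or deduplicate the call.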
4755 for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) { 4756 auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)]; 4757 4758 auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter]; 4759 4760 auto CreateAA = [&](Use &U, Function &Caller) { 4761 CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI); 4762 if (!CI) 4763 return false; 4764 4765 auto &CB = cast<CallBase>(*CI); 4766 4767 IRPosition CBPos = IRPosition::callsite_function(CB); 4768 A.getOrCreateAAFor<AAICVTracker>(CBPos); 4769 return false; 4770 }; 4771 4772 GetterRFI.foreachUse(SCC, CreateAA); 4773 } 4774 auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; 4775 auto CreateAA = [&](Use &U, Function &F) { 4776 A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F)); 4777 return false; 4778 }; 4779 if (!DisableOpenMPOptDeglobalization) 4780 GlobalizationRFI.foreachUse(SCC, CreateAA); 4781 4782 // Create an ExecutionDomain AA for every function and a HeapToStack AA for 4783 // every function if there is a device kernel. 4784 if (!isOpenMPDevice(M)) 4785 return; 4786 4787 for (auto *F : SCC) { 4788 if (F->isDeclaration()) 4789 continue; 4790 4791 A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(*F)); 4792 if (!DisableOpenMPOptDeglobalization) 4793 A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(*F)); 4794 4795 for (auto &I : instructions(*F)) { 4796 if (auto *LI = dyn_cast<LoadInst>(&I)) { 4797 bool UsedAssumedInformation = false; 4798 A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr, 4799 UsedAssumedInformation, AA::Interprocedural); 4800 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 4801 A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI)); 4802 } 4803 } 4804 } 4805 } 4806 4807 const char AAICVTracker::ID = 0; 4808 const char AAKernelInfo::ID = 0; 4809 const char AAExecutionDomain::ID = 0; 4810 const char AAHeapToShared::ID = 0; 4811 const char AAFoldRuntimeCall::ID = 0; 4812 4813 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP, 4814 Attributor &A) { 4815 AAICVTracker *AA = nullptr; 4816 switch (IRP.getPositionKind()) { 4817 case IRPosition::IRP_INVALID: 4818 case IRPosition::IRP_FLOAT: 4819 case IRPosition::IRP_ARGUMENT: 4820 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4821 llvm_unreachable("ICVTracker can only be created for function position!"); 4822 case IRPosition::IRP_RETURNED: 4823 AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A); 4824 break; 4825 case IRPosition::IRP_CALL_SITE_RETURNED: 4826 AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A); 4827 break; 4828 case IRPosition::IRP_CALL_SITE: 4829 AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A); 4830 break; 4831 case IRPosition::IRP_FUNCTION: 4832 AA = new (A.Allocator) AAICVTrackerFunction(IRP, A); 4833 break; 4834 } 4835 4836 return *AA; 4837 } 4838 4839 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP, 4840 Attributor &A) { 4841 AAExecutionDomainFunction *AA = nullptr; 4842 switch (IRP.getPositionKind()) { 4843 case IRPosition::IRP_INVALID: 4844 case IRPosition::IRP_FLOAT: 4845 case IRPosition::IRP_ARGUMENT: 4846 case IRPosition::IRP_CALL_SITE_ARGUMENT: 4847 case IRPosition::IRP_RETURNED: 4848 case IRPosition::IRP_CALL_SITE_RETURNED: 4849 case IRPosition::IRP_CALL_SITE: 4850 llvm_unreachable( 4851 "AAExecutionDomain can only be created for function position!"); 4852 case IRPosition::IRP_FUNCTION: 4853 AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A); 4854 break; 4855 } 4856 4857 return *AA; 4858 } 4859 4860 
AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP,
                                                  Attributor &A) {
  AAHeapToSharedFunction *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE:
    llvm_unreachable(
        "AAHeapToShared can only be created for function position!");
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A);
    break;
  }

  return *AA;
}

AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP,
                                              Attributor &A) {
  AAKernelInfo *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    llvm_unreachable(
        "KernelInfo can only be created for function or call site position!");
  case IRPosition::IRP_CALL_SITE:
    AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A);
    break;
  case IRPosition::IRP_FUNCTION:
    AA = new (A.Allocator) AAKernelInfoFunction(IRP, A);
    break;
  }

  return *AA;
}

AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP,
                                                        Attributor &A) {
  AAFoldRuntimeCall *AA = nullptr;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    llvm_unreachable(
        "AAFoldRuntimeCall can only be created for call site returned "
        "position!");
  case IRPosition::IRP_CALL_SITE_RETURNED:
    AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A);
    break;
  }

  return *AA;
}
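
// A minimal usage sketch for the module pass below (assuming it is registered
// under the name "openmp-opt" in PassRegistry.def, as is the upstream
// convention):
//
//   opt -passes=openmp-opt -S input.ll -o output.ll
//
// On device modules the pass first internalizes callees so that the
// interprocedural analyses can see every call edge.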
PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
  if (!containsOpenMP(M))
    return PreservedAnalyses::all();
  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  KernelSet Kernels = getDeviceKernels(M);

  if (PrintModuleBeforeOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt Module Pass:\n" << M);

  auto IsCalled = [&](Function &F) {
    if (Kernels.contains(&F))
      return true;
    for (const User *U : F.users())
      if (!isa<BlockAddress>(U))
        return true;
    return false;
  };

  auto EmitRemark = [&](Function &F) {
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
    ORE.emit([&]() {
      OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F);
      return ORA << "Could not internalize function. "
                 << "Some optimizations may not be possible. [OMP140]";
    });
  };

  // Create internal copies of each function if this is a kernel module. This
  // allows interprocedural passes to see every call edge.
  DenseMap<Function *, Function *> InternalizedMap;
  if (isOpenMPDevice(M)) {
    SmallPtrSet<Function *, 16> InternalizeFns;
    for (Function &F : M)
      if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) &&
          !DisableInternalization) {
        if (Attributor::isInternalizable(F)) {
          InternalizeFns.insert(&F);
        } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) {
          EmitRemark(F);
        }
      }

    Attributor::internalizeFunctions(InternalizeFns, InternalizedMap);
  }

  // Look at every function in the module unless it was internalized.
  SmallVector<Function *, 16> SCC;
  for (Function &F : M)
    if (!F.isDeclaration() && !InternalizedMap.lookup(&F))
      SCC.push_back(&F);

  if (SCC.empty())
    return PreservedAnalyses::all();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  BumpPtrAllocator Allocator;
  CallGraphUpdater CGUpdater;

  SetVector<Function *> Functions(SCC.begin(), SCC.end());
  OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels);

  unsigned MaxFixpointIterations =
      (isOpenMPDevice(M)) ? SetFixpointIterations : 32;

  AttributorConfig AC(CGUpdater);
  AC.DefaultInitializeLiveInternals = false;
  AC.RewriteSignatures = false;
  AC.MaxFixpointIterations = MaxFixpointIterations;
  AC.OREGetter = OREGetter;
  AC.PassName = DEBUG_TYPE;

  Attributor A(Functions, InfoCache, AC);

  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  bool Changed = OMPOpt.run(true);

  // Optionally inline device functions for potentially better performance.
  if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M))
    for (Function &F : M)
      if (!F.isDeclaration() && !Kernels.contains(&F) &&
          !F.hasFnAttribute(Attribute::NoInline))
        F.addFnAttr(Attribute::AlwaysInline);

  if (PrintModuleAfterOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M);

  if (Changed)
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}
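
// The CGSCC flavor below mirrors the module pass above but operates on one
// SCC at a time: it performs no internalization, sets AC.IsModulePass to
// false, and invokes OMPOpt.run(false).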
PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
                                          CGSCCAnalysisManager &AM,
                                          LazyCallGraph &CG,
                                          CGSCCUpdateResult &UR) {
  if (!containsOpenMP(*C.begin()->getFunction().getParent()))
    return PreservedAnalyses::all();
  if (DisableOpenMPOptimizations)
    return PreservedAnalyses::all();

  SmallVector<Function *, 16> SCC;
  // If there are kernels in the module, we have to run on all SCCs.
  for (LazyCallGraph::Node &N : C) {
    Function *Fn = &N.getFunction();
    SCC.push_back(Fn);
  }

  if (SCC.empty())
    return PreservedAnalyses::all();

  Module &M = *C.begin()->getFunction().getParent();

  if (PrintModuleBeforeOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt CGSCC Pass:\n" << M);

  KernelSet Kernels = getDeviceKernels(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  AnalysisGetter AG(FAM);

  auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };

  BumpPtrAllocator Allocator;
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);

  SetVector<Function *> Functions(SCC.begin(), SCC.end());
  OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
                                /*CGSCC*/ Functions, Kernels);

  unsigned MaxFixpointIterations =
      (isOpenMPDevice(M)) ? SetFixpointIterations : 32;

  AttributorConfig AC(CGUpdater);
  AC.DefaultInitializeLiveInternals = false;
  AC.IsModulePass = false;
  AC.RewriteSignatures = false;
  AC.MaxFixpointIterations = MaxFixpointIterations;
  AC.OREGetter = OREGetter;
  AC.PassName = DEBUG_TYPE;

  Attributor A(Functions, InfoCache, AC);

  OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
  bool Changed = OMPOpt.run(false);

  if (PrintModuleAfterOptimizations)
    LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);

  if (Changed)
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}
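
// Legacy pass manager wrapper around the same logic. It is registered as
// "openmp-opt-cgscc" via the INITIALIZE_PASS machinery at the end of this
// file and builds its own OptimizationRemarkEmitter instances, since no
// FunctionAnalysisManager is available here.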
namespace {

struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass {
  CallGraphUpdater CGUpdater;
  static char ID;

  OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) {
    initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &CGSCC) override {
    if (!containsOpenMP(CGSCC.getCallGraph().getModule()))
      return false;
    if (DisableOpenMPOptimizations || skipSCC(CGSCC))
      return false;

    SmallVector<Function *, 16> SCC;
    // If there are kernels in the module, we have to run on all SCCs.
    for (CallGraphNode *CGN : CGSCC) {
      Function *Fn = CGN->getFunction();
      if (!Fn || Fn->isDeclaration())
        continue;
      SCC.push_back(Fn);
    }

    if (SCC.empty())
      return false;

    Module &M = CGSCC.getCallGraph().getModule();
    KernelSet Kernels = getDeviceKernels(M);

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
    CGUpdater.initialize(CG, CGSCC);

    // Maintain a map of functions to avoid rebuilding the
    // OptimizationRemarkEmitter for the same function.
    DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
    auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
      std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
      if (!ORE)
        ORE = std::make_unique<OptimizationRemarkEmitter>(F);
      return *ORE;
    };

    AnalysisGetter AG;
    SetVector<Function *> Functions(SCC.begin(), SCC.end());
    BumpPtrAllocator Allocator;
    OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG,
                                  Allocator,
                                  /*CGSCC*/ Functions, Kernels);

    unsigned MaxFixpointIterations =
        (isOpenMPDevice(M)) ? SetFixpointIterations : 32;

    AttributorConfig AC(CGUpdater);
    AC.DefaultInitializeLiveInternals = false;
    AC.IsModulePass = false;
    AC.RewriteSignatures = false;
    AC.MaxFixpointIterations = MaxFixpointIterations;
    AC.OREGetter = OREGetter;
    AC.PassName = DEBUG_TYPE;

    Attributor A(Functions, InfoCache, AC);

    OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
    bool Result = OMPOpt.run(false);

    if (PrintModuleAfterOptimizations)
      LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);

    return Result;
  }

  bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
};

} // end anonymous namespace

KernelSet llvm::omp::getDeviceKernels(Module &M) {
  // TODO: Create a more cross-platform way of determining device kernels.
  // Kernels are currently identified through "nvvm.annotations" metadata
  // entries, which typically look like (with opaque pointers):
  //
  //   !0 = !{ptr @kernel_fn, !"kernel", i32 1}
  //
  // Use getNamedMetadata (not getOrInsertNamedMetadata) so that querying does
  // not modify the module; the result may therefore be null.
  NamedMDNode *MD = M.getNamedMetadata("nvvm.annotations");
  KernelSet Kernels;

  if (!MD)
    return Kernels;

  for (auto *Op : MD->operands()) {
    if (Op->getNumOperands() < 2)
      continue;
    MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
    if (!KindID || KindID->getString() != "kernel")
      continue;

    Function *KernelFn =
        mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
    if (!KernelFn)
      continue;

    ++NumOpenMPTargetRegionKernels;

    Kernels.insert(KernelFn);
  }

  return Kernels;
}

bool llvm::omp::containsOpenMP(Module &M) {
  Metadata *MD = M.getModuleFlag("openmp");
  if (!MD)
    return false;

  return true;
}

bool llvm::omp::isOpenMPDevice(Module &M) {
  Metadata *MD = M.getModuleFlag("openmp-device");
  if (!MD)
    return false;

  return true;
}

char OpenMPOptCGSCCLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
                      "OpenMP specific optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
                    "OpenMP specific optimizations", false, false)

Pass *llvm::createOpenMPOptCGSCCLegacyPass() {
  return new OpenMPOptCGSCCLegacyPass();
}