xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp (revision d56accc7c3dcc897489b6a07834763a03b9f3d68)
1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // OpenMP specific optimizations:
10 //
11 // - Deduplication of runtime calls, e.g., omp_get_thread_num.
12 // - Replacing globalized device memory with stack memory.
13 // - Replacing globalized device memory with shared memory.
14 // - Parallel region merging.
15 // - Transforming generic-mode device kernels to SPMD mode.
16 // - Specializing the state machine for generic-mode device kernels.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "llvm/Transforms/IPO/OpenMPOpt.h"
21 
22 #include "llvm/ADT/EnumeratedArray.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/CallGraph.h"
28 #include "llvm/Analysis/CallGraphSCCPass.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Frontend/OpenMP/OMPConstants.h"
33 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
34 #include "llvm/IR/Assumptions.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DiagnosticInfo.h"
37 #include "llvm/IR/GlobalValue.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/IntrinsicsAMDGPU.h"
43 #include "llvm/IR/IntrinsicsNVPTX.h"
44 #include "llvm/IR/LLVMContext.h"
45 #include "llvm/InitializePasses.h"
46 #include "llvm/Support/CommandLine.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Transforms/IPO.h"
49 #include "llvm/Transforms/IPO/Attributor.h"
50 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
51 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
52 #include "llvm/Transforms/Utils/CodeExtractor.h"
53 
54 #include <algorithm>
55 
56 using namespace llvm;
57 using namespace omp;
58 
59 #define DEBUG_TYPE "openmp-opt"
60 
61 static cl::opt<bool> DisableOpenMPOptimizations(
62     "openmp-opt-disable", cl::ZeroOrMore,
63     cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
64     cl::init(false));
65 
66 static cl::opt<bool> EnableParallelRegionMerging(
67     "openmp-opt-enable-merging", cl::ZeroOrMore,
68     cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
69     cl::init(false));
70 
71 static cl::opt<bool>
72     DisableInternalization("openmp-opt-disable-internalization", cl::ZeroOrMore,
73                            cl::desc("Disable function internalization."),
74                            cl::Hidden, cl::init(false));
75 
76 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
77                                     cl::Hidden);
78 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
79                                         cl::init(false), cl::Hidden);
80 
81 static cl::opt<bool> HideMemoryTransferLatency(
82     "openmp-hide-memory-transfer-latency",
83     cl::desc("[WIP] Tries to hide the latency of host to device memory"
84              " transfers"),
85     cl::Hidden, cl::init(false));
86 
87 static cl::opt<bool> DisableOpenMPOptDeglobalization(
88     "openmp-opt-disable-deglobalization", cl::ZeroOrMore,
89     cl::desc("Disable OpenMP optimizations involving deglobalization."),
90     cl::Hidden, cl::init(false));
91 
92 static cl::opt<bool> DisableOpenMPOptSPMDization(
93     "openmp-opt-disable-spmdization", cl::ZeroOrMore,
94     cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
95     cl::Hidden, cl::init(false));
96 
97 static cl::opt<bool> DisableOpenMPOptFolding(
98     "openmp-opt-disable-folding", cl::ZeroOrMore,
99     cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
100     cl::init(false));
101 
102 static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
103     "openmp-opt-disable-state-machine-rewrite", cl::ZeroOrMore,
104     cl::desc("Disable OpenMP optimizations that replace the state machine."),
105     cl::Hidden, cl::init(false));
106 
107 static cl::opt<bool> DisableOpenMPOptBarrierElimination(
108     "openmp-opt-disable-barrier-elimination", cl::ZeroOrMore,
109     cl::desc("Disable OpenMP optimizations that eliminate barriers."),
110     cl::Hidden, cl::init(false));
111 
112 static cl::opt<bool> PrintModuleAfterOptimizations(
113     "openmp-opt-print-module", cl::ZeroOrMore,
114     cl::desc("Print the current module after OpenMP optimizations."),
115     cl::Hidden, cl::init(false));
116 
117 static cl::opt<bool> AlwaysInlineDeviceFunctions(
118     "openmp-opt-inline-device", cl::ZeroOrMore,
119     cl::desc("Inline all applicible functions on the device."), cl::Hidden,
120     cl::init(false));
121 
122 static cl::opt<bool>
123     EnableVerboseRemarks("openmp-opt-verbose-remarks", cl::ZeroOrMore,
124                          cl::desc("Enables more verbose remarks."), cl::Hidden,
125                          cl::init(false));
126 
127 static cl::opt<unsigned>
128     SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
129                           cl::desc("Maximal number of attributor iterations."),
130                           cl::init(256));
131 
132 STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
133           "Number of OpenMP runtime calls deduplicated");
134 STATISTIC(NumOpenMPParallelRegionsDeleted,
135           "Number of OpenMP parallel regions deleted");
136 STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
137           "Number of OpenMP runtime functions identified");
138 STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
139           "Number of OpenMP runtime function uses identified");
140 STATISTIC(NumOpenMPTargetRegionKernels,
141           "Number of OpenMP target region entry points (=kernels) identified");
142 STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
143           "Number of OpenMP target region entry points (=kernels) executed in "
144           "SPMD-mode instead of generic-mode");
145 STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
146           "Number of OpenMP target region entry points (=kernels) executed in "
147           "generic-mode without a state machines");
148 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
149           "Number of OpenMP target region entry points (=kernels) executed in "
150           "generic-mode with customized state machines with fallback");
151 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
152           "Number of OpenMP target region entry points (=kernels) executed in "
153           "generic-mode with customized state machines without fallback");
154 STATISTIC(
155     NumOpenMPParallelRegionsReplacedInGPUStateMachine,
156     "Number of OpenMP parallel regions replaced with ID in GPU state machines");
157 STATISTIC(NumOpenMPParallelRegionsMerged,
158           "Number of OpenMP parallel regions merged");
159 STATISTIC(NumBytesMovedToSharedMemory,
160           "Amount of memory pushed to shared memory");
161 STATISTIC(NumBarriersEliminated, "Number of redundant barriers eliminated");
162 
163 #if !defined(NDEBUG)
164 static constexpr auto TAG = "[" DEBUG_TYPE "]";
165 #endif
166 
167 namespace {
168 
169 struct AAHeapToShared;
170 
171 struct AAICVTracker;
172 
173 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for
174 /// Attributor runs.
175 struct OMPInformationCache : public InformationCache {
176   OMPInformationCache(Module &M, AnalysisGetter &AG,
177                       BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
178                       KernelSet &Kernels)
179       : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
180         Kernels(Kernels) {
181 
182     OMPBuilder.initialize();
183     initializeRuntimeFunctions();
184     initializeInternalControlVars();
185   }
186 
187   /// Generic information that describes an internal control variable.
188   struct InternalControlVarInfo {
189     /// The kind, as described by InternalControlVar enum.
190     InternalControlVar Kind;
191 
192     /// The name of the ICV.
193     StringRef Name;
194 
195     /// Environment variable associated with this ICV.
196     StringRef EnvVarName;
197 
198     /// Initial value kind.
199     ICVInitValue InitKind;
200 
201     /// Initial value.
202     ConstantInt *InitValue;
203 
204     /// Setter RTL function associated with this ICV.
205     RuntimeFunction Setter;
206 
207     /// Getter RTL function associated with this ICV.
208     RuntimeFunction Getter;
209 
210     /// RTL Function corresponding to the override clause of this ICV
211     RuntimeFunction Clause;
212   };
213 
214   /// Generic information that describes a runtime function
215   struct RuntimeFunctionInfo {
216 
217     /// The kind, as described by the RuntimeFunction enum.
218     RuntimeFunction Kind;
219 
220     /// The name of the function.
221     StringRef Name;
222 
223     /// Flag to indicate a variadic function.
224     bool IsVarArg;
225 
226     /// The return type of the function.
227     Type *ReturnType;
228 
229     /// The argument types of the function.
230     SmallVector<Type *, 8> ArgumentTypes;
231 
232     /// The declaration if available.
233     Function *Declaration = nullptr;
234 
235     /// Uses of this runtime function per function containing the use.
236     using UseVector = SmallVector<Use *, 16>;
237 
238     /// Clear UsesMap for runtime function.
239     void clearUsesMap() { UsesMap.clear(); }
240 
241     /// Boolean conversion that is true if the runtime function was found.
242     operator bool() const { return Declaration; }
243 
244     /// Return the vector of uses in function \p F.
245     UseVector &getOrCreateUseVector(Function *F) {
246       std::shared_ptr<UseVector> &UV = UsesMap[F];
247       if (!UV)
248         UV = std::make_shared<UseVector>();
249       return *UV;
250     }
251 
252     /// Return the vector of uses in function \p F or `nullptr` if there are
253     /// none.
254     const UseVector *getUseVector(Function &F) const {
255       auto I = UsesMap.find(&F);
256       if (I != UsesMap.end())
257         return I->second.get();
258       return nullptr;
259     }
260 
261     /// Return how many functions contain uses of this runtime function.
262     size_t getNumFunctionsWithUses() const { return UsesMap.size(); }
263 
264     /// Return the number of arguments (or the minimal number for variadic
265     /// functions).
266     size_t getNumArgs() const { return ArgumentTypes.size(); }
267 
268     /// Run the callback \p CB on each use and forget the use if the result is
269     /// true. The callback will be fed the function in which the use was
270     /// encountered as second argument.
271     void foreachUse(SmallVectorImpl<Function *> &SCC,
272                     function_ref<bool(Use &, Function &)> CB) {
273       for (Function *F : SCC)
274         foreachUse(CB, F);
275     }
276 
277     /// Run the callback \p CB on each use within the function \p F and forget
278     /// the use if the result is true.
279     void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
280       SmallVector<unsigned, 8> ToBeDeleted;
281       ToBeDeleted.clear();
282 
283       unsigned Idx = 0;
284       UseVector &UV = getOrCreateUseVector(F);
285 
286       for (Use *U : UV) {
287         if (CB(*U, *F))
288           ToBeDeleted.push_back(Idx);
289         ++Idx;
290       }
291 
292       // Remove the to-be-deleted indices in reverse order as prior
293       // modifications will not modify the smaller indices.
294       while (!ToBeDeleted.empty()) {
295         unsigned Idx = ToBeDeleted.pop_back_val();
296         UV[Idx] = UV.back();
297         UV.pop_back();
298       }
299     }
300 
301   private:
302     /// Map from functions to all uses of this runtime function contained in
303     /// them.
304     DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
305 
306   public:
307     /// Iterators for the uses of this runtime function.
308     decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
309     decltype(UsesMap)::iterator end() { return UsesMap.end(); }
310   };
311 
312   /// An OpenMP-IR-Builder instance
313   OpenMPIRBuilder OMPBuilder;
314 
315   /// Map from runtime function kind to the runtime function description.
316   EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
317                   RuntimeFunction::OMPRTL___last>
318       RFIs;
319 
320   /// Map from function declarations/definitions to their runtime enum type.
321   DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap;
322 
323   /// Map from ICV kind to the ICV description.
324   EnumeratedArray<InternalControlVarInfo, InternalControlVar,
325                   InternalControlVar::ICV___last>
326       ICVs;
327 
328   /// Helper to initialize all internal control variable information for those
329   /// defined in OMPKinds.def.
330   void initializeInternalControlVars() {
331 #define ICV_RT_SET(_Name, RTL)                                                 \
332   {                                                                            \
333     auto &ICV = ICVs[_Name];                                                   \
334     ICV.Setter = RTL;                                                          \
335   }
336 #define ICV_RT_GET(Name, RTL)                                                  \
337   {                                                                            \
338     auto &ICV = ICVs[Name];                                                    \
339     ICV.Getter = RTL;                                                          \
340   }
341 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
342   {                                                                            \
343     auto &ICV = ICVs[Enum];                                                    \
344     ICV.Name = _Name;                                                          \
345     ICV.Kind = Enum;                                                           \
346     ICV.InitKind = Init;                                                       \
347     ICV.EnvVarName = _EnvVarName;                                              \
348     switch (ICV.InitKind) {                                                    \
349     case ICV_IMPLEMENTATION_DEFINED:                                           \
350       ICV.InitValue = nullptr;                                                 \
351       break;                                                                   \
352     case ICV_ZERO:                                                             \
353       ICV.InitValue = ConstantInt::get(                                        \
354           Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
355       break;                                                                   \
356     case ICV_FALSE:                                                            \
357       ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
358       break;                                                                   \
359     case ICV_LAST:                                                             \
360       break;                                                                   \
361     }                                                                          \
362   }
363 #include "llvm/Frontend/OpenMP/OMPKinds.def"
364   }
365 
366   /// Returns true if the function declaration \p F matches the runtime
367   /// function types, that is, return type \p RTFRetType, and argument types
368   /// \p RTFArgTypes.
369   static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
370                                   SmallVector<Type *, 8> &RTFArgTypes) {
371     // TODO: We should output information to the user (under debug output
372     //       and via remarks).
373 
374     if (!F)
375       return false;
376     if (F->getReturnType() != RTFRetType)
377       return false;
378     if (F->arg_size() != RTFArgTypes.size())
379       return false;
380 
381     auto *RTFTyIt = RTFArgTypes.begin();
382     for (Argument &Arg : F->args()) {
383       if (Arg.getType() != *RTFTyIt)
384         return false;
385 
386       ++RTFTyIt;
387     }
388 
389     return true;
390   }
391 
392   // Helper to collect all uses of the declaration in the UsesMap.
393   unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
394     unsigned NumUses = 0;
395     if (!RFI.Declaration)
396       return NumUses;
397     OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);
398 
399     if (CollectStats) {
400       NumOpenMPRuntimeFunctionsIdentified += 1;
401       NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
402     }
403 
404     // TODO: We directly convert uses into proper calls and unknown uses.
405     for (Use &U : RFI.Declaration->uses()) {
406       if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
407         if (ModuleSlice.count(UserI->getFunction())) {
408           RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
409           ++NumUses;
410         }
411       } else {
412         RFI.getOrCreateUseVector(nullptr).push_back(&U);
413         ++NumUses;
414       }
415     }
416     return NumUses;
417   }
418 
419   // Helper function to recollect uses of a runtime function.
420   void recollectUsesForFunction(RuntimeFunction RTF) {
421     auto &RFI = RFIs[RTF];
422     RFI.clearUsesMap();
423     collectUses(RFI, /*CollectStats*/ false);
424   }
425 
426   // Helper function to recollect uses of all runtime functions.
427   void recollectUses() {
428     for (int Idx = 0; Idx < RFIs.size(); ++Idx)
429       recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
430   }
431 
432   // Helper function to inherit the calling convention of the function callee.
433   void setCallingConvention(FunctionCallee Callee, CallInst *CI) {
434     if (Function *Fn = dyn_cast<Function>(Callee.getCallee()))
435       CI->setCallingConv(Fn->getCallingConv());
436   }
437 
438   /// Helper to initialize all runtime function information for those defined
439   /// in OpenMPKinds.def.
440   void initializeRuntimeFunctions() {
441     Module &M = *((*ModuleSlice.begin())->getParent());
442 
443     // Helper macros for handling __VA_ARGS__ in OMP_RTL
444 #define OMP_TYPE(VarName, ...)                                                 \
445   Type *VarName = OMPBuilder.VarName;                                          \
446   (void)VarName;
447 
448 #define OMP_ARRAY_TYPE(VarName, ...)                                           \
449   ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
450   (void)VarName##Ty;                                                           \
451   PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
452   (void)VarName##PtrTy;
453 
454 #define OMP_FUNCTION_TYPE(VarName, ...)                                        \
455   FunctionType *VarName = OMPBuilder.VarName;                                  \
456   (void)VarName;                                                               \
457   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
458   (void)VarName##Ptr;
459 
460 #define OMP_STRUCT_TYPE(VarName, ...)                                          \
461   StructType *VarName = OMPBuilder.VarName;                                    \
462   (void)VarName;                                                               \
463   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
464   (void)VarName##Ptr;
465 
466 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
467   {                                                                            \
468     SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
469     Function *F = M.getFunction(_Name);                                        \
470     RTLFunctions.insert(F);                                                    \
471     if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
472       RuntimeFunctionIDMap[F] = _Enum;                                         \
473       auto &RFI = RFIs[_Enum];                                                 \
474       RFI.Kind = _Enum;                                                        \
475       RFI.Name = _Name;                                                        \
476       RFI.IsVarArg = _IsVarArg;                                                \
477       RFI.ReturnType = OMPBuilder._ReturnType;                                 \
478       RFI.ArgumentTypes = std::move(ArgsTypes);                                \
479       RFI.Declaration = F;                                                     \
480       unsigned NumUses = collectUses(RFI);                                     \
481       (void)NumUses;                                                           \
482       LLVM_DEBUG({                                                             \
483         dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
484                << " found\n";                                                  \
485         if (RFI.Declaration)                                                   \
486           dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
487                  << RFI.getNumFunctionsWithUses()                              \
488                  << " different functions.\n";                                 \
489       });                                                                      \
490     }                                                                          \
491   }
492 #include "llvm/Frontend/OpenMP/OMPKinds.def"
493 
494     // Remove the `noinline` attribute from `__kmpc`, `_OMP::` and `omp_`
495     // functions, except if `optnone` is present.
496     for (Function &F : M) {
497       for (StringRef Prefix : {"__kmpc", "_ZN4_OMP", "omp_"})
498         if (F.getName().startswith(Prefix) &&
499             !F.hasFnAttribute(Attribute::OptimizeNone))
500           F.removeFnAttr(Attribute::NoInline);
501     }
502 
503     // TODO: We should attach the attributes defined in OMPKinds.def.
504   }
505 
506   /// Collection of known kernels (\see Kernel) in the module.
507   KernelSet &Kernels;
508 
509   /// Collection of known OpenMP runtime functions..
510   DenseSet<const Function *> RTLFunctions;
511 };
512 
513 template <typename Ty, bool InsertInvalidates = true>
514 struct BooleanStateWithSetVector : public BooleanState {
515   bool contains(const Ty &Elem) const { return Set.contains(Elem); }
516   bool insert(const Ty &Elem) {
517     if (InsertInvalidates)
518       BooleanState::indicatePessimisticFixpoint();
519     return Set.insert(Elem);
520   }
521 
522   const Ty &operator[](int Idx) const { return Set[Idx]; }
523   bool operator==(const BooleanStateWithSetVector &RHS) const {
524     return BooleanState::operator==(RHS) && Set == RHS.Set;
525   }
526   bool operator!=(const BooleanStateWithSetVector &RHS) const {
527     return !(*this == RHS);
528   }
529 
530   bool empty() const { return Set.empty(); }
531   size_t size() const { return Set.size(); }
532 
533   /// "Clamp" this state with \p RHS.
534   BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
535     BooleanState::operator^=(RHS);
536     Set.insert(RHS.Set.begin(), RHS.Set.end());
537     return *this;
538   }
539 
540 private:
541   /// A set to keep track of elements.
542   SetVector<Ty> Set;
543 
544 public:
545   typename decltype(Set)::iterator begin() { return Set.begin(); }
546   typename decltype(Set)::iterator end() { return Set.end(); }
547   typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
548   typename decltype(Set)::const_iterator end() const { return Set.end(); }
549 };
550 
551 template <typename Ty, bool InsertInvalidates = true>
552 using BooleanStateWithPtrSetVector =
553     BooleanStateWithSetVector<Ty *, InsertInvalidates>;
554 
555 struct KernelInfoState : AbstractState {
556   /// Flag to track if we reached a fixpoint.
557   bool IsAtFixpoint = false;
558 
559   /// The parallel regions (identified by the outlined parallel functions) that
560   /// can be reached from the associated function.
561   BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
562       ReachedKnownParallelRegions;
563 
564   /// State to track what parallel region we might reach.
565   BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;
566 
567   /// State to track if we are in SPMD-mode, assumed or know, and why we decided
568   /// we cannot be. If it is assumed, then RequiresFullRuntime should also be
569   /// false.
570   BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;
571 
572   /// The __kmpc_target_init call in this kernel, if any. If we find more than
573   /// one we abort as the kernel is malformed.
574   CallBase *KernelInitCB = nullptr;
575 
576   /// The __kmpc_target_deinit call in this kernel, if any. If we find more than
577   /// one we abort as the kernel is malformed.
578   CallBase *KernelDeinitCB = nullptr;
579 
580   /// Flag to indicate if the associated function is a kernel entry.
581   bool IsKernelEntry = false;
582 
583   /// State to track what kernel entries can reach the associated function.
584   BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;
585 
586   /// State to indicate if we can track parallel level of the associated
587   /// function. We will give up tracking if we encounter unknown caller or the
588   /// caller is __kmpc_parallel_51.
589   BooleanStateWithSetVector<uint8_t> ParallelLevels;
590 
591   /// Abstract State interface
592   ///{
593 
594   KernelInfoState() {}
595   KernelInfoState(bool BestState) {
596     if (!BestState)
597       indicatePessimisticFixpoint();
598   }
599 
600   /// See AbstractState::isValidState(...)
601   bool isValidState() const override { return true; }
602 
603   /// See AbstractState::isAtFixpoint(...)
604   bool isAtFixpoint() const override { return IsAtFixpoint; }
605 
606   /// See AbstractState::indicatePessimisticFixpoint(...)
607   ChangeStatus indicatePessimisticFixpoint() override {
608     IsAtFixpoint = true;
609     ReachingKernelEntries.indicatePessimisticFixpoint();
610     SPMDCompatibilityTracker.indicatePessimisticFixpoint();
611     ReachedKnownParallelRegions.indicatePessimisticFixpoint();
612     ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
613     return ChangeStatus::CHANGED;
614   }
615 
616   /// See AbstractState::indicateOptimisticFixpoint(...)
617   ChangeStatus indicateOptimisticFixpoint() override {
618     IsAtFixpoint = true;
619     ReachingKernelEntries.indicateOptimisticFixpoint();
620     SPMDCompatibilityTracker.indicateOptimisticFixpoint();
621     ReachedKnownParallelRegions.indicateOptimisticFixpoint();
622     ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
623     return ChangeStatus::UNCHANGED;
624   }
625 
626   /// Return the assumed state
627   KernelInfoState &getAssumed() { return *this; }
628   const KernelInfoState &getAssumed() const { return *this; }
629 
630   bool operator==(const KernelInfoState &RHS) const {
631     if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
632       return false;
633     if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
634       return false;
635     if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
636       return false;
637     if (ReachingKernelEntries != RHS.ReachingKernelEntries)
638       return false;
639     return true;
640   }
641 
642   /// Returns true if this kernel contains any OpenMP parallel regions.
643   bool mayContainParallelRegion() {
644     return !ReachedKnownParallelRegions.empty() ||
645            !ReachedUnknownParallelRegions.empty();
646   }
647 
648   /// Return empty set as the best state of potential values.
649   static KernelInfoState getBestState() { return KernelInfoState(true); }
650 
651   static KernelInfoState getBestState(KernelInfoState &KIS) {
652     return getBestState();
653   }
654 
655   /// Return full set as the worst state of potential values.
656   static KernelInfoState getWorstState() { return KernelInfoState(false); }
657 
658   /// "Clamp" this state with \p KIS.
659   KernelInfoState operator^=(const KernelInfoState &KIS) {
660     // Do not merge two different _init and _deinit call sites.
661     if (KIS.KernelInitCB) {
662       if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
663         llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
664                          "assumptions.");
665       KernelInitCB = KIS.KernelInitCB;
666     }
667     if (KIS.KernelDeinitCB) {
668       if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
669         llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
670                          "assumptions.");
671       KernelDeinitCB = KIS.KernelDeinitCB;
672     }
673     SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
674     ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
675     ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
676     return *this;
677   }
678 
679   KernelInfoState operator&=(const KernelInfoState &KIS) {
680     return (*this ^= KIS);
681   }
682 
683   ///}
684 };
685 
686 /// Used to map the values physically (in the IR) stored in an offload
687 /// array, to a vector in memory.
688 struct OffloadArray {
689   /// Physical array (in the IR).
690   AllocaInst *Array = nullptr;
691   /// Mapped values.
692   SmallVector<Value *, 8> StoredValues;
693   /// Last stores made in the offload array.
694   SmallVector<StoreInst *, 8> LastAccesses;
695 
696   OffloadArray() = default;
697 
698   /// Initializes the OffloadArray with the values stored in \p Array before
699   /// instruction \p Before is reached. Returns false if the initialization
700   /// fails.
701   /// This MUST be used immediately after the construction of the object.
702   bool initialize(AllocaInst &Array, Instruction &Before) {
703     if (!Array.getAllocatedType()->isArrayTy())
704       return false;
705 
706     if (!getValues(Array, Before))
707       return false;
708 
709     this->Array = &Array;
710     return true;
711   }
712 
713   static const unsigned DeviceIDArgNum = 1;
714   static const unsigned BasePtrsArgNum = 3;
715   static const unsigned PtrsArgNum = 4;
716   static const unsigned SizesArgNum = 5;
717 
718 private:
719   /// Traverses the BasicBlock where \p Array is, collecting the stores made to
720   /// \p Array, leaving StoredValues with the values stored before the
721   /// instruction \p Before is reached.
722   bool getValues(AllocaInst &Array, Instruction &Before) {
723     // Initialize container.
724     const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
725     StoredValues.assign(NumValues, nullptr);
726     LastAccesses.assign(NumValues, nullptr);
727 
728     // TODO: This assumes the instruction \p Before is in the same
729     //  BasicBlock as Array. Make it general, for any control flow graph.
730     BasicBlock *BB = Array.getParent();
731     if (BB != Before.getParent())
732       return false;
733 
734     const DataLayout &DL = Array.getModule()->getDataLayout();
735     const unsigned int PointerSize = DL.getPointerSize();
736 
737     for (Instruction &I : *BB) {
738       if (&I == &Before)
739         break;
740 
741       if (!isa<StoreInst>(&I))
742         continue;
743 
744       auto *S = cast<StoreInst>(&I);
745       int64_t Offset = -1;
746       auto *Dst =
747           GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
748       if (Dst == &Array) {
749         int64_t Idx = Offset / PointerSize;
750         StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
751         LastAccesses[Idx] = S;
752       }
753     }
754 
755     return isFilled();
756   }
757 
758   /// Returns true if all values in StoredValues and
759   /// LastAccesses are not nullptrs.
760   bool isFilled() {
761     const unsigned NumValues = StoredValues.size();
762     for (unsigned I = 0; I < NumValues; ++I) {
763       if (!StoredValues[I] || !LastAccesses[I])
764         return false;
765     }
766 
767     return true;
768   }
769 };
770 
771 struct OpenMPOpt {
772 
773   using OptimizationRemarkGetter =
774       function_ref<OptimizationRemarkEmitter &(Function *)>;
775 
776   OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
777             OptimizationRemarkGetter OREGetter,
778             OMPInformationCache &OMPInfoCache, Attributor &A)
779       : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
780         OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}
781 
782   /// Check if any remarks are enabled for openmp-opt
783   bool remarksEnabled() {
784     auto &Ctx = M.getContext();
785     return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
786   }
787 
788   /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
789   bool run(bool IsModulePass) {
790     if (SCC.empty())
791       return false;
792 
793     bool Changed = false;
794 
795     LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
796                       << " functions in a slice with "
797                       << OMPInfoCache.ModuleSlice.size() << " functions\n");
798 
799     if (IsModulePass) {
800       Changed |= runAttributor(IsModulePass);
801 
802       // Recollect uses, in case Attributor deleted any.
803       OMPInfoCache.recollectUses();
804 
805       // TODO: This should be folded into buildCustomStateMachine.
806       Changed |= rewriteDeviceCodeStateMachine();
807 
808       if (remarksEnabled())
809         analysisGlobalization();
810 
811       Changed |= eliminateBarriers();
812     } else {
813       if (PrintICVValues)
814         printICVs();
815       if (PrintOpenMPKernels)
816         printKernels();
817 
818       Changed |= runAttributor(IsModulePass);
819 
820       // Recollect uses, in case Attributor deleted any.
821       OMPInfoCache.recollectUses();
822 
823       Changed |= deleteParallelRegions();
824 
825       if (HideMemoryTransferLatency)
826         Changed |= hideMemTransfersLatency();
827       Changed |= deduplicateRuntimeCalls();
828       if (EnableParallelRegionMerging) {
829         if (mergeParallelRegions()) {
830           deduplicateRuntimeCalls();
831           Changed = true;
832         }
833       }
834 
835       Changed |= eliminateBarriers();
836     }
837 
838     return Changed;
839   }
840 
841   /// Print initial ICV values for testing.
842   /// FIXME: This should be done from the Attributor once it is added.
843   void printICVs() const {
844     InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
845                                  ICV_proc_bind};
846 
847     for (Function *F : OMPInfoCache.ModuleSlice) {
848       for (auto ICV : ICVs) {
849         auto ICVInfo = OMPInfoCache.ICVs[ICV];
850         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
851           return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
852                      << " Value: "
853                      << (ICVInfo.InitValue
854                              ? toString(ICVInfo.InitValue->getValue(), 10, true)
855                              : "IMPLEMENTATION_DEFINED");
856         };
857 
858         emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark);
859       }
860     }
861   }
862 
863   /// Print OpenMP GPU kernels for testing.
864   void printKernels() const {
865     for (Function *F : SCC) {
866       if (!OMPInfoCache.Kernels.count(F))
867         continue;
868 
869       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
870         return ORA << "OpenMP GPU kernel "
871                    << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
872       };
873 
874       emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark);
875     }
876   }
877 
878   /// Return the call if \p U is a callee use in a regular call. If \p RFI is
879   /// given it has to be the callee or a nullptr is returned.
880   static CallInst *getCallIfRegularCall(
881       Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
882     CallInst *CI = dyn_cast<CallInst>(U.getUser());
883     if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
884         (!RFI ||
885          (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
886       return CI;
887     return nullptr;
888   }
889 
890   /// Return the call if \p V is a regular call. If \p RFI is given it has to be
891   /// the callee or a nullptr is returned.
892   static CallInst *getCallIfRegularCall(
893       Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
894     CallInst *CI = dyn_cast<CallInst>(&V);
895     if (CI && !CI->hasOperandBundles() &&
896         (!RFI ||
897          (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
898       return CI;
899     return nullptr;
900   }
901 
902 private:
903   /// Merge parallel regions when it is safe.
904   bool mergeParallelRegions() {
905     const unsigned CallbackCalleeOperand = 2;
906     const unsigned CallbackFirstArgOperand = 3;
907     using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
908 
909     // Check if there are any __kmpc_fork_call calls to merge.
910     OMPInformationCache::RuntimeFunctionInfo &RFI =
911         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
912 
913     if (!RFI.Declaration)
914       return false;
915 
916     // Unmergable calls that prevent merging a parallel region.
917     OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
918         OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
919         OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
920     };
921 
922     bool Changed = false;
923     LoopInfo *LI = nullptr;
924     DominatorTree *DT = nullptr;
925 
926     SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;
927 
928     BasicBlock *StartBB = nullptr, *EndBB = nullptr;
929     auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
930                          BasicBlock &ContinuationIP) {
931       BasicBlock *CGStartBB = CodeGenIP.getBlock();
932       BasicBlock *CGEndBB =
933           SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
934       assert(StartBB != nullptr && "StartBB should not be null");
935       CGStartBB->getTerminator()->setSuccessor(0, StartBB);
936       assert(EndBB != nullptr && "EndBB should not be null");
937       EndBB->getTerminator()->setSuccessor(0, CGEndBB);
938     };
939 
940     auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
941                       Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
942       ReplacementValue = &Inner;
943       return CodeGenIP;
944     };
945 
946     auto FiniCB = [&](InsertPointTy CodeGenIP) {};
947 
948     /// Create a sequential execution region within a merged parallel region,
949     /// encapsulated in a master construct with a barrier for synchronization.
950     auto CreateSequentialRegion = [&](Function *OuterFn,
951                                       BasicBlock *OuterPredBB,
952                                       Instruction *SeqStartI,
953                                       Instruction *SeqEndI) {
954       // Isolate the instructions of the sequential region to a separate
955       // block.
956       BasicBlock *ParentBB = SeqStartI->getParent();
957       BasicBlock *SeqEndBB =
958           SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
959       BasicBlock *SeqAfterBB =
960           SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
961       BasicBlock *SeqStartBB =
962           SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");
963 
964       assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
965              "Expected a different CFG");
966       const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
967       ParentBB->getTerminator()->eraseFromParent();
968 
969       auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
970                            BasicBlock &ContinuationIP) {
971         BasicBlock *CGStartBB = CodeGenIP.getBlock();
972         BasicBlock *CGEndBB =
973             SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
974         assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
975         CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
976         assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
977         SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
978       };
979       auto FiniCB = [&](InsertPointTy CodeGenIP) {};
980 
981       // Find outputs from the sequential region to outside users and
982       // broadcast their values to them.
983       for (Instruction &I : *SeqStartBB) {
984         SmallPtrSet<Instruction *, 4> OutsideUsers;
985         for (User *Usr : I.users()) {
986           Instruction &UsrI = *cast<Instruction>(Usr);
987           // Ignore outputs to LT intrinsics, code extraction for the merged
988           // parallel region will fix them.
989           if (UsrI.isLifetimeStartOrEnd())
990             continue;
991 
992           if (UsrI.getParent() != SeqStartBB)
993             OutsideUsers.insert(&UsrI);
994         }
995 
996         if (OutsideUsers.empty())
997           continue;
998 
999         // Emit an alloca in the outer region to store the broadcasted
1000         // value.
1001         const DataLayout &DL = M.getDataLayout();
1002         AllocaInst *AllocaI = new AllocaInst(
1003             I.getType(), DL.getAllocaAddrSpace(), nullptr,
1004             I.getName() + ".seq.output.alloc", &OuterFn->front().front());
1005 
1006         // Emit a store instruction in the sequential BB to update the
1007         // value.
1008         new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());
1009 
1010         // Emit a load instruction and replace the use of the output value
1011         // with it.
1012         for (Instruction *UsrI : OutsideUsers) {
1013           LoadInst *LoadI = new LoadInst(
1014               I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
1015           UsrI->replaceUsesOfWith(&I, LoadI);
1016         }
1017       }
1018 
1019       OpenMPIRBuilder::LocationDescription Loc(
1020           InsertPointTy(ParentBB, ParentBB->end()), DL);
1021       InsertPointTy SeqAfterIP =
1022           OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);
1023 
1024       OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);
1025 
1026       BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());
1027 
1028       LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
1029                         << "\n");
1030     };
1031 
1032     // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
1033     // contained in BB and only separated by instructions that can be
1034     // redundantly executed in parallel. The block BB is split before the first
1035     // call (in MergableCIs) and after the last so the entire region we merge
1036     // into a single parallel region is contained in a single basic block
1037     // without any other instructions. We use the OpenMPIRBuilder to outline
1038     // that block and call the resulting function via __kmpc_fork_call.
1039     auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs,
1040                      BasicBlock *BB) {
1041       // TODO: Change the interface to allow single CIs expanded, e.g, to
1042       // include an outer loop.
1043       assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");
1044 
1045       auto Remark = [&](OptimizationRemark OR) {
1046         OR << "Parallel region merged with parallel region"
1047            << (MergableCIs.size() > 2 ? "s" : "") << " at ";
1048         for (auto *CI : llvm::drop_begin(MergableCIs)) {
1049           OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
1050           if (CI != MergableCIs.back())
1051             OR << ", ";
1052         }
1053         return OR << ".";
1054       };
1055 
1056       emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark);
1057 
1058       Function *OriginalFn = BB->getParent();
1059       LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
1060                         << " parallel regions in " << OriginalFn->getName()
1061                         << "\n");
1062 
1063       // Isolate the calls to merge in a separate block.
1064       EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
1065       BasicBlock *AfterBB =
1066           SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
1067       StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
1068                            "omp.par.merged");
1069 
1070       assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
1071       const DebugLoc DL = BB->getTerminator()->getDebugLoc();
1072       BB->getTerminator()->eraseFromParent();
1073 
1074       // Create sequential regions for sequential instructions that are
1075       // in-between mergable parallel regions.
1076       for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
1077            It != End; ++It) {
1078         Instruction *ForkCI = *It;
1079         Instruction *NextForkCI = *(It + 1);
1080 
1081         // Continue if there are not in-between instructions.
1082         if (ForkCI->getNextNode() == NextForkCI)
1083           continue;
1084 
1085         CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
1086                                NextForkCI->getPrevNode());
1087       }
1088 
1089       OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
1090                                                DL);
1091       IRBuilder<>::InsertPoint AllocaIP(
1092           &OriginalFn->getEntryBlock(),
1093           OriginalFn->getEntryBlock().getFirstInsertionPt());
1094       // Create the merged parallel region with default proc binding, to
1095       // avoid overriding binding settings, and without explicit cancellation.
1096       InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
1097           Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
1098           OMP_PROC_BIND_default, /* IsCancellable */ false);
1099       BranchInst::Create(AfterBB, AfterIP.getBlock());
1100 
1101       // Perform the actual outlining.
1102       OMPInfoCache.OMPBuilder.finalize(OriginalFn);
1103 
1104       Function *OutlinedFn = MergableCIs.front()->getCaller();
1105 
1106       // Replace the __kmpc_fork_call calls with direct calls to the outlined
1107       // callbacks.
1108       SmallVector<Value *, 8> Args;
1109       for (auto *CI : MergableCIs) {
1110         Value *Callee =
1111             CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts();
1112         FunctionType *FT =
1113             cast<FunctionType>(Callee->getType()->getPointerElementType());
1114         Args.clear();
1115         Args.push_back(OutlinedFn->getArg(0));
1116         Args.push_back(OutlinedFn->getArg(1));
1117         for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
1118              ++U)
1119           Args.push_back(CI->getArgOperand(U));
1120 
1121         CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
1122         if (CI->getDebugLoc())
1123           NewCI->setDebugLoc(CI->getDebugLoc());
1124 
1125         // Forward parameter attributes from the callback to the callee.
1126         for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
1127              ++U)
1128           for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
1129             NewCI->addParamAttr(
1130                 U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);
1131 
1132         // Emit an explicit barrier to replace the implicit fork-join barrier.
1133         if (CI != MergableCIs.back()) {
1134           // TODO: Remove barrier if the merged parallel region includes the
1135           // 'nowait' clause.
1136           OMPInfoCache.OMPBuilder.createBarrier(
1137               InsertPointTy(NewCI->getParent(),
1138                             NewCI->getNextNode()->getIterator()),
1139               OMPD_parallel);
1140         }
1141 
1142         CI->eraseFromParent();
1143       }
1144 
1145       assert(OutlinedFn != OriginalFn && "Outlining failed");
1146       CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
1147       CGUpdater.reanalyzeFunction(*OriginalFn);
1148 
1149       NumOpenMPParallelRegionsMerged += MergableCIs.size();
1150 
1151       return true;
1152     };
1153 
1154     // Helper function that identifes sequences of
1155     // __kmpc_fork_call uses in a basic block.
1156     auto DetectPRsCB = [&](Use &U, Function &F) {
1157       CallInst *CI = getCallIfRegularCall(U, &RFI);
1158       BB2PRMap[CI->getParent()].insert(CI);
1159 
1160       return false;
1161     };
1162 
1163     BB2PRMap.clear();
1164     RFI.foreachUse(SCC, DetectPRsCB);
1165     SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
1166     // Find mergable parallel regions within a basic block that are
1167     // safe to merge, that is any in-between instructions can safely
1168     // execute in parallel after merging.
1169     // TODO: support merging across basic-blocks.
1170     for (auto &It : BB2PRMap) {
1171       auto &CIs = It.getSecond();
1172       if (CIs.size() < 2)
1173         continue;
1174 
1175       BasicBlock *BB = It.getFirst();
1176       SmallVector<CallInst *, 4> MergableCIs;
1177 
1178       /// Returns true if the instruction is mergable, false otherwise.
1179       /// A terminator instruction is unmergable by definition since merging
1180       /// works within a BB. Instructions before the mergable region are
1181       /// mergable if they are not calls to OpenMP runtime functions that may
1182       /// set different execution parameters for subsequent parallel regions.
1183       /// Instructions in-between parallel regions are mergable if they are not
1184       /// calls to any non-intrinsic function since that may call a non-mergable
1185       /// OpenMP runtime function.
1186       auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
1187         // We do not merge across BBs, hence return false (unmergable) if the
1188         // instruction is a terminator.
1189         if (I.isTerminator())
1190           return false;
1191 
1192         if (!isa<CallInst>(&I))
1193           return true;
1194 
1195         CallInst *CI = cast<CallInst>(&I);
1196         if (IsBeforeMergableRegion) {
1197           Function *CalledFunction = CI->getCalledFunction();
1198           if (!CalledFunction)
1199             return false;
1200           // Return false (unmergable) if the call before the parallel
1201           // region calls an explicit affinity (proc_bind) or number of
1202           // threads (num_threads) compiler-generated function. Those settings
1203           // may be incompatible with following parallel regions.
1204           // TODO: ICV tracking to detect compatibility.
1205           for (const auto &RFI : UnmergableCallsInfo) {
1206             if (CalledFunction == RFI.Declaration)
1207               return false;
1208           }
1209         } else {
1210           // Return false (unmergable) if there is a call instruction
1211           // in-between parallel regions when it is not an intrinsic. It
1212           // may call an unmergable OpenMP runtime function in its callpath.
1213           // TODO: Keep track of possible OpenMP calls in the callpath.
1214           if (!isa<IntrinsicInst>(CI))
1215             return false;
1216         }
1217 
1218         return true;
1219       };
1220       // Find maximal number of parallel region CIs that are safe to merge.
1221       for (auto It = BB->begin(), End = BB->end(); It != End;) {
1222         Instruction &I = *It;
1223         ++It;
1224 
1225         if (CIs.count(&I)) {
1226           MergableCIs.push_back(cast<CallInst>(&I));
1227           continue;
1228         }
1229 
1230         // Continue expanding if the instruction is mergable.
1231         if (IsMergable(I, MergableCIs.empty()))
1232           continue;
1233 
1234         // Forward the instruction iterator to skip the next parallel region
1235         // since there is an unmergable instruction which can affect it.
1236         for (; It != End; ++It) {
1237           Instruction &SkipI = *It;
1238           if (CIs.count(&SkipI)) {
1239             LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
1240                               << " due to " << I << "\n");
1241             ++It;
1242             break;
1243           }
1244         }
1245 
1246         // Store mergable regions found.
1247         if (MergableCIs.size() > 1) {
1248           MergableCIsVector.push_back(MergableCIs);
1249           LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
1250                             << " parallel regions in block " << BB->getName()
1251                             << " of function " << BB->getParent()->getName()
1252                             << "\n";);
1253         }
1254 
1255         MergableCIs.clear();
1256       }
1257 
1258       if (!MergableCIsVector.empty()) {
1259         Changed = true;
1260 
1261         for (auto &MergableCIs : MergableCIsVector)
1262           Merge(MergableCIs, BB);
1263         MergableCIsVector.clear();
1264       }
1265     }
1266 
1267     if (Changed) {
1268       /// Re-collect use for fork calls, emitted barrier calls, and
1269       /// any emitted master/end_master calls.
1270       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
1271       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
1272       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
1273       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
1274     }
1275 
1276     return Changed;
1277   }
1278 
1279   /// Try to delete parallel regions if possible.
1280   bool deleteParallelRegions() {
1281     const unsigned CallbackCalleeOperand = 2;
1282 
1283     OMPInformationCache::RuntimeFunctionInfo &RFI =
1284         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
1285 
1286     if (!RFI.Declaration)
1287       return false;
1288 
1289     bool Changed = false;
1290     auto DeleteCallCB = [&](Use &U, Function &) {
1291       CallInst *CI = getCallIfRegularCall(U);
1292       if (!CI)
1293         return false;
1294       auto *Fn = dyn_cast<Function>(
1295           CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
1296       if (!Fn)
1297         return false;
1298       if (!Fn->onlyReadsMemory())
1299         return false;
1300       if (!Fn->hasFnAttribute(Attribute::WillReturn))
1301         return false;
1302 
1303       LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
1304                         << CI->getCaller()->getName() << "\n");
1305 
1306       auto Remark = [&](OptimizationRemark OR) {
1307         return OR << "Removing parallel region with no side-effects.";
1308       };
1309       emitRemark<OptimizationRemark>(CI, "OMP160", Remark);
1310 
1311       CGUpdater.removeCallSite(*CI);
1312       CI->eraseFromParent();
1313       Changed = true;
1314       ++NumOpenMPParallelRegionsDeleted;
1315       return true;
1316     };
1317 
1318     RFI.foreachUse(SCC, DeleteCallCB);
1319 
1320     return Changed;
1321   }
1322 
1323   /// Try to eliminate runtime calls by reusing existing ones.
1324   bool deduplicateRuntimeCalls() {
1325     bool Changed = false;
1326 
1327     RuntimeFunction DeduplicableRuntimeCallIDs[] = {
1328         OMPRTL_omp_get_num_threads,
1329         OMPRTL_omp_in_parallel,
1330         OMPRTL_omp_get_cancellation,
1331         OMPRTL_omp_get_thread_limit,
1332         OMPRTL_omp_get_supported_active_levels,
1333         OMPRTL_omp_get_level,
1334         OMPRTL_omp_get_ancestor_thread_num,
1335         OMPRTL_omp_get_team_size,
1336         OMPRTL_omp_get_active_level,
1337         OMPRTL_omp_in_final,
1338         OMPRTL_omp_get_proc_bind,
1339         OMPRTL_omp_get_num_places,
1340         OMPRTL_omp_get_num_procs,
1341         OMPRTL_omp_get_place_num,
1342         OMPRTL_omp_get_partition_num_places,
1343         OMPRTL_omp_get_partition_place_nums};
1344 
1345     // Global-tid is handled separately.
1346     SmallSetVector<Value *, 16> GTIdArgs;
1347     collectGlobalThreadIdArguments(GTIdArgs);
1348     LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
1349                       << " global thread ID arguments\n");
1350 
1351     for (Function *F : SCC) {
1352       for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
1353         Changed |= deduplicateRuntimeCalls(
1354             *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);
1355 
1356       // __kmpc_global_thread_num is special as we can replace it with an
1357       // argument in enough cases to make it worth trying.
1358       Value *GTIdArg = nullptr;
1359       for (Argument &Arg : F->args())
1360         if (GTIdArgs.count(&Arg)) {
1361           GTIdArg = &Arg;
1362           break;
1363         }
1364       Changed |= deduplicateRuntimeCalls(
1365           *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
1366     }
1367 
1368     return Changed;
1369   }
1370 
1371   /// Tries to hide the latency of runtime calls that involve host to
1372   /// device memory transfers by splitting them into their "issue" and "wait"
1373   /// versions. The "issue" is moved upwards as much as possible. The "wait" is
1374   /// moved downards as much as possible. The "issue" issues the memory transfer
1375   /// asynchronously, returning a handle. The "wait" waits in the returned
1376   /// handle for the memory transfer to finish.
1377   bool hideMemTransfersLatency() {
1378     auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
1379     bool Changed = false;
1380     auto SplitMemTransfers = [&](Use &U, Function &Decl) {
1381       auto *RTCall = getCallIfRegularCall(U, &RFI);
1382       if (!RTCall)
1383         return false;
1384 
1385       OffloadArray OffloadArrays[3];
1386       if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
1387         return false;
1388 
1389       LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));
1390 
1391       // TODO: Check if can be moved upwards.
1392       bool WasSplit = false;
1393       Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
1394       if (WaitMovementPoint)
1395         WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);
1396 
1397       Changed |= WasSplit;
1398       return WasSplit;
1399     };
1400     RFI.foreachUse(SCC, SplitMemTransfers);
1401 
1402     return Changed;
1403   }
1404 
1405   /// Eliminates redundant, aligned barriers in OpenMP offloaded kernels.
1406   /// TODO: Make this an AA and expand it to work across blocks and functions.
1407   bool eliminateBarriers() {
1408     bool Changed = false;
1409 
1410     if (DisableOpenMPOptBarrierElimination)
1411       return /*Changed=*/false;
1412 
1413     if (OMPInfoCache.Kernels.empty())
1414       return /*Changed=*/false;
1415 
1416     enum ImplicitBarrierType { IBT_ENTRY, IBT_EXIT };
1417 
1418     class BarrierInfo {
1419       Instruction *I;
1420       enum ImplicitBarrierType Type;
1421 
1422     public:
1423       BarrierInfo(enum ImplicitBarrierType Type) : I(nullptr), Type(Type) {}
1424       BarrierInfo(Instruction &I) : I(&I) {}
1425 
1426       bool isImplicit() { return !I; }
1427 
1428       bool isImplicitEntry() { return isImplicit() && Type == IBT_ENTRY; }
1429 
1430       bool isImplicitExit() { return isImplicit() && Type == IBT_EXIT; }
1431 
1432       Instruction *getInstruction() { return I; }
1433     };
1434 
1435     for (Function *Kernel : OMPInfoCache.Kernels) {
1436       for (BasicBlock &BB : *Kernel) {
1437         SmallVector<BarrierInfo, 8> BarriersInBlock;
1438         SmallPtrSet<Instruction *, 8> BarriersToBeDeleted;
1439 
1440         // Add the kernel entry implicit barrier.
1441         if (&Kernel->getEntryBlock() == &BB)
1442           BarriersInBlock.push_back(IBT_ENTRY);
1443 
1444         // Find implicit and explicit aligned barriers in the same basic block.
1445         for (Instruction &I : BB) {
1446           if (isa<ReturnInst>(I)) {
1447             // Add the implicit barrier when exiting the kernel.
1448             BarriersInBlock.push_back(IBT_EXIT);
1449             continue;
1450           }
1451           CallBase *CB = dyn_cast<CallBase>(&I);
1452           if (!CB)
1453             continue;
1454 
1455           auto IsAlignBarrierCB = [&](CallBase &CB) {
1456             switch (CB.getIntrinsicID()) {
1457             case Intrinsic::nvvm_barrier0:
1458             case Intrinsic::nvvm_barrier0_and:
1459             case Intrinsic::nvvm_barrier0_or:
1460             case Intrinsic::nvvm_barrier0_popc:
1461               return true;
1462             default:
1463               break;
1464             }
1465             return hasAssumption(CB,
1466                                  KnownAssumptionString("ompx_aligned_barrier"));
1467           };
1468 
1469           if (IsAlignBarrierCB(*CB)) {
1470             // Add an explicit aligned barrier.
1471             BarriersInBlock.push_back(I);
1472           }
1473         }
1474 
1475         if (BarriersInBlock.size() <= 1)
1476           continue;
1477 
1478         // A barrier in a barrier pair is removeable if all instructions
1479         // between the barriers in the pair are side-effect free modulo the
1480         // barrier operation.
1481         auto IsBarrierRemoveable = [&Kernel](BarrierInfo *StartBI,
1482                                              BarrierInfo *EndBI) {
1483           assert(
1484               !StartBI->isImplicitExit() &&
1485               "Expected start barrier to be other than a kernel exit barrier");
1486           assert(
1487               !EndBI->isImplicitEntry() &&
1488               "Expected end barrier to be other than a kernel entry barrier");
1489           // If StarBI instructions is null then this the implicit
1490           // kernel entry barrier, so iterate from the first instruction in the
1491           // entry block.
1492           Instruction *I = (StartBI->isImplicitEntry())
1493                                ? &Kernel->getEntryBlock().front()
1494                                : StartBI->getInstruction()->getNextNode();
1495           assert(I && "Expected non-null start instruction");
1496           Instruction *E = (EndBI->isImplicitExit())
1497                                ? I->getParent()->getTerminator()
1498                                : EndBI->getInstruction();
1499           assert(E && "Expected non-null end instruction");
1500 
1501           for (; I != E; I = I->getNextNode()) {
1502             if (!I->mayHaveSideEffects() && !I->mayReadFromMemory())
1503               continue;
1504 
1505             auto IsPotentiallyAffectedByBarrier =
1506                 [](Optional<MemoryLocation> Loc) {
1507                   const Value *Obj = (Loc && Loc->Ptr)
1508                                          ? getUnderlyingObject(Loc->Ptr)
1509                                          : nullptr;
1510                   if (!Obj) {
1511                     LLVM_DEBUG(
1512                         dbgs()
1513                         << "Access to unknown location requires barriers\n");
1514                     return true;
1515                   }
1516                   if (isa<UndefValue>(Obj))
1517                     return false;
1518                   if (isa<AllocaInst>(Obj))
1519                     return false;
1520                   if (auto *GV = dyn_cast<GlobalVariable>(Obj)) {
1521                     if (GV->isConstant())
1522                       return false;
1523                     if (GV->isThreadLocal())
1524                       return false;
1525                     if (GV->getAddressSpace() == (int)AddressSpace::Local)
1526                       return false;
1527                     if (GV->getAddressSpace() == (int)AddressSpace::Constant)
1528                       return false;
1529                   }
1530                   LLVM_DEBUG(dbgs() << "Access to '" << *Obj
1531                                     << "' requires barriers\n");
1532                   return true;
1533                 };
1534 
1535             if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
1536               Optional<MemoryLocation> Loc = MemoryLocation::getForDest(MI);
1537               if (IsPotentiallyAffectedByBarrier(Loc))
1538                 return false;
1539               if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
1540                 Optional<MemoryLocation> Loc =
1541                     MemoryLocation::getForSource(MTI);
1542                 if (IsPotentiallyAffectedByBarrier(Loc))
1543                   return false;
1544               }
1545               continue;
1546             }
1547 
1548             if (auto *LI = dyn_cast<LoadInst>(I))
1549               if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1550                 continue;
1551 
1552             Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
1553             if (IsPotentiallyAffectedByBarrier(Loc))
1554               return false;
1555           }
1556 
1557           return true;
1558         };
1559 
1560         // Iterate barrier pairs and remove an explicit barrier if analysis
1561         // deems it removeable.
1562         for (auto *It = BarriersInBlock.begin(),
1563                   *End = BarriersInBlock.end() - 1;
1564              It != End; ++It) {
1565 
1566           BarrierInfo *StartBI = It;
1567           BarrierInfo *EndBI = (It + 1);
1568 
1569           // Cannot remove when both are implicit barriers, continue.
1570           if (StartBI->isImplicit() && EndBI->isImplicit())
1571             continue;
1572 
1573           if (!IsBarrierRemoveable(StartBI, EndBI))
1574             continue;
1575 
1576           assert(!(StartBI->isImplicit() && EndBI->isImplicit()) &&
1577                  "Expected at least one explicit barrier to remove.");
1578 
1579           // Remove an explicit barrier, check first, then second.
1580           if (!StartBI->isImplicit()) {
1581             LLVM_DEBUG(dbgs() << "Remove start barrier "
1582                               << *StartBI->getInstruction() << "\n");
1583             BarriersToBeDeleted.insert(StartBI->getInstruction());
1584           } else {
1585             LLVM_DEBUG(dbgs() << "Remove end barrier "
1586                               << *EndBI->getInstruction() << "\n");
1587             BarriersToBeDeleted.insert(EndBI->getInstruction());
1588           }
1589         }
1590 
1591         if (BarriersToBeDeleted.empty())
1592           continue;
1593 
1594         Changed = true;
1595         for (Instruction *I : BarriersToBeDeleted) {
1596           ++NumBarriersEliminated;
1597           auto Remark = [&](OptimizationRemark OR) {
1598             return OR << "Redundant barrier eliminated.";
1599           };
1600 
1601           if (EnableVerboseRemarks)
1602             emitRemark<OptimizationRemark>(I, "OMP190", Remark);
1603           I->eraseFromParent();
1604         }
1605       }
1606     }
1607 
1608     return Changed;
1609   }
1610 
1611   void analysisGlobalization() {
1612     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
1613 
1614     auto CheckGlobalization = [&](Use &U, Function &Decl) {
1615       if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
1616         auto Remark = [&](OptimizationRemarkMissed ORM) {
1617           return ORM
1618                  << "Found thread data sharing on the GPU. "
1619                  << "Expect degraded performance due to data globalization.";
1620         };
1621         emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark);
1622       }
1623 
1624       return false;
1625     };
1626 
1627     RFI.foreachUse(SCC, CheckGlobalization);
1628   }
1629 
1630   /// Maps the values stored in the offload arrays passed as arguments to
1631   /// \p RuntimeCall into the offload arrays in \p OAs.
1632   bool getValuesInOffloadArrays(CallInst &RuntimeCall,
1633                                 MutableArrayRef<OffloadArray> OAs) {
1634     assert(OAs.size() == 3 && "Need space for three offload arrays!");
1635 
1636     // A runtime call that involves memory offloading looks something like:
1637     // call void @__tgt_target_data_begin_mapper(arg0, arg1,
1638     //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
1639     // ...)
1640     // So, the idea is to access the allocas that allocate space for these
1641     // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
1642     // Therefore:
1643     // i8** %offload_baseptrs.
1644     Value *BasePtrsArg =
1645         RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
1646     // i8** %offload_ptrs.
1647     Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
1648     // i8** %offload_sizes.
1649     Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);
1650 
1651     // Get values stored in **offload_baseptrs.
1652     auto *V = getUnderlyingObject(BasePtrsArg);
1653     if (!isa<AllocaInst>(V))
1654       return false;
1655     auto *BasePtrsArray = cast<AllocaInst>(V);
1656     if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
1657       return false;
1658 
1659     // Get values stored in **offload_baseptrs.
1660     V = getUnderlyingObject(PtrsArg);
1661     if (!isa<AllocaInst>(V))
1662       return false;
1663     auto *PtrsArray = cast<AllocaInst>(V);
1664     if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
1665       return false;
1666 
1667     // Get values stored in **offload_sizes.
1668     V = getUnderlyingObject(SizesArg);
1669     // If it's a [constant] global array don't analyze it.
1670     if (isa<GlobalValue>(V))
1671       return isa<Constant>(V);
1672     if (!isa<AllocaInst>(V))
1673       return false;
1674 
1675     auto *SizesArray = cast<AllocaInst>(V);
1676     if (!OAs[2].initialize(*SizesArray, RuntimeCall))
1677       return false;
1678 
1679     return true;
1680   }
1681 
1682   /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
1683   /// For now this is a way to test that the function getValuesInOffloadArrays
1684   /// is working properly.
1685   /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
1686   void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
1687     assert(OAs.size() == 3 && "There are three offload arrays to debug!");
1688 
1689     LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
1690     std::string ValuesStr;
1691     raw_string_ostream Printer(ValuesStr);
1692     std::string Separator = " --- ";
1693 
1694     for (auto *BP : OAs[0].StoredValues) {
1695       BP->print(Printer);
1696       Printer << Separator;
1697     }
1698     LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
1699     ValuesStr.clear();
1700 
1701     for (auto *P : OAs[1].StoredValues) {
1702       P->print(Printer);
1703       Printer << Separator;
1704     }
1705     LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
1706     ValuesStr.clear();
1707 
1708     for (auto *S : OAs[2].StoredValues) {
1709       S->print(Printer);
1710       Printer << Separator;
1711     }
1712     LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
1713   }
1714 
1715   /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be
1716   /// moved. Returns nullptr if the movement is not possible, or not worth it.
1717   Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
1718     // FIXME: This traverses only the BasicBlock where RuntimeCall is.
1719     //  Make it traverse the CFG.
1720 
1721     Instruction *CurrentI = &RuntimeCall;
1722     bool IsWorthIt = false;
1723     while ((CurrentI = CurrentI->getNextNode())) {
1724 
1725       // TODO: Once we detect the regions to be offloaded we should use the
1726       //  alias analysis manager to check if CurrentI may modify one of
1727       //  the offloaded regions.
1728       if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
1729         if (IsWorthIt)
1730           return CurrentI;
1731 
1732         return nullptr;
1733       }
1734 
1735       // FIXME: For now if we move it over anything without side effect
1736       //  is worth it.
1737       IsWorthIt = true;
1738     }
1739 
1740     // Return end of BasicBlock.
1741     return RuntimeCall.getParent()->getTerminator();
1742   }
1743 
1744   /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
1745   bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
1746                                Instruction &WaitMovementPoint) {
1747     // Create stack allocated handle (__tgt_async_info) at the beginning of the
1748     // function. Used for storing information of the async transfer, allowing to
1749     // wait on it later.
1750     auto &IRBuilder = OMPInfoCache.OMPBuilder;
1751     auto *F = RuntimeCall.getCaller();
1752     Instruction *FirstInst = &(F->getEntryBlock().front());
1753     AllocaInst *Handle = new AllocaInst(
1754         IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);
1755 
1756     // Add "issue" runtime call declaration:
1757     // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
1758     //   i8**, i8**, i64*, i64*)
1759     FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
1760         M, OMPRTL___tgt_target_data_begin_mapper_issue);
1761 
1762     // Change RuntimeCall call site for its asynchronous version.
1763     SmallVector<Value *, 16> Args;
1764     for (auto &Arg : RuntimeCall.args())
1765       Args.push_back(Arg.get());
1766     Args.push_back(Handle);
1767 
1768     CallInst *IssueCallsite =
1769         CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
1770     OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite);
1771     RuntimeCall.eraseFromParent();
1772 
1773     // Add "wait" runtime call declaration:
1774     // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
1775     FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
1776         M, OMPRTL___tgt_target_data_begin_mapper_wait);
1777 
1778     Value *WaitParams[2] = {
1779         IssueCallsite->getArgOperand(
1780             OffloadArray::DeviceIDArgNum), // device_id.
1781         Handle                             // handle to wait on.
1782     };
1783     CallInst *WaitCallsite = CallInst::Create(
1784         WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
1785     OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite);
1786 
1787     return true;
1788   }
1789 
1790   static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
1791                                     bool GlobalOnly, bool &SingleChoice) {
1792     if (CurrentIdent == NextIdent)
1793       return CurrentIdent;
1794 
1795     // TODO: Figure out how to actually combine multiple debug locations. For
1796     //       now we just keep an existing one if there is a single choice.
1797     if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
1798       SingleChoice = !CurrentIdent;
1799       return NextIdent;
1800     }
1801     return nullptr;
1802   }
1803 
1804   /// Return an `struct ident_t*` value that represents the ones used in the
1805   /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
1806   /// return a local `struct ident_t*`. For now, if we cannot find a suitable
1807   /// return value we create one from scratch. We also do not yet combine
1808   /// information, e.g., the source locations, see combinedIdentStruct.
1809   Value *
1810   getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
1811                                  Function &F, bool GlobalOnly) {
1812     bool SingleChoice = true;
1813     Value *Ident = nullptr;
1814     auto CombineIdentStruct = [&](Use &U, Function &Caller) {
1815       CallInst *CI = getCallIfRegularCall(U, &RFI);
1816       if (!CI || &F != &Caller)
1817         return false;
1818       Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
1819                                   /* GlobalOnly */ true, SingleChoice);
1820       return false;
1821     };
1822     RFI.foreachUse(SCC, CombineIdentStruct);
1823 
1824     if (!Ident || !SingleChoice) {
1825       // The IRBuilder uses the insertion block to get to the module, this is
1826       // unfortunate but we work around it for now.
1827       if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
1828         OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
1829             &F.getEntryBlock(), F.getEntryBlock().begin()));
1830       // Create a fallback location if non was found.
1831       // TODO: Use the debug locations of the calls instead.
1832       uint32_t SrcLocStrSize;
1833       Constant *Loc =
1834           OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
1835       Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize);
1836     }
1837     return Ident;
1838   }
1839 
1840   /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
1841   /// \p ReplVal if given.
1842   bool deduplicateRuntimeCalls(Function &F,
1843                                OMPInformationCache::RuntimeFunctionInfo &RFI,
1844                                Value *ReplVal = nullptr) {
1845     auto *UV = RFI.getUseVector(F);
1846     if (!UV || UV->size() + (ReplVal != nullptr) < 2)
1847       return false;
1848 
1849     LLVM_DEBUG(
1850         dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
1851                << (ReplVal ? " with an existing value\n" : "\n") << "\n");
1852 
1853     assert((!ReplVal || (isa<Argument>(ReplVal) &&
1854                          cast<Argument>(ReplVal)->getParent() == &F)) &&
1855            "Unexpected replacement value!");
1856 
1857     // TODO: Use dominance to find a good position instead.
1858     auto CanBeMoved = [this](CallBase &CB) {
1859       unsigned NumArgs = CB.arg_size();
1860       if (NumArgs == 0)
1861         return true;
1862       if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
1863         return false;
1864       for (unsigned U = 1; U < NumArgs; ++U)
1865         if (isa<Instruction>(CB.getArgOperand(U)))
1866           return false;
1867       return true;
1868     };
1869 
1870     if (!ReplVal) {
1871       for (Use *U : *UV)
1872         if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
1873           if (!CanBeMoved(*CI))
1874             continue;
1875 
1876           // If the function is a kernel, dedup will move
1877           // the runtime call right after the kernel init callsite. Otherwise,
1878           // it will move it to the beginning of the caller function.
1879           if (isKernel(F)) {
1880             auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
1881             auto *KernelInitUV = KernelInitRFI.getUseVector(F);
1882 
1883             if (KernelInitUV->empty())
1884               continue;
1885 
1886             assert(KernelInitUV->size() == 1 &&
1887                    "Expected a single __kmpc_target_init in kernel\n");
1888 
1889             CallInst *KernelInitCI =
1890                 getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI);
1891             assert(KernelInitCI &&
1892                    "Expected a call to __kmpc_target_init in kernel\n");
1893 
1894             CI->moveAfter(KernelInitCI);
1895           } else
1896             CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
1897           ReplVal = CI;
1898           break;
1899         }
1900       if (!ReplVal)
1901         return false;
1902     }
1903 
1904     // If we use a call as a replacement value we need to make sure the ident is
1905     // valid at the new location. For now we just pick a global one, either
1906     // existing and used by one of the calls, or created from scratch.
1907     if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
1908       if (!CI->arg_empty() &&
1909           CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
1910         Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
1911                                                       /* GlobalOnly */ true);
1912         CI->setArgOperand(0, Ident);
1913       }
1914     }
1915 
1916     bool Changed = false;
1917     auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1918       CallInst *CI = getCallIfRegularCall(U, &RFI);
1919       if (!CI || CI == ReplVal || &F != &Caller)
1920         return false;
1921       assert(CI->getCaller() == &F && "Unexpected call!");
1922 
1923       auto Remark = [&](OptimizationRemark OR) {
1924         return OR << "OpenMP runtime call "
1925                   << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated.";
1926       };
1927       if (CI->getDebugLoc())
1928         emitRemark<OptimizationRemark>(CI, "OMP170", Remark);
1929       else
1930         emitRemark<OptimizationRemark>(&F, "OMP170", Remark);
1931 
1932       CGUpdater.removeCallSite(*CI);
1933       CI->replaceAllUsesWith(ReplVal);
1934       CI->eraseFromParent();
1935       ++NumOpenMPRuntimeCallsDeduplicated;
1936       Changed = true;
1937       return true;
1938     };
1939     RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1940 
1941     return Changed;
1942   }
1943 
1944   /// Collect arguments that represent the global thread id in \p GTIdArgs.
1945   void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1946     // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1947     //       initialization. We could define an AbstractAttribute instead and
1948     //       run the Attributor here once it can be run as an SCC pass.
1949 
1950     // Helper to check the argument \p ArgNo at all call sites of \p F for
1951     // a GTId.
1952     auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1953       if (!F.hasLocalLinkage())
1954         return false;
1955       for (Use &U : F.uses()) {
1956         if (CallInst *CI = getCallIfRegularCall(U)) {
1957           Value *ArgOp = CI->getArgOperand(ArgNo);
1958           if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1959               getCallIfRegularCall(
1960                   *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1961             continue;
1962         }
1963         return false;
1964       }
1965       return true;
1966     };
1967 
1968     // Helper to identify uses of a GTId as GTId arguments.
1969     auto AddUserArgs = [&](Value &GTId) {
1970       for (Use &U : GTId.uses())
1971         if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1972           if (CI->isArgOperand(&U))
1973             if (Function *Callee = CI->getCalledFunction())
1974               if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1975                 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1976     };
1977 
1978     // The argument users of __kmpc_global_thread_num calls are GTIds.
1979     OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1980         OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1981 
1982     GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1983       if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1984         AddUserArgs(*CI);
1985       return false;
1986     });
1987 
1988     // Transitively search for more arguments by looking at the users of the
1989     // ones we know already. During the search the GTIdArgs vector is extended
1990     // so we cannot cache the size nor can we use a range based for.
1991     for (unsigned U = 0; U < GTIdArgs.size(); ++U)
1992       AddUserArgs(*GTIdArgs[U]);
1993   }
1994 
1995   /// Kernel (=GPU) optimizations and utility functions
1996   ///
1997   ///{{
1998 
1999   /// Check if \p F is a kernel, hence entry point for target offloading.
2000   bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }
2001 
2002   /// Cache to remember the unique kernel for a function.
2003   DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;
2004 
2005   /// Find the unique kernel that will execute \p F, if any.
2006   Kernel getUniqueKernelFor(Function &F);
2007 
2008   /// Find the unique kernel that will execute \p I, if any.
2009   Kernel getUniqueKernelFor(Instruction &I) {
2010     return getUniqueKernelFor(*I.getFunction());
2011   }
2012 
2013   /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in
2014   /// the cases we can avoid taking the address of a function.
2015   bool rewriteDeviceCodeStateMachine();
2016 
2017   ///
2018   ///}}
2019 
2020   /// Emit a remark generically
2021   ///
2022   /// This template function can be used to generically emit a remark. The
2023   /// RemarkKind should be one of the following:
2024   ///   - OptimizationRemark to indicate a successful optimization attempt
2025   ///   - OptimizationRemarkMissed to report a failed optimization attempt
2026   ///   - OptimizationRemarkAnalysis to provide additional information about an
2027   ///     optimization attempt
2028   ///
2029   /// The remark is built using a callback function provided by the caller that
2030   /// takes a RemarkKind as input and returns a RemarkKind.
2031   template <typename RemarkKind, typename RemarkCallBack>
2032   void emitRemark(Instruction *I, StringRef RemarkName,
2033                   RemarkCallBack &&RemarkCB) const {
2034     Function *F = I->getParent()->getParent();
2035     auto &ORE = OREGetter(F);
2036 
2037     if (RemarkName.startswith("OMP"))
2038       ORE.emit([&]() {
2039         return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I))
2040                << " [" << RemarkName << "]";
2041       });
2042     else
2043       ORE.emit(
2044           [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); });
2045   }
2046 
2047   /// Emit a remark on a function.
2048   template <typename RemarkKind, typename RemarkCallBack>
2049   void emitRemark(Function *F, StringRef RemarkName,
2050                   RemarkCallBack &&RemarkCB) const {
2051     auto &ORE = OREGetter(F);
2052 
2053     if (RemarkName.startswith("OMP"))
2054       ORE.emit([&]() {
2055         return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F))
2056                << " [" << RemarkName << "]";
2057       });
2058     else
2059       ORE.emit(
2060           [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); });
2061   }
2062 
2063   /// RAII struct to temporarily change an RTL function's linkage to external.
2064   /// This prevents it from being mistakenly removed by other optimizations.
2065   struct ExternalizationRAII {
2066     ExternalizationRAII(OMPInformationCache &OMPInfoCache,
2067                         RuntimeFunction RFKind)
2068         : Declaration(OMPInfoCache.RFIs[RFKind].Declaration) {
2069       if (!Declaration)
2070         return;
2071 
2072       LinkageType = Declaration->getLinkage();
2073       Declaration->setLinkage(GlobalValue::ExternalLinkage);
2074     }
2075 
2076     ~ExternalizationRAII() {
2077       if (!Declaration)
2078         return;
2079 
2080       Declaration->setLinkage(LinkageType);
2081     }
2082 
2083     Function *Declaration;
2084     GlobalValue::LinkageTypes LinkageType;
2085   };
2086 
2087   /// The underlying module.
2088   Module &M;
2089 
2090   /// The SCC we are operating on.
2091   SmallVectorImpl<Function *> &SCC;
2092 
2093   /// Callback to update the call graph, the first argument is a removed call,
2094   /// the second an optional replacement call.
2095   CallGraphUpdater &CGUpdater;
2096 
2097   /// Callback to get an OptimizationRemarkEmitter from a Function *
2098   OptimizationRemarkGetter OREGetter;
2099 
2100   /// OpenMP-specific information cache. Also Used for Attributor runs.
2101   OMPInformationCache &OMPInfoCache;
2102 
2103   /// Attributor instance.
2104   Attributor &A;
2105 
2106   /// Helper function to run Attributor on SCC.
2107   bool runAttributor(bool IsModulePass) {
2108     if (SCC.empty())
2109       return false;
2110 
2111     // Temporarily make these function have external linkage so the Attributor
2112     // doesn't remove them when we try to look them up later.
2113     ExternalizationRAII Parallel(OMPInfoCache, OMPRTL___kmpc_kernel_parallel);
2114     ExternalizationRAII EndParallel(OMPInfoCache,
2115                                     OMPRTL___kmpc_kernel_end_parallel);
2116     ExternalizationRAII BarrierSPMD(OMPInfoCache,
2117                                     OMPRTL___kmpc_barrier_simple_spmd);
2118     ExternalizationRAII BarrierGeneric(OMPInfoCache,
2119                                        OMPRTL___kmpc_barrier_simple_generic);
2120     ExternalizationRAII ThreadId(OMPInfoCache,
2121                                  OMPRTL___kmpc_get_hardware_thread_id_in_block);
2122     ExternalizationRAII WarpSize(OMPInfoCache, OMPRTL___kmpc_get_warp_size);
2123 
2124     registerAAs(IsModulePass);
2125 
2126     ChangeStatus Changed = A.run();
2127 
2128     LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
2129                       << " functions, result: " << Changed << ".\n");
2130 
2131     return Changed == ChangeStatus::CHANGED;
2132   }
2133 
2134   void registerFoldRuntimeCall(RuntimeFunction RF);
2135 
2136   /// Populate the Attributor with abstract attribute opportunities in the
2137   /// function.
2138   void registerAAs(bool IsModulePass);
2139 };
2140 
2141 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
2142   if (!OMPInfoCache.ModuleSlice.count(&F))
2143     return nullptr;
2144 
2145   // Use a scope to keep the lifetime of the CachedKernel short.
2146   {
2147     Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
2148     if (CachedKernel)
2149       return *CachedKernel;
2150 
2151     // TODO: We should use an AA to create an (optimistic and callback
2152     //       call-aware) call graph. For now we stick to simple patterns that
2153     //       are less powerful, basically the worst fixpoint.
2154     if (isKernel(F)) {
2155       CachedKernel = Kernel(&F);
2156       return *CachedKernel;
2157     }
2158 
2159     CachedKernel = nullptr;
2160     if (!F.hasLocalLinkage()) {
2161 
2162       // See https://openmp.llvm.org/remarks/OptimizationRemarks.html
2163       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2164         return ORA << "Potentially unknown OpenMP target region caller.";
2165       };
2166       emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark);
2167 
2168       return nullptr;
2169     }
2170   }
2171 
2172   auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
2173     if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2174       // Allow use in equality comparisons.
2175       if (Cmp->isEquality())
2176         return getUniqueKernelFor(*Cmp);
2177       return nullptr;
2178     }
2179     if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
2180       // Allow direct calls.
2181       if (CB->isCallee(&U))
2182         return getUniqueKernelFor(*CB);
2183 
2184       OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI =
2185           OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
2186       // Allow the use in __kmpc_parallel_51 calls.
2187       if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI))
2188         return getUniqueKernelFor(*CB);
2189       return nullptr;
2190     }
2191     // Disallow every other use.
2192     return nullptr;
2193   };
2194 
2195   // TODO: In the future we want to track more than just a unique kernel.
2196   SmallPtrSet<Kernel, 2> PotentialKernels;
2197   OMPInformationCache::foreachUse(F, [&](const Use &U) {
2198     PotentialKernels.insert(GetUniqueKernelForUse(U));
2199   });
2200 
2201   Kernel K = nullptr;
2202   if (PotentialKernels.size() == 1)
2203     K = *PotentialKernels.begin();
2204 
2205   // Cache the result.
2206   UniqueKernelMap[&F] = K;
2207 
2208   return K;
2209 }
2210 
2211 bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
2212   OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI =
2213       OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
2214 
2215   bool Changed = false;
2216   if (!KernelParallelRFI)
2217     return Changed;
2218 
2219   // If we have disabled state machine changes, exit
2220   if (DisableOpenMPOptStateMachineRewrite)
2221     return Changed;
2222 
2223   for (Function *F : SCC) {
2224 
2225     // Check if the function is a use in a __kmpc_parallel_51 call at
2226     // all.
2227     bool UnknownUse = false;
2228     bool KernelParallelUse = false;
2229     unsigned NumDirectCalls = 0;
2230 
2231     SmallVector<Use *, 2> ToBeReplacedStateMachineUses;
2232     OMPInformationCache::foreachUse(*F, [&](Use &U) {
2233       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2234         if (CB->isCallee(&U)) {
2235           ++NumDirectCalls;
2236           return;
2237         }
2238 
2239       if (isa<ICmpInst>(U.getUser())) {
2240         ToBeReplacedStateMachineUses.push_back(&U);
2241         return;
2242       }
2243 
2244       // Find wrapper functions that represent parallel kernels.
2245       CallInst *CI =
2246           OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI);
2247       const unsigned int WrapperFunctionArgNo = 6;
2248       if (!KernelParallelUse && CI &&
2249           CI->getArgOperandNo(&U) == WrapperFunctionArgNo) {
2250         KernelParallelUse = true;
2251         ToBeReplacedStateMachineUses.push_back(&U);
2252         return;
2253       }
2254       UnknownUse = true;
2255     });
2256 
2257     // Do not emit a remark if we haven't seen a __kmpc_parallel_51
2258     // use.
2259     if (!KernelParallelUse)
2260       continue;
2261 
2262     // If this ever hits, we should investigate.
2263     // TODO: Checking the number of uses is not a necessary restriction and
2264     // should be lifted.
2265     if (UnknownUse || NumDirectCalls != 1 ||
2266         ToBeReplacedStateMachineUses.size() > 2) {
2267       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2268         return ORA << "Parallel region is used in "
2269                    << (UnknownUse ? "unknown" : "unexpected")
2270                    << " ways. Will not attempt to rewrite the state machine.";
2271       };
2272       emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark);
2273       continue;
2274     }
2275 
2276     // Even if we have __kmpc_parallel_51 calls, we (for now) give
2277     // up if the function is not called from a unique kernel.
2278     Kernel K = getUniqueKernelFor(*F);
2279     if (!K) {
2280       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2281         return ORA << "Parallel region is not called from a unique kernel. "
2282                       "Will not attempt to rewrite the state machine.";
2283       };
2284       emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark);
2285       continue;
2286     }
2287 
2288     // We now know F is a parallel body function called only from the kernel K.
2289     // We also identified the state machine uses in which we replace the
2290     // function pointer by a new global symbol for identification purposes. This
2291     // ensures only direct calls to the function are left.
2292 
2293     Module &M = *F->getParent();
2294     Type *Int8Ty = Type::getInt8Ty(M.getContext());
2295 
2296     auto *ID = new GlobalVariable(
2297         M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
2298         UndefValue::get(Int8Ty), F->getName() + ".ID");
2299 
2300     for (Use *U : ToBeReplacedStateMachineUses)
2301       U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2302           ID, U->get()->getType()));
2303 
2304     ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
2305 
2306     Changed = true;
2307   }
2308 
2309   return Changed;
2310 }
2311 
2312 /// Abstract Attribute for tracking ICV values.
2313 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
2314   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2315   AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2316 
2317   void initialize(Attributor &A) override {
2318     Function *F = getAnchorScope();
2319     if (!F || !A.isFunctionIPOAmendable(*F))
2320       indicatePessimisticFixpoint();
2321   }
2322 
2323   /// Returns true if value is assumed to be tracked.
2324   bool isAssumedTracked() const { return getAssumed(); }
2325 
2326   /// Returns true if value is known to be tracked.
2327   bool isKnownTracked() const { return getAssumed(); }
2328 
2329   /// Create an abstract attribute biew for the position \p IRP.
2330   static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
2331 
2332   /// Return the value with which \p I can be replaced for specific \p ICV.
2333   virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
2334                                                 const Instruction *I,
2335                                                 Attributor &A) const {
2336     return None;
2337   }
2338 
2339   /// Return an assumed unique ICV value if a single candidate is found. If
2340   /// there cannot be one, return a nullptr. If it is not clear yet, return the
2341   /// Optional::NoneType.
2342   virtual Optional<Value *>
2343   getUniqueReplacementValue(InternalControlVar ICV) const = 0;
2344 
2345   // Currently only nthreads is being tracked.
2346   // this array will only grow with time.
2347   InternalControlVar TrackableICVs[1] = {ICV_nthreads};
2348 
2349   /// See AbstractAttribute::getName()
2350   const std::string getName() const override { return "AAICVTracker"; }
2351 
2352   /// See AbstractAttribute::getIdAddr()
2353   const char *getIdAddr() const override { return &ID; }
2354 
2355   /// This function should return true if the type of the \p AA is AAICVTracker
2356   static bool classof(const AbstractAttribute *AA) {
2357     return (AA->getIdAddr() == &ID);
2358   }
2359 
2360   static const char ID;
2361 };
2362 
2363 struct AAICVTrackerFunction : public AAICVTracker {
2364   AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
2365       : AAICVTracker(IRP, A) {}
2366 
2367   // FIXME: come up with better string.
2368   const std::string getAsStr() const override { return "ICVTrackerFunction"; }
2369 
2370   // FIXME: come up with some stats.
2371   void trackStatistics() const override {}
2372 
2373   /// We don't manifest anything for this AA.
2374   ChangeStatus manifest(Attributor &A) override {
2375     return ChangeStatus::UNCHANGED;
2376   }
2377 
2378   // Map of ICV to their values at specific program point.
2379   EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
2380                   InternalControlVar::ICV___last>
2381       ICVReplacementValuesMap;
2382 
2383   ChangeStatus updateImpl(Attributor &A) override {
2384     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2385 
2386     Function *F = getAnchorScope();
2387 
2388     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2389 
2390     for (InternalControlVar ICV : TrackableICVs) {
2391       auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2392 
2393       auto &ValuesMap = ICVReplacementValuesMap[ICV];
2394       auto TrackValues = [&](Use &U, Function &) {
2395         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
2396         if (!CI)
2397           return false;
2398 
2399         // FIXME: handle setters with more that 1 arguments.
2400         /// Track new value.
2401         if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
2402           HasChanged = ChangeStatus::CHANGED;
2403 
2404         return false;
2405       };
2406 
2407       auto CallCheck = [&](Instruction &I) {
2408         Optional<Value *> ReplVal = getValueForCall(A, I, ICV);
2409         if (ReplVal.hasValue() &&
2410             ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
2411           HasChanged = ChangeStatus::CHANGED;
2412 
2413         return true;
2414       };
2415 
2416       // Track all changes of an ICV.
2417       SetterRFI.foreachUse(TrackValues, F);
2418 
2419       bool UsedAssumedInformation = false;
2420       A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
2421                                 UsedAssumedInformation,
2422                                 /* CheckBBLivenessOnly */ true);
2423 
2424       /// TODO: Figure out a way to avoid adding entry in
2425       /// ICVReplacementValuesMap
2426       Instruction *Entry = &F->getEntryBlock().front();
2427       if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
2428         ValuesMap.insert(std::make_pair(Entry, nullptr));
2429     }
2430 
2431     return HasChanged;
2432   }
2433 
2434   /// Helper to check if \p I is a call and get the value for it if it is
2435   /// unique.
2436   Optional<Value *> getValueForCall(Attributor &A, const Instruction &I,
2437                                     InternalControlVar &ICV) const {
2438 
2439     const auto *CB = dyn_cast<CallBase>(&I);
2440     if (!CB || CB->hasFnAttr("no_openmp") ||
2441         CB->hasFnAttr("no_openmp_routines"))
2442       return None;
2443 
2444     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2445     auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
2446     auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2447     Function *CalledFunction = CB->getCalledFunction();
2448 
2449     // Indirect call, assume ICV changes.
2450     if (CalledFunction == nullptr)
2451       return nullptr;
2452     if (CalledFunction == GetterRFI.Declaration)
2453       return None;
2454     if (CalledFunction == SetterRFI.Declaration) {
2455       if (ICVReplacementValuesMap[ICV].count(&I))
2456         return ICVReplacementValuesMap[ICV].lookup(&I);
2457 
2458       return nullptr;
2459     }
2460 
2461     // Since we don't know, assume it changes the ICV.
2462     if (CalledFunction->isDeclaration())
2463       return nullptr;
2464 
2465     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2466         *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);
2467 
2468     if (ICVTrackingAA.isAssumedTracked()) {
2469       Optional<Value *> URV = ICVTrackingAA.getUniqueReplacementValue(ICV);
2470       if (!URV || (*URV && AA::isValidAtPosition(**URV, I, OMPInfoCache)))
2471         return URV;
2472     }
2473 
2474     // If we don't know, assume it changes.
2475     return nullptr;
2476   }
2477 
2478   // We don't check unique value for a function, so return None.
2479   Optional<Value *>
2480   getUniqueReplacementValue(InternalControlVar ICV) const override {
2481     return None;
2482   }
2483 
2484   /// Return the value with which \p I can be replaced for specific \p ICV.
2485   Optional<Value *> getReplacementValue(InternalControlVar ICV,
2486                                         const Instruction *I,
2487                                         Attributor &A) const override {
2488     const auto &ValuesMap = ICVReplacementValuesMap[ICV];
2489     if (ValuesMap.count(I))
2490       return ValuesMap.lookup(I);
2491 
2492     SmallVector<const Instruction *, 16> Worklist;
2493     SmallPtrSet<const Instruction *, 16> Visited;
2494     Worklist.push_back(I);
2495 
2496     Optional<Value *> ReplVal;
2497 
2498     while (!Worklist.empty()) {
2499       const Instruction *CurrInst = Worklist.pop_back_val();
2500       if (!Visited.insert(CurrInst).second)
2501         continue;
2502 
2503       const BasicBlock *CurrBB = CurrInst->getParent();
2504 
2505       // Go up and look for all potential setters/calls that might change the
2506       // ICV.
2507       while ((CurrInst = CurrInst->getPrevNode())) {
2508         if (ValuesMap.count(CurrInst)) {
2509           Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
2510           // Unknown value, track new.
2511           if (!ReplVal.hasValue()) {
2512             ReplVal = NewReplVal;
2513             break;
2514           }
2515 
2516           // If we found a new value, we can't know the icv value anymore.
2517           if (NewReplVal.hasValue())
2518             if (ReplVal != NewReplVal)
2519               return nullptr;
2520 
2521           break;
2522         }
2523 
2524         Optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV);
2525         if (!NewReplVal.hasValue())
2526           continue;
2527 
2528         // Unknown value, track new.
2529         if (!ReplVal.hasValue()) {
2530           ReplVal = NewReplVal;
2531           break;
2532         }
2533 
2534         // if (NewReplVal.hasValue())
2535         // We found a new value, we can't know the icv value anymore.
2536         if (ReplVal != NewReplVal)
2537           return nullptr;
2538       }
2539 
2540       // If we are in the same BB and we have a value, we are done.
2541       if (CurrBB == I->getParent() && ReplVal.hasValue())
2542         return ReplVal;
2543 
2544       // Go through all predecessors and add terminators for analysis.
2545       for (const BasicBlock *Pred : predecessors(CurrBB))
2546         if (const Instruction *Terminator = Pred->getTerminator())
2547           Worklist.push_back(Terminator);
2548     }
2549 
2550     return ReplVal;
2551   }
2552 };
2553 
2554 struct AAICVTrackerFunctionReturned : AAICVTracker {
2555   AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
2556       : AAICVTracker(IRP, A) {}
2557 
2558   // FIXME: come up with better string.
2559   const std::string getAsStr() const override {
2560     return "ICVTrackerFunctionReturned";
2561   }
2562 
2563   // FIXME: come up with some stats.
2564   void trackStatistics() const override {}
2565 
2566   /// We don't manifest anything for this AA.
2567   ChangeStatus manifest(Attributor &A) override {
2568     return ChangeStatus::UNCHANGED;
2569   }
2570 
2571   // Map of ICV to their values at specific program point.
2572   EnumeratedArray<Optional<Value *>, InternalControlVar,
2573                   InternalControlVar::ICV___last>
2574       ICVReplacementValuesMap;
2575 
2576   /// Return the value with which \p I can be replaced for specific \p ICV.
2577   Optional<Value *>
2578   getUniqueReplacementValue(InternalControlVar ICV) const override {
2579     return ICVReplacementValuesMap[ICV];
2580   }
2581 
2582   ChangeStatus updateImpl(Attributor &A) override {
2583     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2584     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2585         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
2586 
2587     if (!ICVTrackingAA.isAssumedTracked())
2588       return indicatePessimisticFixpoint();
2589 
2590     for (InternalControlVar ICV : TrackableICVs) {
2591       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
2592       Optional<Value *> UniqueICVValue;
2593 
2594       auto CheckReturnInst = [&](Instruction &I) {
2595         Optional<Value *> NewReplVal =
2596             ICVTrackingAA.getReplacementValue(ICV, &I, A);
2597 
2598         // If we found a second ICV value there is no unique returned value.
2599         if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal)
2600           return false;
2601 
2602         UniqueICVValue = NewReplVal;
2603 
2604         return true;
2605       };
2606 
2607       bool UsedAssumedInformation = false;
2608       if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret},
2609                                      UsedAssumedInformation,
2610                                      /* CheckBBLivenessOnly */ true))
2611         UniqueICVValue = nullptr;
2612 
2613       if (UniqueICVValue == ReplVal)
2614         continue;
2615 
2616       ReplVal = UniqueICVValue;
2617       Changed = ChangeStatus::CHANGED;
2618     }
2619 
2620     return Changed;
2621   }
2622 };
2623 
2624 struct AAICVTrackerCallSite : AAICVTracker {
2625   AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A)
2626       : AAICVTracker(IRP, A) {}
2627 
2628   void initialize(Attributor &A) override {
2629     Function *F = getAnchorScope();
2630     if (!F || !A.isFunctionIPOAmendable(*F))
2631       indicatePessimisticFixpoint();
2632 
2633     // We only initialize this AA for getters, so we need to know which ICV it
2634     // gets.
2635     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2636     for (InternalControlVar ICV : TrackableICVs) {
2637       auto ICVInfo = OMPInfoCache.ICVs[ICV];
2638       auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter];
2639       if (Getter.Declaration == getAssociatedFunction()) {
2640         AssociatedICV = ICVInfo.Kind;
2641         return;
2642       }
2643     }
2644 
2645     /// Unknown ICV.
2646     indicatePessimisticFixpoint();
2647   }
2648 
2649   ChangeStatus manifest(Attributor &A) override {
2650     if (!ReplVal.hasValue() || !ReplVal.getValue())
2651       return ChangeStatus::UNCHANGED;
2652 
2653     A.changeValueAfterManifest(*getCtxI(), **ReplVal);
2654     A.deleteAfterManifest(*getCtxI());
2655 
2656     return ChangeStatus::CHANGED;
2657   }
2658 
2659   // FIXME: come up with better string.
2660   const std::string getAsStr() const override { return "ICVTrackerCallSite"; }
2661 
2662   // FIXME: come up with some stats.
2663   void trackStatistics() const override {}
2664 
2665   InternalControlVar AssociatedICV;
2666   Optional<Value *> ReplVal;
2667 
2668   ChangeStatus updateImpl(Attributor &A) override {
2669     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2670         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
2671 
2672     // We don't have any information, so we assume it changes the ICV.
2673     if (!ICVTrackingAA.isAssumedTracked())
2674       return indicatePessimisticFixpoint();
2675 
2676     Optional<Value *> NewReplVal =
2677         ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A);
2678 
2679     if (ReplVal == NewReplVal)
2680       return ChangeStatus::UNCHANGED;
2681 
2682     ReplVal = NewReplVal;
2683     return ChangeStatus::CHANGED;
2684   }
2685 
2686   // Return the value with which associated value can be replaced for specific
2687   // \p ICV.
2688   Optional<Value *>
2689   getUniqueReplacementValue(InternalControlVar ICV) const override {
2690     return ReplVal;
2691   }
2692 };
2693 
2694 struct AAICVTrackerCallSiteReturned : AAICVTracker {
2695   AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A)
2696       : AAICVTracker(IRP, A) {}
2697 
2698   // FIXME: come up with better string.
2699   const std::string getAsStr() const override {
2700     return "ICVTrackerCallSiteReturned";
2701   }
2702 
2703   // FIXME: come up with some stats.
2704   void trackStatistics() const override {}
2705 
2706   /// We don't manifest anything for this AA.
2707   ChangeStatus manifest(Attributor &A) override {
2708     return ChangeStatus::UNCHANGED;
2709   }
2710 
2711   // Map of ICV to their values at specific program point.
2712   EnumeratedArray<Optional<Value *>, InternalControlVar,
2713                   InternalControlVar::ICV___last>
2714       ICVReplacementValuesMap;
2715 
2716   /// Return the value with which associated value can be replaced for specific
2717   /// \p ICV.
2718   Optional<Value *>
2719   getUniqueReplacementValue(InternalControlVar ICV) const override {
2720     return ICVReplacementValuesMap[ICV];
2721   }
2722 
2723   ChangeStatus updateImpl(Attributor &A) override {
2724     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2725     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2726         *this, IRPosition::returned(*getAssociatedFunction()),
2727         DepClassTy::REQUIRED);
2728 
2729     // We don't have any information, so we assume it changes the ICV.
2730     if (!ICVTrackingAA.isAssumedTracked())
2731       return indicatePessimisticFixpoint();
2732 
2733     for (InternalControlVar ICV : TrackableICVs) {
2734       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
2735       Optional<Value *> NewReplVal =
2736           ICVTrackingAA.getUniqueReplacementValue(ICV);
2737 
2738       if (ReplVal == NewReplVal)
2739         continue;
2740 
2741       ReplVal = NewReplVal;
2742       Changed = ChangeStatus::CHANGED;
2743     }
2744     return Changed;
2745   }
2746 };
2747 
2748 struct AAExecutionDomainFunction : public AAExecutionDomain {
2749   AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A)
2750       : AAExecutionDomain(IRP, A) {}
2751 
2752   const std::string getAsStr() const override {
2753     return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) +
2754            "/" + std::to_string(NumBBs) + " BBs thread 0 only.";
2755   }
2756 
2757   /// See AbstractAttribute::trackStatistics().
2758   void trackStatistics() const override {}
2759 
2760   void initialize(Attributor &A) override {
2761     Function *F = getAnchorScope();
2762     for (const auto &BB : *F)
2763       SingleThreadedBBs.insert(&BB);
2764     NumBBs = SingleThreadedBBs.size();
2765   }
2766 
2767   ChangeStatus manifest(Attributor &A) override {
2768     LLVM_DEBUG({
2769       for (const BasicBlock *BB : SingleThreadedBBs)
2770         dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " "
2771                << BB->getName() << " is executed by a single thread.\n";
2772     });
2773     return ChangeStatus::UNCHANGED;
2774   }
2775 
2776   ChangeStatus updateImpl(Attributor &A) override;
2777 
2778   /// Check if an instruction is executed by a single thread.
2779   bool isExecutedByInitialThreadOnly(const Instruction &I) const override {
2780     return isExecutedByInitialThreadOnly(*I.getParent());
2781   }
2782 
2783   bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override {
2784     return isValidState() && SingleThreadedBBs.contains(&BB);
2785   }
2786 
2787   /// Set of basic blocks that are executed by a single thread.
2788   SmallSetVector<const BasicBlock *, 16> SingleThreadedBBs;
2789 
2790   /// Total number of basic blocks in this function.
2791   long unsigned NumBBs;
2792 };
2793 
2794 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
2795   Function *F = getAnchorScope();
2796   ReversePostOrderTraversal<Function *> RPOT(F);
2797   auto NumSingleThreadedBBs = SingleThreadedBBs.size();
2798 
2799   bool AllCallSitesKnown;
2800   auto PredForCallSite = [&](AbstractCallSite ACS) {
2801     const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>(
2802         *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2803         DepClassTy::REQUIRED);
2804     return ACS.isDirectCall() &&
2805            ExecutionDomainAA.isExecutedByInitialThreadOnly(
2806                *ACS.getInstruction());
2807   };
2808 
2809   if (!A.checkForAllCallSites(PredForCallSite, *this,
2810                               /* RequiresAllCallSites */ true,
2811                               AllCallSitesKnown))
2812     SingleThreadedBBs.remove(&F->getEntryBlock());
2813 
2814   auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2815   auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
2816 
2817   // Check if the edge into the successor block contains a condition that only
2818   // lets the main thread execute it.
2819   auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) {
2820     if (!Edge || !Edge->isConditional())
2821       return false;
2822     if (Edge->getSuccessor(0) != SuccessorBB)
2823       return false;
2824 
2825     auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition());
2826     if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality())
2827       return false;
2828 
2829     ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1));
2830     if (!C)
2831       return false;
2832 
2833     // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!)
2834     if (C->isAllOnesValue()) {
2835       auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0));
2836       CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr;
2837       if (!CB)
2838         return false;
2839       const int InitModeArgNo = 1;
2840       auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo));
2841       return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC);
2842     }
2843 
2844     if (C->isZero()) {
2845       // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x()
2846       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2847         if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x)
2848           return true;
2849 
2850       // Match: 0 == llvm.amdgcn.workitem.id.x()
2851       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2852         if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x)
2853           return true;
2854     }
2855 
2856     return false;
2857   };
2858 
2859   // Merge all the predecessor states into the current basic block. A basic
2860   // block is executed by a single thread if all of its predecessors are.
2861   auto MergePredecessorStates = [&](BasicBlock *BB) {
2862     if (pred_empty(BB))
2863       return SingleThreadedBBs.contains(BB);
2864 
2865     bool IsInitialThread = true;
2866     for (BasicBlock *PredBB : predecessors(BB)) {
2867       if (!IsInitialThreadOnly(dyn_cast<BranchInst>(PredBB->getTerminator()),
2868                                BB))
2869         IsInitialThread &= SingleThreadedBBs.contains(PredBB);
2870     }
2871 
2872     return IsInitialThread;
2873   };
2874 
2875   for (auto *BB : RPOT) {
2876     if (!MergePredecessorStates(BB))
2877       SingleThreadedBBs.remove(BB);
2878   }
2879 
2880   return (NumSingleThreadedBBs == SingleThreadedBBs.size())
2881              ? ChangeStatus::UNCHANGED
2882              : ChangeStatus::CHANGED;
2883 }
2884 
2885 /// Try to replace memory allocation calls called by a single thread with a
2886 /// static buffer of shared memory.
2887 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> {
2888   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2889   AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2890 
2891   /// Create an abstract attribute view for the position \p IRP.
2892   static AAHeapToShared &createForPosition(const IRPosition &IRP,
2893                                            Attributor &A);
2894 
2895   /// Returns true if HeapToShared conversion is assumed to be possible.
2896   virtual bool isAssumedHeapToShared(CallBase &CB) const = 0;
2897 
2898   /// Returns true if HeapToShared conversion is assumed and the CB is a
2899   /// callsite to a free operation to be removed.
2900   virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0;
2901 
2902   /// See AbstractAttribute::getName().
2903   const std::string getName() const override { return "AAHeapToShared"; }
2904 
2905   /// See AbstractAttribute::getIdAddr().
2906   const char *getIdAddr() const override { return &ID; }
2907 
2908   /// This function should return true if the type of the \p AA is
2909   /// AAHeapToShared.
2910   static bool classof(const AbstractAttribute *AA) {
2911     return (AA->getIdAddr() == &ID);
2912   }
2913 
2914   /// Unique ID (due to the unique address)
2915   static const char ID;
2916 };
2917 
2918 struct AAHeapToSharedFunction : public AAHeapToShared {
2919   AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A)
2920       : AAHeapToShared(IRP, A) {}
2921 
2922   const std::string getAsStr() const override {
2923     return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) +
2924            " malloc calls eligible.";
2925   }
2926 
2927   /// See AbstractAttribute::trackStatistics().
2928   void trackStatistics() const override {}
2929 
2930   /// This functions finds free calls that will be removed by the
2931   /// HeapToShared transformation.
2932   void findPotentialRemovedFreeCalls(Attributor &A) {
2933     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2934     auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2935 
2936     PotentialRemovedFreeCalls.clear();
2937     // Update free call users of found malloc calls.
2938     for (CallBase *CB : MallocCalls) {
2939       SmallVector<CallBase *, 4> FreeCalls;
2940       for (auto *U : CB->users()) {
2941         CallBase *C = dyn_cast<CallBase>(U);
2942         if (C && C->getCalledFunction() == FreeRFI.Declaration)
2943           FreeCalls.push_back(C);
2944       }
2945 
2946       if (FreeCalls.size() != 1)
2947         continue;
2948 
2949       PotentialRemovedFreeCalls.insert(FreeCalls.front());
2950     }
2951   }
2952 
2953   void initialize(Attributor &A) override {
2954     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2955     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
2956 
2957     for (User *U : RFI.Declaration->users())
2958       if (CallBase *CB = dyn_cast<CallBase>(U))
2959         MallocCalls.insert(CB);
2960 
2961     findPotentialRemovedFreeCalls(A);
2962   }
2963 
2964   bool isAssumedHeapToShared(CallBase &CB) const override {
2965     return isValidState() && MallocCalls.count(&CB);
2966   }
2967 
2968   bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override {
2969     return isValidState() && PotentialRemovedFreeCalls.count(&CB);
2970   }
2971 
2972   ChangeStatus manifest(Attributor &A) override {
2973     if (MallocCalls.empty())
2974       return ChangeStatus::UNCHANGED;
2975 
2976     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2977     auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2978 
2979     Function *F = getAnchorScope();
2980     auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this,
2981                                             DepClassTy::OPTIONAL);
2982 
2983     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2984     for (CallBase *CB : MallocCalls) {
2985       // Skip replacing this if HeapToStack has already claimed it.
2986       if (HS && HS->isAssumedHeapToStack(*CB))
2987         continue;
2988 
2989       // Find the unique free call to remove it.
2990       SmallVector<CallBase *, 4> FreeCalls;
2991       for (auto *U : CB->users()) {
2992         CallBase *C = dyn_cast<CallBase>(U);
2993         if (C && C->getCalledFunction() == FreeCall.Declaration)
2994           FreeCalls.push_back(C);
2995       }
2996       if (FreeCalls.size() != 1)
2997         continue;
2998 
2999       auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0));
3000 
3001       LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB
3002                         << " with " << AllocSize->getZExtValue()
3003                         << " bytes of shared memory\n");
3004 
3005       // Create a new shared memory buffer of the same size as the allocation
3006       // and replace all the uses of the original allocation with it.
3007       Module *M = CB->getModule();
3008       Type *Int8Ty = Type::getInt8Ty(M->getContext());
3009       Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue());
3010       auto *SharedMem = new GlobalVariable(
3011           *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage,
3012           UndefValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr,
3013           GlobalValue::NotThreadLocal,
3014           static_cast<unsigned>(AddressSpace::Shared));
3015       auto *NewBuffer =
3016           ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo());
3017 
3018       auto Remark = [&](OptimizationRemark OR) {
3019         return OR << "Replaced globalized variable with "
3020                   << ore::NV("SharedMemory", AllocSize->getZExtValue())
3021                   << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ")
3022                   << "of shared memory.";
3023       };
3024       A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark);
3025 
3026       MaybeAlign Alignment = CB->getRetAlign();
3027       assert(Alignment &&
3028              "HeapToShared on allocation without alignment attribute");
3029       SharedMem->setAlignment(MaybeAlign(Alignment));
3030 
3031       A.changeValueAfterManifest(*CB, *NewBuffer);
3032       A.deleteAfterManifest(*CB);
3033       A.deleteAfterManifest(*FreeCalls.front());
3034 
3035       NumBytesMovedToSharedMemory += AllocSize->getZExtValue();
3036       Changed = ChangeStatus::CHANGED;
3037     }
3038 
3039     return Changed;
3040   }
3041 
3042   ChangeStatus updateImpl(Attributor &A) override {
3043     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3044     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3045     Function *F = getAnchorScope();
3046 
3047     auto NumMallocCalls = MallocCalls.size();
3048 
3049     // Only consider malloc calls executed by a single thread with a constant.
3050     for (User *U : RFI.Declaration->users()) {
3051       const auto &ED = A.getAAFor<AAExecutionDomain>(
3052           *this, IRPosition::function(*F), DepClassTy::REQUIRED);
3053       if (CallBase *CB = dyn_cast<CallBase>(U))
3054         if (!isa<ConstantInt>(CB->getArgOperand(0)) ||
3055             !ED.isExecutedByInitialThreadOnly(*CB))
3056           MallocCalls.remove(CB);
3057     }
3058 
3059     findPotentialRemovedFreeCalls(A);
3060 
3061     if (NumMallocCalls != MallocCalls.size())
3062       return ChangeStatus::CHANGED;
3063 
3064     return ChangeStatus::UNCHANGED;
3065   }
3066 
3067   /// Collection of all malloc calls in a function.
3068   SmallSetVector<CallBase *, 4> MallocCalls;
3069   /// Collection of potentially removed free calls in a function.
3070   SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls;
3071 };
3072 
3073 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> {
3074   using Base = StateWrapper<KernelInfoState, AbstractAttribute>;
3075   AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
3076 
3077   /// Statistics are tracked as part of manifest for now.
3078   void trackStatistics() const override {}
3079 
3080   /// See AbstractAttribute::getAsStr()
3081   const std::string getAsStr() const override {
3082     if (!isValidState())
3083       return "<invalid>";
3084     return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD"
3085                                                             : "generic") +
3086            std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]"
3087                                                                : "") +
3088            std::string(" #PRs: ") +
3089            (ReachedKnownParallelRegions.isValidState()
3090                 ? std::to_string(ReachedKnownParallelRegions.size())
3091                 : "<invalid>") +
3092            ", #Unknown PRs: " +
3093            (ReachedUnknownParallelRegions.isValidState()
3094                 ? std::to_string(ReachedUnknownParallelRegions.size())
3095                 : "<invalid>") +
3096            ", #Reaching Kernels: " +
3097            (ReachingKernelEntries.isValidState()
3098                 ? std::to_string(ReachingKernelEntries.size())
3099                 : "<invalid>");
3100   }
3101 
3102   /// Create an abstract attribute biew for the position \p IRP.
3103   static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A);
3104 
3105   /// See AbstractAttribute::getName()
3106   const std::string getName() const override { return "AAKernelInfo"; }
3107 
3108   /// See AbstractAttribute::getIdAddr()
3109   const char *getIdAddr() const override { return &ID; }
3110 
3111   /// This function should return true if the type of the \p AA is AAKernelInfo
3112   static bool classof(const AbstractAttribute *AA) {
3113     return (AA->getIdAddr() == &ID);
3114   }
3115 
3116   static const char ID;
3117 };
3118 
3119 /// The function kernel info abstract attribute, basically, what can we say
3120 /// about a function with regards to the KernelInfoState.
3121 struct AAKernelInfoFunction : AAKernelInfo {
3122   AAKernelInfoFunction(const IRPosition &IRP, Attributor &A)
3123       : AAKernelInfo(IRP, A) {}
3124 
3125   SmallPtrSet<Instruction *, 4> GuardedInstructions;
3126 
3127   SmallPtrSetImpl<Instruction *> &getGuardedInstructions() {
3128     return GuardedInstructions;
3129   }
3130 
3131   /// See AbstractAttribute::initialize(...).
3132   void initialize(Attributor &A) override {
3133     // This is a high-level transform that might change the constant arguments
3134     // of the init and dinit calls. We need to tell the Attributor about this
3135     // to avoid other parts using the current constant value for simpliication.
3136     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3137 
3138     Function *Fn = getAnchorScope();
3139     if (!OMPInfoCache.Kernels.count(Fn))
3140       return;
3141 
3142     // Add itself to the reaching kernel and set IsKernelEntry.
3143     ReachingKernelEntries.insert(Fn);
3144     IsKernelEntry = true;
3145 
3146     OMPInformationCache::RuntimeFunctionInfo &InitRFI =
3147         OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
3148     OMPInformationCache::RuntimeFunctionInfo &DeinitRFI =
3149         OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit];
3150 
3151     // For kernels we perform more initialization work, first we find the init
3152     // and deinit calls.
3153     auto StoreCallBase = [](Use &U,
3154                             OMPInformationCache::RuntimeFunctionInfo &RFI,
3155                             CallBase *&Storage) {
3156       CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI);
3157       assert(CB &&
3158              "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!");
3159       assert(!Storage &&
3160              "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!");
3161       Storage = CB;
3162       return false;
3163     };
3164     InitRFI.foreachUse(
3165         [&](Use &U, Function &) {
3166           StoreCallBase(U, InitRFI, KernelInitCB);
3167           return false;
3168         },
3169         Fn);
3170     DeinitRFI.foreachUse(
3171         [&](Use &U, Function &) {
3172           StoreCallBase(U, DeinitRFI, KernelDeinitCB);
3173           return false;
3174         },
3175         Fn);
3176 
3177     // Ignore kernels without initializers such as global constructors.
3178     if (!KernelInitCB || !KernelDeinitCB) {
3179       indicateOptimisticFixpoint();
3180       return;
3181     }
3182 
3183     // For kernels we might need to initialize/finalize the IsSPMD state and
3184     // we need to register a simplification callback so that the Attributor
3185     // knows the constant arguments to __kmpc_target_init and
3186     // __kmpc_target_deinit might actually change.
3187 
3188     Attributor::SimplifictionCallbackTy StateMachineSimplifyCB =
3189         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3190             bool &UsedAssumedInformation) -> Optional<Value *> {
3191       // IRP represents the "use generic state machine" argument of an
3192       // __kmpc_target_init call. We will answer this one with the internal
3193       // state. As long as we are not in an invalid state, we will create a
3194       // custom state machine so the value should be a `i1 false`. If we are
3195       // in an invalid state, we won't change the value that is in the IR.
3196       if (!ReachedKnownParallelRegions.isValidState())
3197         return nullptr;
3198       // If we have disabled state machine rewrites, don't make a custom one.
3199       if (DisableOpenMPOptStateMachineRewrite)
3200         return nullptr;
3201       if (AA)
3202         A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3203       UsedAssumedInformation = !isAtFixpoint();
3204       auto *FalseVal =
3205           ConstantInt::getBool(IRP.getAnchorValue().getContext(), false);
3206       return FalseVal;
3207     };
3208 
3209     Attributor::SimplifictionCallbackTy ModeSimplifyCB =
3210         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3211             bool &UsedAssumedInformation) -> Optional<Value *> {
3212       // IRP represents the "SPMDCompatibilityTracker" argument of an
3213       // __kmpc_target_init or
3214       // __kmpc_target_deinit call. We will answer this one with the internal
3215       // state.
3216       if (!SPMDCompatibilityTracker.isValidState())
3217         return nullptr;
3218       if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3219         if (AA)
3220           A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3221         UsedAssumedInformation = true;
3222       } else {
3223         UsedAssumedInformation = false;
3224       }
3225       auto *Val = ConstantInt::getSigned(
3226           IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()),
3227           SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD
3228                                                : OMP_TGT_EXEC_MODE_GENERIC);
3229       return Val;
3230     };
3231 
3232     Attributor::SimplifictionCallbackTy IsGenericModeSimplifyCB =
3233         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3234             bool &UsedAssumedInformation) -> Optional<Value *> {
3235       // IRP represents the "RequiresFullRuntime" argument of an
3236       // __kmpc_target_init or __kmpc_target_deinit call. We will answer this
3237       // one with the internal state of the SPMDCompatibilityTracker, so if
3238       // generic then true, if SPMD then false.
3239       if (!SPMDCompatibilityTracker.isValidState())
3240         return nullptr;
3241       if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3242         if (AA)
3243           A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3244         UsedAssumedInformation = true;
3245       } else {
3246         UsedAssumedInformation = false;
3247       }
3248       auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(),
3249                                        !SPMDCompatibilityTracker.isAssumed());
3250       return Val;
3251     };
3252 
3253     constexpr const int InitModeArgNo = 1;
3254     constexpr const int DeinitModeArgNo = 1;
3255     constexpr const int InitUseStateMachineArgNo = 2;
3256     constexpr const int InitRequiresFullRuntimeArgNo = 3;
3257     constexpr const int DeinitRequiresFullRuntimeArgNo = 2;
3258     A.registerSimplificationCallback(
3259         IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo),
3260         StateMachineSimplifyCB);
3261     A.registerSimplificationCallback(
3262         IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo),
3263         ModeSimplifyCB);
3264     A.registerSimplificationCallback(
3265         IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo),
3266         ModeSimplifyCB);
3267     A.registerSimplificationCallback(
3268         IRPosition::callsite_argument(*KernelInitCB,
3269                                       InitRequiresFullRuntimeArgNo),
3270         IsGenericModeSimplifyCB);
3271     A.registerSimplificationCallback(
3272         IRPosition::callsite_argument(*KernelDeinitCB,
3273                                       DeinitRequiresFullRuntimeArgNo),
3274         IsGenericModeSimplifyCB);
3275 
3276     // Check if we know we are in SPMD-mode already.
3277     ConstantInt *ModeArg =
3278         dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3279     if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3280       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
3281     // This is a generic region but SPMDization is disabled so stop tracking.
3282     else if (DisableOpenMPOptSPMDization)
3283       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3284   }
3285 
3286   /// Sanitize the string \p S such that it is a suitable global symbol name.
3287   static std::string sanitizeForGlobalName(std::string S) {
3288     std::replace_if(
3289         S.begin(), S.end(),
3290         [](const char C) {
3291           return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
3292                    (C >= '0' && C <= '9') || C == '_');
3293         },
3294         '.');
3295     return S;
3296   }
3297 
3298   /// Modify the IR based on the KernelInfoState as the fixpoint iteration is
3299   /// finished now.
3300   ChangeStatus manifest(Attributor &A) override {
3301     // If we are not looking at a kernel with __kmpc_target_init and
3302     // __kmpc_target_deinit call we cannot actually manifest the information.
3303     if (!KernelInitCB || !KernelDeinitCB)
3304       return ChangeStatus::UNCHANGED;
3305 
3306     // If we can we change the execution mode to SPMD-mode otherwise we build a
3307     // custom state machine.
3308     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3309     if (!changeToSPMDMode(A, Changed))
3310       return buildCustomStateMachine(A);
3311 
3312     return Changed;
3313   }
3314 
3315   bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
3316     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3317 
3318     if (!SPMDCompatibilityTracker.isAssumed()) {
3319       for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
3320         if (!NonCompatibleI)
3321           continue;
3322 
3323         // Skip diagnostics on calls to known OpenMP runtime functions for now.
3324         if (auto *CB = dyn_cast<CallBase>(NonCompatibleI))
3325           if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction()))
3326             continue;
3327 
3328         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3329           ORA << "Value has potential side effects preventing SPMD-mode "
3330                  "execution";
3331           if (isa<CallBase>(NonCompatibleI)) {
3332             ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to "
3333                    "the called function to override";
3334           }
3335           return ORA << ".";
3336         };
3337         A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121",
3338                                                  Remark);
3339 
3340         LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: "
3341                           << *NonCompatibleI << "\n");
3342       }
3343 
3344       return false;
3345     }
3346 
3347     // Check if the kernel is already in SPMD mode, if so, return success.
3348     Function *Kernel = getAnchorScope();
3349     GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable(
3350         (Kernel->getName() + "_exec_mode").str());
3351     assert(ExecMode && "Kernel without exec mode?");
3352     assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!");
3353 
3354     // Set the global exec mode flag to indicate SPMD-Generic mode.
3355     assert(isa<ConstantInt>(ExecMode->getInitializer()) &&
3356            "ExecMode is not an integer!");
3357     const int8_t ExecModeVal =
3358         cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue();
3359     if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC)
3360       return true;
3361 
3362     // We will now unconditionally modify the IR, indicate a change.
3363     Changed = ChangeStatus::CHANGED;
3364 
3365     auto CreateGuardedRegion = [&](Instruction *RegionStartI,
3366                                    Instruction *RegionEndI) {
3367       LoopInfo *LI = nullptr;
3368       DominatorTree *DT = nullptr;
3369       MemorySSAUpdater *MSU = nullptr;
3370       using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
3371 
3372       BasicBlock *ParentBB = RegionStartI->getParent();
3373       Function *Fn = ParentBB->getParent();
3374       Module &M = *Fn->getParent();
3375 
3376       // Create all the blocks and logic.
3377       // ParentBB:
3378       //    goto RegionCheckTidBB
3379       // RegionCheckTidBB:
3380       //    Tid = __kmpc_hardware_thread_id()
3381       //    if (Tid != 0)
3382       //        goto RegionBarrierBB
3383       // RegionStartBB:
3384       //    <execute instructions guarded>
3385       //    goto RegionEndBB
3386       // RegionEndBB:
3387       //    <store escaping values to shared mem>
3388       //    goto RegionBarrierBB
3389       //  RegionBarrierBB:
3390       //    __kmpc_simple_barrier_spmd()
3391       //    // second barrier is omitted if lacking escaping values.
3392       //    <load escaping values from shared mem>
3393       //    __kmpc_simple_barrier_spmd()
3394       //    goto RegionExitBB
3395       // RegionExitBB:
3396       //    <execute rest of instructions>
3397 
3398       BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(),
3399                                            DT, LI, MSU, "region.guarded.end");
3400       BasicBlock *RegionBarrierBB =
3401           SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI,
3402                      MSU, "region.barrier");
3403       BasicBlock *RegionExitBB =
3404           SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(),
3405                      DT, LI, MSU, "region.exit");
3406       BasicBlock *RegionStartBB =
3407           SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded");
3408 
3409       assert(ParentBB->getUniqueSuccessor() == RegionStartBB &&
3410              "Expected a different CFG");
3411 
3412       BasicBlock *RegionCheckTidBB = SplitBlock(
3413           ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid");
3414 
3415       // Register basic blocks with the Attributor.
3416       A.registerManifestAddedBasicBlock(*RegionEndBB);
3417       A.registerManifestAddedBasicBlock(*RegionBarrierBB);
3418       A.registerManifestAddedBasicBlock(*RegionExitBB);
3419       A.registerManifestAddedBasicBlock(*RegionStartBB);
3420       A.registerManifestAddedBasicBlock(*RegionCheckTidBB);
3421 
3422       bool HasBroadcastValues = false;
3423       // Find escaping outputs from the guarded region to outside users and
3424       // broadcast their values to them.
3425       for (Instruction &I : *RegionStartBB) {
3426         SmallPtrSet<Instruction *, 4> OutsideUsers;
3427         for (User *Usr : I.users()) {
3428           Instruction &UsrI = *cast<Instruction>(Usr);
3429           if (UsrI.getParent() != RegionStartBB)
3430             OutsideUsers.insert(&UsrI);
3431         }
3432 
3433         if (OutsideUsers.empty())
3434           continue;
3435 
3436         HasBroadcastValues = true;
3437 
3438         // Emit a global variable in shared memory to store the broadcasted
3439         // value.
3440         auto *SharedMem = new GlobalVariable(
3441             M, I.getType(), /* IsConstant */ false,
3442             GlobalValue::InternalLinkage, UndefValue::get(I.getType()),
3443             sanitizeForGlobalName(
3444                 (I.getName() + ".guarded.output.alloc").str()),
3445             nullptr, GlobalValue::NotThreadLocal,
3446             static_cast<unsigned>(AddressSpace::Shared));
3447 
3448         // Emit a store instruction to update the value.
3449         new StoreInst(&I, SharedMem, RegionEndBB->getTerminator());
3450 
3451         LoadInst *LoadI = new LoadInst(I.getType(), SharedMem,
3452                                        I.getName() + ".guarded.output.load",
3453                                        RegionBarrierBB->getTerminator());
3454 
3455         // Emit a load instruction and replace uses of the output value.
3456         for (Instruction *UsrI : OutsideUsers)
3457           UsrI->replaceUsesOfWith(&I, LoadI);
3458       }
3459 
3460       auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3461 
3462       // Go to tid check BB in ParentBB.
3463       const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
3464       ParentBB->getTerminator()->eraseFromParent();
3465       OpenMPIRBuilder::LocationDescription Loc(
3466           InsertPointTy(ParentBB, ParentBB->end()), DL);
3467       OMPInfoCache.OMPBuilder.updateToLocation(Loc);
3468       uint32_t SrcLocStrSize;
3469       auto *SrcLocStr =
3470           OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3471       Value *Ident =
3472           OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3473       BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL);
3474 
3475       // Add check for Tid in RegionCheckTidBB
3476       RegionCheckTidBB->getTerminator()->eraseFromParent();
3477       OpenMPIRBuilder::LocationDescription LocRegionCheckTid(
3478           InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL);
3479       OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid);
3480       FunctionCallee HardwareTidFn =
3481           OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3482               M, OMPRTL___kmpc_get_hardware_thread_id_in_block);
3483       CallInst *Tid =
3484           OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {});
3485       Tid->setDebugLoc(DL);
3486       OMPInfoCache.setCallingConvention(HardwareTidFn, Tid);
3487       Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid);
3488       OMPInfoCache.OMPBuilder.Builder
3489           .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB)
3490           ->setDebugLoc(DL);
3491 
3492       // First barrier for synchronization, ensures main thread has updated
3493       // values.
3494       FunctionCallee BarrierFn =
3495           OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3496               M, OMPRTL___kmpc_barrier_simple_spmd);
3497       OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy(
3498           RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt()));
3499       CallInst *Barrier =
3500           OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid});
3501       Barrier->setDebugLoc(DL);
3502       OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3503 
3504       // Second barrier ensures workers have read broadcast values.
3505       if (HasBroadcastValues) {
3506         CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "",
3507                                              RegionBarrierBB->getTerminator());
3508         Barrier->setDebugLoc(DL);
3509         OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3510       }
3511     };
3512 
3513     auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3514     SmallPtrSet<BasicBlock *, 8> Visited;
3515     for (Instruction *GuardedI : SPMDCompatibilityTracker) {
3516       BasicBlock *BB = GuardedI->getParent();
3517       if (!Visited.insert(BB).second)
3518         continue;
3519 
3520       SmallVector<std::pair<Instruction *, Instruction *>> Reorders;
3521       Instruction *LastEffect = nullptr;
3522       BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend();
3523       while (++IP != IPEnd) {
3524         if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory())
3525           continue;
3526         Instruction *I = &*IP;
3527         if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI))
3528           continue;
3529         if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) {
3530           LastEffect = nullptr;
3531           continue;
3532         }
3533         if (LastEffect)
3534           Reorders.push_back({I, LastEffect});
3535         LastEffect = &*IP;
3536       }
3537       for (auto &Reorder : Reorders)
3538         Reorder.first->moveBefore(Reorder.second);
3539     }
3540 
3541     SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions;
3542 
3543     for (Instruction *GuardedI : SPMDCompatibilityTracker) {
3544       BasicBlock *BB = GuardedI->getParent();
3545       auto *CalleeAA = A.lookupAAFor<AAKernelInfo>(
3546           IRPosition::function(*GuardedI->getFunction()), nullptr,
3547           DepClassTy::NONE);
3548       assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo");
3549       auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA);
3550       // Continue if instruction is already guarded.
3551       if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI))
3552         continue;
3553 
3554       Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr;
3555       for (Instruction &I : *BB) {
3556         // If instruction I needs to be guarded update the guarded region
3557         // bounds.
3558         if (SPMDCompatibilityTracker.contains(&I)) {
3559           CalleeAAFunction.getGuardedInstructions().insert(&I);
3560           if (GuardedRegionStart)
3561             GuardedRegionEnd = &I;
3562           else
3563             GuardedRegionStart = GuardedRegionEnd = &I;
3564 
3565           continue;
3566         }
3567 
3568         // Instruction I does not need guarding, store
3569         // any region found and reset bounds.
3570         if (GuardedRegionStart) {
3571           GuardedRegions.push_back(
3572               std::make_pair(GuardedRegionStart, GuardedRegionEnd));
3573           GuardedRegionStart = nullptr;
3574           GuardedRegionEnd = nullptr;
3575         }
3576       }
3577     }
3578 
3579     for (auto &GR : GuardedRegions)
3580       CreateGuardedRegion(GR.first, GR.second);
3581 
3582     // Adjust the global exec mode flag that tells the runtime what mode this
3583     // kernel is executed in.
3584     assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC &&
3585            "Initially non-SPMD kernel has SPMD exec mode!");
3586     ExecMode->setInitializer(
3587         ConstantInt::get(ExecMode->getInitializer()->getType(),
3588                          ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD));
3589 
3590     // Next rewrite the init and deinit calls to indicate we use SPMD-mode now.
3591     const int InitModeArgNo = 1;
3592     const int DeinitModeArgNo = 1;
3593     const int InitUseStateMachineArgNo = 2;
3594     const int InitRequiresFullRuntimeArgNo = 3;
3595     const int DeinitRequiresFullRuntimeArgNo = 2;
3596 
3597     auto &Ctx = getAnchorValue().getContext();
3598     A.changeUseAfterManifest(
3599         KernelInitCB->getArgOperandUse(InitModeArgNo),
3600         *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
3601                                 OMP_TGT_EXEC_MODE_SPMD));
3602     A.changeUseAfterManifest(
3603         KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo),
3604         *ConstantInt::getBool(Ctx, false));
3605     A.changeUseAfterManifest(
3606         KernelDeinitCB->getArgOperandUse(DeinitModeArgNo),
3607         *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
3608                                 OMP_TGT_EXEC_MODE_SPMD));
3609     A.changeUseAfterManifest(
3610         KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo),
3611         *ConstantInt::getBool(Ctx, false));
3612     A.changeUseAfterManifest(
3613         KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo),
3614         *ConstantInt::getBool(Ctx, false));
3615 
3616     ++NumOpenMPTargetRegionKernelsSPMD;
3617 
3618     auto Remark = [&](OptimizationRemark OR) {
3619       return OR << "Transformed generic-mode kernel to SPMD-mode.";
3620     };
3621     A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark);
3622     return true;
3623   };
3624 
3625   ChangeStatus buildCustomStateMachine(Attributor &A) {
3626     // If we have disabled state machine rewrites, don't make a custom one
3627     if (DisableOpenMPOptStateMachineRewrite)
3628       return ChangeStatus::UNCHANGED;
3629 
3630     // Don't rewrite the state machine if we are not in a valid state.
3631     if (!ReachedKnownParallelRegions.isValidState())
3632       return ChangeStatus::UNCHANGED;
3633 
3634     const int InitModeArgNo = 1;
3635     const int InitUseStateMachineArgNo = 2;
3636 
3637     // Check if the current configuration is non-SPMD and generic state machine.
3638     // If we already have SPMD mode or a custom state machine we do not need to
3639     // go any further. If it is anything but a constant something is weird and
3640     // we give up.
3641     ConstantInt *UseStateMachine = dyn_cast<ConstantInt>(
3642         KernelInitCB->getArgOperand(InitUseStateMachineArgNo));
3643     ConstantInt *Mode =
3644         dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3645 
3646     // If we are stuck with generic mode, try to create a custom device (=GPU)
3647     // state machine which is specialized for the parallel regions that are
3648     // reachable by the kernel.
3649     if (!UseStateMachine || UseStateMachine->isZero() || !Mode ||
3650         (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3651       return ChangeStatus::UNCHANGED;
3652 
3653     // If not SPMD mode, indicate we use a custom state machine now.
3654     auto &Ctx = getAnchorValue().getContext();
3655     auto *FalseVal = ConstantInt::getBool(Ctx, false);
3656     A.changeUseAfterManifest(
3657         KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal);
3658 
3659     // If we don't actually need a state machine we are done here. This can
3660     // happen if there simply are no parallel regions. In the resulting kernel
3661     // all worker threads will simply exit right away, leaving the main thread
3662     // to do the work alone.
3663     if (!mayContainParallelRegion()) {
3664       ++NumOpenMPTargetRegionKernelsWithoutStateMachine;
3665 
3666       auto Remark = [&](OptimizationRemark OR) {
3667         return OR << "Removing unused state machine from generic-mode kernel.";
3668       };
3669       A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark);
3670 
3671       return ChangeStatus::CHANGED;
3672     }
3673 
3674     // Keep track in the statistics of our new shiny custom state machine.
3675     if (ReachedUnknownParallelRegions.empty()) {
3676       ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback;
3677 
3678       auto Remark = [&](OptimizationRemark OR) {
3679         return OR << "Rewriting generic-mode kernel with a customized state "
3680                      "machine.";
3681       };
3682       A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark);
3683     } else {
3684       ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback;
3685 
3686       auto Remark = [&](OptimizationRemarkAnalysis OR) {
3687         return OR << "Generic-mode kernel is executed with a customized state "
3688                      "machine that requires a fallback.";
3689       };
3690       A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark);
3691 
3692       // Tell the user why we ended up with a fallback.
3693       for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) {
3694         if (!UnknownParallelRegionCB)
3695           continue;
3696         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3697           return ORA << "Call may contain unknown parallel regions. Use "
3698                      << "`__attribute__((assume(\"omp_no_parallelism\")))` to "
3699                         "override.";
3700         };
3701         A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB,
3702                                                  "OMP133", Remark);
3703       }
3704     }
3705 
3706     // Create all the blocks:
3707     //
3708     //                       InitCB = __kmpc_target_init(...)
3709     //                       BlockHwSize =
3710     //                         __kmpc_get_hardware_num_threads_in_block();
3711     //                       WarpSize = __kmpc_get_warp_size();
3712     //                       BlockSize = BlockHwSize - WarpSize;
3713     //                       if (InitCB >= BlockSize) return;
3714     // IsWorkerCheckBB:      bool IsWorker = InitCB >= 0;
3715     //                       if (IsWorker) {
3716     // SMBeginBB:               __kmpc_barrier_simple_generic(...);
3717     //                         void *WorkFn;
3718     //                         bool Active = __kmpc_kernel_parallel(&WorkFn);
3719     //                         if (!WorkFn) return;
3720     // SMIsActiveCheckBB:       if (Active) {
3721     // SMIfCascadeCurrentBB:      if      (WorkFn == <ParFn0>)
3722     //                              ParFn0(...);
3723     // SMIfCascadeCurrentBB:      else if (WorkFn == <ParFn1>)
3724     //                              ParFn1(...);
3725     //                            ...
3726     // SMIfCascadeCurrentBB:      else
3727     //                              ((WorkFnTy*)WorkFn)(...);
3728     // SMEndParallelBB:           __kmpc_kernel_end_parallel(...);
3729     //                          }
3730     // SMDoneBB:                __kmpc_barrier_simple_generic(...);
3731     //                          goto SMBeginBB;
3732     //                       }
3733     // UserCodeEntryBB:      // user code
3734     //                       __kmpc_target_deinit(...)
3735     //
3736     Function *Kernel = getAssociatedFunction();
3737     assert(Kernel && "Expected an associated function!");
3738 
3739     BasicBlock *InitBB = KernelInitCB->getParent();
3740     BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock(
3741         KernelInitCB->getNextNode(), "thread.user_code.check");
3742     BasicBlock *IsWorkerCheckBB =
3743         BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB);
3744     BasicBlock *StateMachineBeginBB = BasicBlock::Create(
3745         Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB);
3746     BasicBlock *StateMachineFinishedBB = BasicBlock::Create(
3747         Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB);
3748     BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create(
3749         Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB);
3750     BasicBlock *StateMachineIfCascadeCurrentBB =
3751         BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check",
3752                            Kernel, UserCodeEntryBB);
3753     BasicBlock *StateMachineEndParallelBB =
3754         BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end",
3755                            Kernel, UserCodeEntryBB);
3756     BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create(
3757         Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB);
3758     A.registerManifestAddedBasicBlock(*InitBB);
3759     A.registerManifestAddedBasicBlock(*UserCodeEntryBB);
3760     A.registerManifestAddedBasicBlock(*IsWorkerCheckBB);
3761     A.registerManifestAddedBasicBlock(*StateMachineBeginBB);
3762     A.registerManifestAddedBasicBlock(*StateMachineFinishedBB);
3763     A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB);
3764     A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB);
3765     A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB);
3766     A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB);
3767 
3768     const DebugLoc &DLoc = KernelInitCB->getDebugLoc();
3769     ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc);
3770     InitBB->getTerminator()->eraseFromParent();
3771 
3772     Module &M = *Kernel->getParent();
3773     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3774     FunctionCallee BlockHwSizeFn =
3775         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3776             M, OMPRTL___kmpc_get_hardware_num_threads_in_block);
3777     FunctionCallee WarpSizeFn =
3778         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3779             M, OMPRTL___kmpc_get_warp_size);
3780     CallInst *BlockHwSize =
3781         CallInst::Create(BlockHwSizeFn, "block.hw_size", InitBB);
3782     OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize);
3783     BlockHwSize->setDebugLoc(DLoc);
3784     CallInst *WarpSize = CallInst::Create(WarpSizeFn, "warp.size", InitBB);
3785     OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize);
3786     WarpSize->setDebugLoc(DLoc);
3787     Instruction *BlockSize =
3788         BinaryOperator::CreateSub(BlockHwSize, WarpSize, "block.size", InitBB);
3789     BlockSize->setDebugLoc(DLoc);
3790     Instruction *IsMainOrWorker =
3791         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_SLT, KernelInitCB,
3792                          BlockSize, "thread.is_main_or_worker", InitBB);
3793     IsMainOrWorker->setDebugLoc(DLoc);
3794     BranchInst::Create(IsWorkerCheckBB, StateMachineFinishedBB, IsMainOrWorker,
3795                        InitBB);
3796 
3797     Instruction *IsWorker =
3798         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB,
3799                          ConstantInt::get(KernelInitCB->getType(), -1),
3800                          "thread.is_worker", IsWorkerCheckBB);
3801     IsWorker->setDebugLoc(DLoc);
3802     BranchInst::Create(StateMachineBeginBB, UserCodeEntryBB, IsWorker,
3803                        IsWorkerCheckBB);
3804 
3805     // Create local storage for the work function pointer.
3806     const DataLayout &DL = M.getDataLayout();
3807     Type *VoidPtrTy = Type::getInt8PtrTy(Ctx);
3808     Instruction *WorkFnAI =
3809         new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr,
3810                        "worker.work_fn.addr", &Kernel->getEntryBlock().front());
3811     WorkFnAI->setDebugLoc(DLoc);
3812 
3813     OMPInfoCache.OMPBuilder.updateToLocation(
3814         OpenMPIRBuilder::LocationDescription(
3815             IRBuilder<>::InsertPoint(StateMachineBeginBB,
3816                                      StateMachineBeginBB->end()),
3817             DLoc));
3818 
3819     Value *Ident = KernelInitCB->getArgOperand(0);
3820     Value *GTid = KernelInitCB;
3821 
3822     FunctionCallee BarrierFn =
3823         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3824             M, OMPRTL___kmpc_barrier_simple_generic);
3825     CallInst *Barrier =
3826         CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB);
3827     OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3828     Barrier->setDebugLoc(DLoc);
3829 
3830     if (WorkFnAI->getType()->getPointerAddressSpace() !=
3831         (unsigned int)AddressSpace::Generic) {
3832       WorkFnAI = new AddrSpaceCastInst(
3833           WorkFnAI,
3834           PointerType::getWithSamePointeeType(
3835               cast<PointerType>(WorkFnAI->getType()),
3836               (unsigned int)AddressSpace::Generic),
3837           WorkFnAI->getName() + ".generic", StateMachineBeginBB);
3838       WorkFnAI->setDebugLoc(DLoc);
3839     }
3840 
3841     FunctionCallee KernelParallelFn =
3842         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3843             M, OMPRTL___kmpc_kernel_parallel);
3844     CallInst *IsActiveWorker = CallInst::Create(
3845         KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB);
3846     OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker);
3847     IsActiveWorker->setDebugLoc(DLoc);
3848     Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn",
3849                                        StateMachineBeginBB);
3850     WorkFn->setDebugLoc(DLoc);
3851 
3852     FunctionType *ParallelRegionFnTy = FunctionType::get(
3853         Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)},
3854         false);
3855     Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
3856         WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast",
3857         StateMachineBeginBB);
3858 
3859     Instruction *IsDone =
3860         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn,
3861                          Constant::getNullValue(VoidPtrTy), "worker.is_done",
3862                          StateMachineBeginBB);
3863     IsDone->setDebugLoc(DLoc);
3864     BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB,
3865                        IsDone, StateMachineBeginBB)
3866         ->setDebugLoc(DLoc);
3867 
3868     BranchInst::Create(StateMachineIfCascadeCurrentBB,
3869                        StateMachineDoneBarrierBB, IsActiveWorker,
3870                        StateMachineIsActiveCheckBB)
3871         ->setDebugLoc(DLoc);
3872 
3873     Value *ZeroArg =
3874         Constant::getNullValue(ParallelRegionFnTy->getParamType(0));
3875 
3876     // Now that we have most of the CFG skeleton it is time for the if-cascade
3877     // that checks the function pointer we got from the runtime against the
3878     // parallel regions we expect, if there are any.
3879     for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) {
3880       auto *ParallelRegion = ReachedKnownParallelRegions[I];
3881       BasicBlock *PRExecuteBB = BasicBlock::Create(
3882           Ctx, "worker_state_machine.parallel_region.execute", Kernel,
3883           StateMachineEndParallelBB);
3884       CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB)
3885           ->setDebugLoc(DLoc);
3886       BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB)
3887           ->setDebugLoc(DLoc);
3888 
3889       BasicBlock *PRNextBB =
3890           BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check",
3891                              Kernel, StateMachineEndParallelBB);
3892 
3893       // Check if we need to compare the pointer at all or if we can just
3894       // call the parallel region function.
3895       Value *IsPR;
3896       if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) {
3897         Instruction *CmpI = ICmpInst::Create(
3898             ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion,
3899             "worker.check_parallel_region", StateMachineIfCascadeCurrentBB);
3900         CmpI->setDebugLoc(DLoc);
3901         IsPR = CmpI;
3902       } else {
3903         IsPR = ConstantInt::getTrue(Ctx);
3904       }
3905 
3906       BranchInst::Create(PRExecuteBB, PRNextBB, IsPR,
3907                          StateMachineIfCascadeCurrentBB)
3908           ->setDebugLoc(DLoc);
3909       StateMachineIfCascadeCurrentBB = PRNextBB;
3910     }
3911 
3912     // At the end of the if-cascade we place the indirect function pointer call
3913     // in case we might need it, that is if there can be parallel regions we
3914     // have not handled in the if-cascade above.
3915     if (!ReachedUnknownParallelRegions.empty()) {
3916       StateMachineIfCascadeCurrentBB->setName(
3917           "worker_state_machine.parallel_region.fallback.execute");
3918       CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "",
3919                        StateMachineIfCascadeCurrentBB)
3920           ->setDebugLoc(DLoc);
3921     }
3922     BranchInst::Create(StateMachineEndParallelBB,
3923                        StateMachineIfCascadeCurrentBB)
3924         ->setDebugLoc(DLoc);
3925 
3926     FunctionCallee EndParallelFn =
3927         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3928             M, OMPRTL___kmpc_kernel_end_parallel);
3929     CallInst *EndParallel =
3930         CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB);
3931     OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel);
3932     EndParallel->setDebugLoc(DLoc);
3933     BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB)
3934         ->setDebugLoc(DLoc);
3935 
3936     CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB)
3937         ->setDebugLoc(DLoc);
3938     BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB)
3939         ->setDebugLoc(DLoc);
3940 
3941     return ChangeStatus::CHANGED;
3942   }
3943 
3944   /// Fixpoint iteration update function. Will be called every time a dependence
3945   /// changed its state (and in the beginning).
3946   ChangeStatus updateImpl(Attributor &A) override {
3947     KernelInfoState StateBefore = getState();
3948 
3949     // Callback to check a read/write instruction.
3950     auto CheckRWInst = [&](Instruction &I) {
3951       // We handle calls later.
3952       if (isa<CallBase>(I))
3953         return true;
3954       // We only care about write effects.
3955       if (!I.mayWriteToMemory())
3956         return true;
3957       if (auto *SI = dyn_cast<StoreInst>(&I)) {
3958         SmallVector<const Value *> Objects;
3959         getUnderlyingObjects(SI->getPointerOperand(), Objects);
3960         if (llvm::all_of(Objects,
3961                          [](const Value *Obj) { return isa<AllocaInst>(Obj); }))
3962           return true;
3963         // Check for AAHeapToStack moved objects which must not be guarded.
3964         auto &HS = A.getAAFor<AAHeapToStack>(
3965             *this, IRPosition::function(*I.getFunction()),
3966             DepClassTy::OPTIONAL);
3967         if (llvm::all_of(Objects, [&HS](const Value *Obj) {
3968               auto *CB = dyn_cast<CallBase>(Obj);
3969               if (!CB)
3970                 return false;
3971               return HS.isAssumedHeapToStack(*CB);
3972             })) {
3973           return true;
3974         }
3975       }
3976 
3977       // Insert instruction that needs guarding.
3978       SPMDCompatibilityTracker.insert(&I);
3979       return true;
3980     };
3981 
3982     bool UsedAssumedInformationInCheckRWInst = false;
3983     if (!SPMDCompatibilityTracker.isAtFixpoint())
3984       if (!A.checkForAllReadWriteInstructions(
3985               CheckRWInst, *this, UsedAssumedInformationInCheckRWInst))
3986         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3987 
3988     bool UsedAssumedInformationFromReachingKernels = false;
3989     if (!IsKernelEntry) {
3990       updateParallelLevels(A);
3991 
3992       bool AllReachingKernelsKnown = true;
3993       updateReachingKernelEntries(A, AllReachingKernelsKnown);
3994       UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown;
3995 
3996       if (!ParallelLevels.isValidState())
3997         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3998       else if (!ReachingKernelEntries.isValidState())
3999         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4000       else if (!SPMDCompatibilityTracker.empty()) {
4001         // Check if all reaching kernels agree on the mode as we can otherwise
4002         // not guard instructions. We might not be sure about the mode so we
4003         // we cannot fix the internal spmd-zation state either.
4004         int SPMD = 0, Generic = 0;
4005         for (auto *Kernel : ReachingKernelEntries) {
4006           auto &CBAA = A.getAAFor<AAKernelInfo>(
4007               *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL);
4008           if (CBAA.SPMDCompatibilityTracker.isValidState() &&
4009               CBAA.SPMDCompatibilityTracker.isAssumed())
4010             ++SPMD;
4011           else
4012             ++Generic;
4013           if (!CBAA.SPMDCompatibilityTracker.isAtFixpoint())
4014             UsedAssumedInformationFromReachingKernels = true;
4015         }
4016         if (SPMD != 0 && Generic != 0)
4017           SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4018       }
4019     }
4020 
4021     // Callback to check a call instruction.
4022     bool AllParallelRegionStatesWereFixed = true;
4023     bool AllSPMDStatesWereFixed = true;
4024     auto CheckCallInst = [&](Instruction &I) {
4025       auto &CB = cast<CallBase>(I);
4026       auto &CBAA = A.getAAFor<AAKernelInfo>(
4027           *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4028       getState() ^= CBAA.getState();
4029       AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint();
4030       AllParallelRegionStatesWereFixed &=
4031           CBAA.ReachedKnownParallelRegions.isAtFixpoint();
4032       AllParallelRegionStatesWereFixed &=
4033           CBAA.ReachedUnknownParallelRegions.isAtFixpoint();
4034       return true;
4035     };
4036 
4037     bool UsedAssumedInformationInCheckCallInst = false;
4038     if (!A.checkForAllCallLikeInstructions(
4039             CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) {
4040       LLVM_DEBUG(dbgs() << TAG
4041                         << "Failed to visit all call-like instructions!\n";);
4042       return indicatePessimisticFixpoint();
4043     }
4044 
4045     // If we haven't used any assumed information for the reached parallel
4046     // region states we can fix it.
4047     if (!UsedAssumedInformationInCheckCallInst &&
4048         AllParallelRegionStatesWereFixed) {
4049       ReachedKnownParallelRegions.indicateOptimisticFixpoint();
4050       ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
4051     }
4052 
4053     // If we are sure there are no parallel regions in the kernel we do not
4054     // want SPMD mode.
4055     if (IsKernelEntry && ReachedUnknownParallelRegions.isAtFixpoint() &&
4056         ReachedKnownParallelRegions.isAtFixpoint() &&
4057         ReachedUnknownParallelRegions.isValidState() &&
4058         ReachedKnownParallelRegions.isValidState() &&
4059         !mayContainParallelRegion())
4060       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4061 
4062     // If we haven't used any assumed information for the SPMD state we can fix
4063     // it.
4064     if (!UsedAssumedInformationInCheckRWInst &&
4065         !UsedAssumedInformationInCheckCallInst &&
4066         !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed)
4067       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4068 
4069     return StateBefore == getState() ? ChangeStatus::UNCHANGED
4070                                      : ChangeStatus::CHANGED;
4071   }
4072 
4073 private:
4074   /// Update info regarding reaching kernels.
4075   void updateReachingKernelEntries(Attributor &A,
4076                                    bool &AllReachingKernelsKnown) {
4077     auto PredCallSite = [&](AbstractCallSite ACS) {
4078       Function *Caller = ACS.getInstruction()->getFunction();
4079 
4080       assert(Caller && "Caller is nullptr");
4081 
4082       auto &CAA = A.getOrCreateAAFor<AAKernelInfo>(
4083           IRPosition::function(*Caller), this, DepClassTy::REQUIRED);
4084       if (CAA.ReachingKernelEntries.isValidState()) {
4085         ReachingKernelEntries ^= CAA.ReachingKernelEntries;
4086         return true;
4087       }
4088 
4089       // We lost track of the caller of the associated function, any kernel
4090       // could reach now.
4091       ReachingKernelEntries.indicatePessimisticFixpoint();
4092 
4093       return true;
4094     };
4095 
4096     if (!A.checkForAllCallSites(PredCallSite, *this,
4097                                 true /* RequireAllCallSites */,
4098                                 AllReachingKernelsKnown))
4099       ReachingKernelEntries.indicatePessimisticFixpoint();
4100   }
4101 
4102   /// Update info regarding parallel levels.
4103   void updateParallelLevels(Attributor &A) {
4104     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4105     OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI =
4106         OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
4107 
4108     auto PredCallSite = [&](AbstractCallSite ACS) {
4109       Function *Caller = ACS.getInstruction()->getFunction();
4110 
4111       assert(Caller && "Caller is nullptr");
4112 
4113       auto &CAA =
4114           A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller));
4115       if (CAA.ParallelLevels.isValidState()) {
4116         // Any function that is called by `__kmpc_parallel_51` will not be
4117         // folded as the parallel level in the function is updated. In order to
4118         // get it right, all the analysis would depend on the implentation. That
4119         // said, if in the future any change to the implementation, the analysis
4120         // could be wrong. As a consequence, we are just conservative here.
4121         if (Caller == Parallel51RFI.Declaration) {
4122           ParallelLevels.indicatePessimisticFixpoint();
4123           return true;
4124         }
4125 
4126         ParallelLevels ^= CAA.ParallelLevels;
4127 
4128         return true;
4129       }
4130 
4131       // We lost track of the caller of the associated function, any kernel
4132       // could reach now.
4133       ParallelLevels.indicatePessimisticFixpoint();
4134 
4135       return true;
4136     };
4137 
4138     bool AllCallSitesKnown = true;
4139     if (!A.checkForAllCallSites(PredCallSite, *this,
4140                                 true /* RequireAllCallSites */,
4141                                 AllCallSitesKnown))
4142       ParallelLevels.indicatePessimisticFixpoint();
4143   }
4144 };
4145 
4146 /// The call site kernel info abstract attribute, basically, what can we say
4147 /// about a call site with regards to the KernelInfoState. For now this simply
4148 /// forwards the information from the callee.
4149 struct AAKernelInfoCallSite : AAKernelInfo {
4150   AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A)
4151       : AAKernelInfo(IRP, A) {}
4152 
4153   /// See AbstractAttribute::initialize(...).
4154   void initialize(Attributor &A) override {
4155     AAKernelInfo::initialize(A);
4156 
4157     CallBase &CB = cast<CallBase>(getAssociatedValue());
4158     Function *Callee = getAssociatedFunction();
4159 
4160     auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
4161         *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4162 
4163     // Check for SPMD-mode assumptions.
4164     if (AssumptionAA.hasAssumption("ompx_spmd_amenable")) {
4165       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4166       indicateOptimisticFixpoint();
4167     }
4168 
4169     // First weed out calls we do not care about, that is readonly/readnone
4170     // calls, intrinsics, and "no_openmp" calls. Neither of these can reach a
4171     // parallel region or anything else we are looking for.
4172     if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) {
4173       indicateOptimisticFixpoint();
4174       return;
4175     }
4176 
4177     // Next we check if we know the callee. If it is a known OpenMP function
4178     // we will handle them explicitly in the switch below. If it is not, we
4179     // will use an AAKernelInfo object on the callee to gather information and
4180     // merge that into the current state. The latter happens in the updateImpl.
4181     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4182     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4183     if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4184       // Unknown caller or declarations are not analyzable, we give up.
4185       if (!Callee || !A.isFunctionIPOAmendable(*Callee)) {
4186 
4187         // Unknown callees might contain parallel regions, except if they have
4188         // an appropriate assumption attached.
4189         if (!(AssumptionAA.hasAssumption("omp_no_openmp") ||
4190               AssumptionAA.hasAssumption("omp_no_parallelism")))
4191           ReachedUnknownParallelRegions.insert(&CB);
4192 
4193         // If SPMDCompatibilityTracker is not fixed, we need to give up on the
4194         // idea we can run something unknown in SPMD-mode.
4195         if (!SPMDCompatibilityTracker.isAtFixpoint()) {
4196           SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4197           SPMDCompatibilityTracker.insert(&CB);
4198         }
4199 
4200         // We have updated the state for this unknown call properly, there won't
4201         // be any change so we indicate a fixpoint.
4202         indicateOptimisticFixpoint();
4203       }
4204       // If the callee is known and can be used in IPO, we will update the state
4205       // based on the callee state in updateImpl.
4206       return;
4207     }
4208 
4209     const unsigned int WrapperFunctionArgNo = 6;
4210     RuntimeFunction RF = It->getSecond();
4211     switch (RF) {
4212     // All the functions we know are compatible with SPMD mode.
4213     case OMPRTL___kmpc_is_spmd_exec_mode:
4214     case OMPRTL___kmpc_distribute_static_fini:
4215     case OMPRTL___kmpc_for_static_fini:
4216     case OMPRTL___kmpc_global_thread_num:
4217     case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4218     case OMPRTL___kmpc_get_hardware_num_blocks:
4219     case OMPRTL___kmpc_single:
4220     case OMPRTL___kmpc_end_single:
4221     case OMPRTL___kmpc_master:
4222     case OMPRTL___kmpc_end_master:
4223     case OMPRTL___kmpc_barrier:
4224     case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2:
4225     case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2:
4226     case OMPRTL___kmpc_nvptx_end_reduce_nowait:
4227       break;
4228     case OMPRTL___kmpc_distribute_static_init_4:
4229     case OMPRTL___kmpc_distribute_static_init_4u:
4230     case OMPRTL___kmpc_distribute_static_init_8:
4231     case OMPRTL___kmpc_distribute_static_init_8u:
4232     case OMPRTL___kmpc_for_static_init_4:
4233     case OMPRTL___kmpc_for_static_init_4u:
4234     case OMPRTL___kmpc_for_static_init_8:
4235     case OMPRTL___kmpc_for_static_init_8u: {
4236       // Check the schedule and allow static schedule in SPMD mode.
4237       unsigned ScheduleArgOpNo = 2;
4238       auto *ScheduleTypeCI =
4239           dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo));
4240       unsigned ScheduleTypeVal =
4241           ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0;
4242       switch (OMPScheduleType(ScheduleTypeVal)) {
4243       case OMPScheduleType::Static:
4244       case OMPScheduleType::StaticChunked:
4245       case OMPScheduleType::Distribute:
4246       case OMPScheduleType::DistributeChunked:
4247         break;
4248       default:
4249         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4250         SPMDCompatibilityTracker.insert(&CB);
4251         break;
4252       };
4253     } break;
4254     case OMPRTL___kmpc_target_init:
4255       KernelInitCB = &CB;
4256       break;
4257     case OMPRTL___kmpc_target_deinit:
4258       KernelDeinitCB = &CB;
4259       break;
4260     case OMPRTL___kmpc_parallel_51:
4261       if (auto *ParallelRegion = dyn_cast<Function>(
4262               CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) {
4263         ReachedKnownParallelRegions.insert(ParallelRegion);
4264         break;
4265       }
4266       // The condition above should usually get the parallel region function
4267       // pointer and record it. In the off chance it doesn't we assume the
4268       // worst.
4269       ReachedUnknownParallelRegions.insert(&CB);
4270       break;
4271     case OMPRTL___kmpc_omp_task:
4272       // We do not look into tasks right now, just give up.
4273       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4274       SPMDCompatibilityTracker.insert(&CB);
4275       ReachedUnknownParallelRegions.insert(&CB);
4276       break;
4277     case OMPRTL___kmpc_alloc_shared:
4278     case OMPRTL___kmpc_free_shared:
4279       // Return without setting a fixpoint, to be resolved in updateImpl.
4280       return;
4281     default:
4282       // Unknown OpenMP runtime calls cannot be executed in SPMD-mode,
4283       // generally. However, they do not hide parallel regions.
4284       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4285       SPMDCompatibilityTracker.insert(&CB);
4286       break;
4287     }
4288     // All other OpenMP runtime calls will not reach parallel regions so they
4289     // can be safely ignored for now. Since it is a known OpenMP runtime call we
4290     // have now modeled all effects and there is no need for any update.
4291     indicateOptimisticFixpoint();
4292   }
4293 
4294   ChangeStatus updateImpl(Attributor &A) override {
4295     // TODO: Once we have call site specific value information we can provide
4296     //       call site specific liveness information and then it makes
4297     //       sense to specialize attributes for call sites arguments instead of
4298     //       redirecting requests to the callee argument.
4299     Function *F = getAssociatedFunction();
4300 
4301     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4302     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F);
4303 
4304     // If F is not a runtime function, propagate the AAKernelInfo of the callee.
4305     if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4306       const IRPosition &FnPos = IRPosition::function(*F);
4307       auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED);
4308       if (getState() == FnAA.getState())
4309         return ChangeStatus::UNCHANGED;
4310       getState() = FnAA.getState();
4311       return ChangeStatus::CHANGED;
4312     }
4313 
4314     // F is a runtime function that allocates or frees memory, check
4315     // AAHeapToStack and AAHeapToShared.
4316     KernelInfoState StateBefore = getState();
4317     assert((It->getSecond() == OMPRTL___kmpc_alloc_shared ||
4318             It->getSecond() == OMPRTL___kmpc_free_shared) &&
4319            "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call");
4320 
4321     CallBase &CB = cast<CallBase>(getAssociatedValue());
4322 
4323     auto &HeapToStackAA = A.getAAFor<AAHeapToStack>(
4324         *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4325     auto &HeapToSharedAA = A.getAAFor<AAHeapToShared>(
4326         *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4327 
4328     RuntimeFunction RF = It->getSecond();
4329 
4330     switch (RF) {
4331     // If neither HeapToStack nor HeapToShared assume the call is removed,
4332     // assume SPMD incompatibility.
4333     case OMPRTL___kmpc_alloc_shared:
4334       if (!HeapToStackAA.isAssumedHeapToStack(CB) &&
4335           !HeapToSharedAA.isAssumedHeapToShared(CB))
4336         SPMDCompatibilityTracker.insert(&CB);
4337       break;
4338     case OMPRTL___kmpc_free_shared:
4339       if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) &&
4340           !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB))
4341         SPMDCompatibilityTracker.insert(&CB);
4342       break;
4343     default:
4344       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4345       SPMDCompatibilityTracker.insert(&CB);
4346     }
4347 
4348     return StateBefore == getState() ? ChangeStatus::UNCHANGED
4349                                      : ChangeStatus::CHANGED;
4350   }
4351 };
4352 
4353 struct AAFoldRuntimeCall
4354     : public StateWrapper<BooleanState, AbstractAttribute> {
4355   using Base = StateWrapper<BooleanState, AbstractAttribute>;
4356 
4357   AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
4358 
4359   /// Statistics are tracked as part of manifest for now.
4360   void trackStatistics() const override {}
4361 
4362   /// Create an abstract attribute biew for the position \p IRP.
4363   static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP,
4364                                               Attributor &A);
4365 
4366   /// See AbstractAttribute::getName()
4367   const std::string getName() const override { return "AAFoldRuntimeCall"; }
4368 
4369   /// See AbstractAttribute::getIdAddr()
4370   const char *getIdAddr() const override { return &ID; }
4371 
4372   /// This function should return true if the type of the \p AA is
4373   /// AAFoldRuntimeCall
4374   static bool classof(const AbstractAttribute *AA) {
4375     return (AA->getIdAddr() == &ID);
4376   }
4377 
4378   static const char ID;
4379 };
4380 
4381 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
4382   AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A)
4383       : AAFoldRuntimeCall(IRP, A) {}
4384 
4385   /// See AbstractAttribute::getAsStr()
4386   const std::string getAsStr() const override {
4387     if (!isValidState())
4388       return "<invalid>";
4389 
4390     std::string Str("simplified value: ");
4391 
4392     if (!SimplifiedValue.hasValue())
4393       return Str + std::string("none");
4394 
4395     if (!SimplifiedValue.getValue())
4396       return Str + std::string("nullptr");
4397 
4398     if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue()))
4399       return Str + std::to_string(CI->getSExtValue());
4400 
4401     return Str + std::string("unknown");
4402   }
4403 
4404   void initialize(Attributor &A) override {
4405     if (DisableOpenMPOptFolding)
4406       indicatePessimisticFixpoint();
4407 
4408     Function *Callee = getAssociatedFunction();
4409 
4410     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4411     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4412     assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() &&
4413            "Expected a known OpenMP runtime function");
4414 
4415     RFKind = It->getSecond();
4416 
4417     CallBase &CB = cast<CallBase>(getAssociatedValue());
4418     A.registerSimplificationCallback(
4419         IRPosition::callsite_returned(CB),
4420         [&](const IRPosition &IRP, const AbstractAttribute *AA,
4421             bool &UsedAssumedInformation) -> Optional<Value *> {
4422           assert((isValidState() || (SimplifiedValue.hasValue() &&
4423                                      SimplifiedValue.getValue() == nullptr)) &&
4424                  "Unexpected invalid state!");
4425 
4426           if (!isAtFixpoint()) {
4427             UsedAssumedInformation = true;
4428             if (AA)
4429               A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
4430           }
4431           return SimplifiedValue;
4432         });
4433   }
4434 
4435   ChangeStatus updateImpl(Attributor &A) override {
4436     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4437     switch (RFKind) {
4438     case OMPRTL___kmpc_is_spmd_exec_mode:
4439       Changed |= foldIsSPMDExecMode(A);
4440       break;
4441     case OMPRTL___kmpc_is_generic_main_thread_id:
4442       Changed |= foldIsGenericMainThread(A);
4443       break;
4444     case OMPRTL___kmpc_parallel_level:
4445       Changed |= foldParallelLevel(A);
4446       break;
4447     case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4448       Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit");
4449       break;
4450     case OMPRTL___kmpc_get_hardware_num_blocks:
4451       Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams");
4452       break;
4453     default:
4454       llvm_unreachable("Unhandled OpenMP runtime function!");
4455     }
4456 
4457     return Changed;
4458   }
4459 
4460   ChangeStatus manifest(Attributor &A) override {
4461     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4462 
4463     if (SimplifiedValue.hasValue() && SimplifiedValue.getValue()) {
4464       Instruction &I = *getCtxI();
4465       A.changeValueAfterManifest(I, **SimplifiedValue);
4466       A.deleteAfterManifest(I);
4467 
4468       CallBase *CB = dyn_cast<CallBase>(&I);
4469       auto Remark = [&](OptimizationRemark OR) {
4470         if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue))
4471           return OR << "Replacing OpenMP runtime call "
4472                     << CB->getCalledFunction()->getName() << " with "
4473                     << ore::NV("FoldedValue", C->getZExtValue()) << ".";
4474         return OR << "Replacing OpenMP runtime call "
4475                   << CB->getCalledFunction()->getName() << ".";
4476       };
4477 
4478       if (CB && EnableVerboseRemarks)
4479         A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark);
4480 
4481       LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with "
4482                         << **SimplifiedValue << "\n");
4483 
4484       Changed = ChangeStatus::CHANGED;
4485     }
4486 
4487     return Changed;
4488   }
4489 
4490   ChangeStatus indicatePessimisticFixpoint() override {
4491     SimplifiedValue = nullptr;
4492     return AAFoldRuntimeCall::indicatePessimisticFixpoint();
4493   }
4494 
4495 private:
4496   /// Fold __kmpc_is_spmd_exec_mode into a constant if possible.
4497   ChangeStatus foldIsSPMDExecMode(Attributor &A) {
4498     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4499 
4500     unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
4501     unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
4502     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4503         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4504 
4505     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4506       return indicatePessimisticFixpoint();
4507 
4508     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4509       auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
4510                                           DepClassTy::REQUIRED);
4511 
4512       if (!AA.isValidState()) {
4513         SimplifiedValue = nullptr;
4514         return indicatePessimisticFixpoint();
4515       }
4516 
4517       if (AA.SPMDCompatibilityTracker.isAssumed()) {
4518         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4519           ++KnownSPMDCount;
4520         else
4521           ++AssumedSPMDCount;
4522       } else {
4523         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4524           ++KnownNonSPMDCount;
4525         else
4526           ++AssumedNonSPMDCount;
4527       }
4528     }
4529 
4530     if ((AssumedSPMDCount + KnownSPMDCount) &&
4531         (AssumedNonSPMDCount + KnownNonSPMDCount))
4532       return indicatePessimisticFixpoint();
4533 
4534     auto &Ctx = getAnchorValue().getContext();
4535     if (KnownSPMDCount || AssumedSPMDCount) {
4536       assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
4537              "Expected only SPMD kernels!");
4538       // All reaching kernels are in SPMD mode. Update all function calls to
4539       // __kmpc_is_spmd_exec_mode to 1.
4540       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
4541     } else if (KnownNonSPMDCount || AssumedNonSPMDCount) {
4542       assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
4543              "Expected only non-SPMD kernels!");
4544       // All reaching kernels are in non-SPMD mode. Update all function
4545       // calls to __kmpc_is_spmd_exec_mode to 0.
4546       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false);
4547     } else {
4548       // We have empty reaching kernels, therefore we cannot tell if the
4549       // associated call site can be folded. At this moment, SimplifiedValue
4550       // must be none.
4551       assert(!SimplifiedValue.hasValue() && "SimplifiedValue should be none");
4552     }
4553 
4554     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4555                                                     : ChangeStatus::CHANGED;
4556   }
4557 
4558   /// Fold __kmpc_is_generic_main_thread_id into a constant if possible.
4559   ChangeStatus foldIsGenericMainThread(Attributor &A) {
4560     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4561 
4562     CallBase &CB = cast<CallBase>(getAssociatedValue());
4563     Function *F = CB.getFunction();
4564     const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>(
4565         *this, IRPosition::function(*F), DepClassTy::REQUIRED);
4566 
4567     if (!ExecutionDomainAA.isValidState())
4568       return indicatePessimisticFixpoint();
4569 
4570     auto &Ctx = getAnchorValue().getContext();
4571     if (ExecutionDomainAA.isExecutedByInitialThreadOnly(CB))
4572       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
4573     else
4574       return indicatePessimisticFixpoint();
4575 
4576     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4577                                                     : ChangeStatus::CHANGED;
4578   }
4579 
4580   /// Fold __kmpc_parallel_level into a constant if possible.
4581   ChangeStatus foldParallelLevel(Attributor &A) {
4582     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4583 
4584     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4585         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4586 
4587     if (!CallerKernelInfoAA.ParallelLevels.isValidState())
4588       return indicatePessimisticFixpoint();
4589 
4590     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4591       return indicatePessimisticFixpoint();
4592 
4593     if (CallerKernelInfoAA.ReachingKernelEntries.empty()) {
4594       assert(!SimplifiedValue.hasValue() &&
4595              "SimplifiedValue should keep none at this point");
4596       return ChangeStatus::UNCHANGED;
4597     }
4598 
4599     unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
4600     unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
4601     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4602       auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
4603                                           DepClassTy::REQUIRED);
4604       if (!AA.SPMDCompatibilityTracker.isValidState())
4605         return indicatePessimisticFixpoint();
4606 
4607       if (AA.SPMDCompatibilityTracker.isAssumed()) {
4608         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4609           ++KnownSPMDCount;
4610         else
4611           ++AssumedSPMDCount;
4612       } else {
4613         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4614           ++KnownNonSPMDCount;
4615         else
4616           ++AssumedNonSPMDCount;
4617       }
4618     }
4619 
4620     if ((AssumedSPMDCount + KnownSPMDCount) &&
4621         (AssumedNonSPMDCount + KnownNonSPMDCount))
4622       return indicatePessimisticFixpoint();
4623 
4624     auto &Ctx = getAnchorValue().getContext();
4625     // If the caller can only be reached by SPMD kernel entries, the parallel
4626     // level is 1. Similarly, if the caller can only be reached by non-SPMD
4627     // kernel entries, it is 0.
4628     if (AssumedSPMDCount || KnownSPMDCount) {
4629       assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
4630              "Expected only SPMD kernels!");
4631       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1);
4632     } else {
4633       assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
4634              "Expected only non-SPMD kernels!");
4635       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0);
4636     }
4637     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4638                                                     : ChangeStatus::CHANGED;
4639   }
4640 
4641   ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) {
4642     // Specialize only if all the calls agree with the attribute constant value
4643     int32_t CurrentAttrValue = -1;
4644     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4645 
4646     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4647         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4648 
4649     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4650       return indicatePessimisticFixpoint();
4651 
4652     // Iterate over the kernels that reach this function
4653     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4654       int32_t NextAttrVal = -1;
4655       if (K->hasFnAttribute(Attr))
4656         NextAttrVal =
4657             std::stoi(K->getFnAttribute(Attr).getValueAsString().str());
4658 
4659       if (NextAttrVal == -1 ||
4660           (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal))
4661         return indicatePessimisticFixpoint();
4662       CurrentAttrValue = NextAttrVal;
4663     }
4664 
4665     if (CurrentAttrValue != -1) {
4666       auto &Ctx = getAnchorValue().getContext();
4667       SimplifiedValue =
4668           ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue);
4669     }
4670     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4671                                                     : ChangeStatus::CHANGED;
4672   }
4673 
4674   /// An optional value the associated value is assumed to fold to. That is, we
4675   /// assume the associated value (which is a call) can be replaced by this
4676   /// simplified value.
4677   Optional<Value *> SimplifiedValue;
4678 
4679   /// The runtime function kind of the callee of the associated call site.
4680   RuntimeFunction RFKind;
4681 };
4682 
4683 } // namespace
4684 
4685 /// Register folding callsite
4686 void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) {
4687   auto &RFI = OMPInfoCache.RFIs[RF];
4688   RFI.foreachUse(SCC, [&](Use &U, Function &F) {
4689     CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI);
4690     if (!CI)
4691       return false;
4692     A.getOrCreateAAFor<AAFoldRuntimeCall>(
4693         IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr,
4694         DepClassTy::NONE, /* ForceUpdate */ false,
4695         /* UpdateAfterInit */ false);
4696     return false;
4697   });
4698 }
4699 
4700 void OpenMPOpt::registerAAs(bool IsModulePass) {
4701   if (SCC.empty())
4702 
4703     return;
4704   if (IsModulePass) {
4705     // Ensure we create the AAKernelInfo AAs first and without triggering an
4706     // update. This will make sure we register all value simplification
4707     // callbacks before any other AA has the chance to create an AAValueSimplify
4708     // or similar.
4709     for (Function *Kernel : OMPInfoCache.Kernels)
4710       A.getOrCreateAAFor<AAKernelInfo>(
4711           IRPosition::function(*Kernel), /* QueryingAA */ nullptr,
4712           DepClassTy::NONE, /* ForceUpdate */ false,
4713           /* UpdateAfterInit */ false);
4714 
4715     registerFoldRuntimeCall(OMPRTL___kmpc_is_generic_main_thread_id);
4716     registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode);
4717     registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level);
4718     registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block);
4719     registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks);
4720   }
4721 
4722   // Create CallSite AA for all Getters.
4723   for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
4724     auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];
4725 
4726     auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];
4727 
4728     auto CreateAA = [&](Use &U, Function &Caller) {
4729       CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
4730       if (!CI)
4731         return false;
4732 
4733       auto &CB = cast<CallBase>(*CI);
4734 
4735       IRPosition CBPos = IRPosition::callsite_function(CB);
4736       A.getOrCreateAAFor<AAICVTracker>(CBPos);
4737       return false;
4738     };
4739 
4740     GetterRFI.foreachUse(SCC, CreateAA);
4741   }
4742   auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
4743   auto CreateAA = [&](Use &U, Function &F) {
4744     A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F));
4745     return false;
4746   };
4747   if (!DisableOpenMPOptDeglobalization)
4748     GlobalizationRFI.foreachUse(SCC, CreateAA);
4749 
4750   // Create an ExecutionDomain AA for every function and a HeapToStack AA for
4751   // every function if there is a device kernel.
4752   if (!isOpenMPDevice(M))
4753     return;
4754 
4755   for (auto *F : SCC) {
4756     if (F->isDeclaration())
4757       continue;
4758 
4759     A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(*F));
4760     if (!DisableOpenMPOptDeglobalization)
4761       A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(*F));
4762 
4763     for (auto &I : instructions(*F)) {
4764       if (auto *LI = dyn_cast<LoadInst>(&I)) {
4765         bool UsedAssumedInformation = false;
4766         A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr,
4767                                UsedAssumedInformation);
4768       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
4769         A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI));
4770       }
4771     }
4772   }
4773 }
4774 
4775 const char AAICVTracker::ID = 0;
4776 const char AAKernelInfo::ID = 0;
4777 const char AAExecutionDomain::ID = 0;
4778 const char AAHeapToShared::ID = 0;
4779 const char AAFoldRuntimeCall::ID = 0;
4780 
4781 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
4782                                               Attributor &A) {
4783   AAICVTracker *AA = nullptr;
4784   switch (IRP.getPositionKind()) {
4785   case IRPosition::IRP_INVALID:
4786   case IRPosition::IRP_FLOAT:
4787   case IRPosition::IRP_ARGUMENT:
4788   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4789     llvm_unreachable("ICVTracker can only be created for function position!");
4790   case IRPosition::IRP_RETURNED:
4791     AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
4792     break;
4793   case IRPosition::IRP_CALL_SITE_RETURNED:
4794     AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
4795     break;
4796   case IRPosition::IRP_CALL_SITE:
4797     AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
4798     break;
4799   case IRPosition::IRP_FUNCTION:
4800     AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
4801     break;
4802   }
4803 
4804   return *AA;
4805 }
4806 
4807 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP,
4808                                                         Attributor &A) {
4809   AAExecutionDomainFunction *AA = nullptr;
4810   switch (IRP.getPositionKind()) {
4811   case IRPosition::IRP_INVALID:
4812   case IRPosition::IRP_FLOAT:
4813   case IRPosition::IRP_ARGUMENT:
4814   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4815   case IRPosition::IRP_RETURNED:
4816   case IRPosition::IRP_CALL_SITE_RETURNED:
4817   case IRPosition::IRP_CALL_SITE:
4818     llvm_unreachable(
4819         "AAExecutionDomain can only be created for function position!");
4820   case IRPosition::IRP_FUNCTION:
4821     AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A);
4822     break;
4823   }
4824 
4825   return *AA;
4826 }
4827 
4828 AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP,
4829                                                   Attributor &A) {
4830   AAHeapToSharedFunction *AA = nullptr;
4831   switch (IRP.getPositionKind()) {
4832   case IRPosition::IRP_INVALID:
4833   case IRPosition::IRP_FLOAT:
4834   case IRPosition::IRP_ARGUMENT:
4835   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4836   case IRPosition::IRP_RETURNED:
4837   case IRPosition::IRP_CALL_SITE_RETURNED:
4838   case IRPosition::IRP_CALL_SITE:
4839     llvm_unreachable(
4840         "AAHeapToShared can only be created for function position!");
4841   case IRPosition::IRP_FUNCTION:
4842     AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A);
4843     break;
4844   }
4845 
4846   return *AA;
4847 }
4848 
4849 AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP,
4850                                               Attributor &A) {
4851   AAKernelInfo *AA = nullptr;
4852   switch (IRP.getPositionKind()) {
4853   case IRPosition::IRP_INVALID:
4854   case IRPosition::IRP_FLOAT:
4855   case IRPosition::IRP_ARGUMENT:
4856   case IRPosition::IRP_RETURNED:
4857   case IRPosition::IRP_CALL_SITE_RETURNED:
4858   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4859     llvm_unreachable("KernelInfo can only be created for function position!");
4860   case IRPosition::IRP_CALL_SITE:
4861     AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A);
4862     break;
4863   case IRPosition::IRP_FUNCTION:
4864     AA = new (A.Allocator) AAKernelInfoFunction(IRP, A);
4865     break;
4866   }
4867 
4868   return *AA;
4869 }
4870 
4871 AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP,
4872                                                         Attributor &A) {
4873   AAFoldRuntimeCall *AA = nullptr;
4874   switch (IRP.getPositionKind()) {
4875   case IRPosition::IRP_INVALID:
4876   case IRPosition::IRP_FLOAT:
4877   case IRPosition::IRP_ARGUMENT:
4878   case IRPosition::IRP_RETURNED:
4879   case IRPosition::IRP_FUNCTION:
4880   case IRPosition::IRP_CALL_SITE:
4881   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4882     llvm_unreachable("KernelInfo can only be created for call site position!");
4883   case IRPosition::IRP_CALL_SITE_RETURNED:
4884     AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A);
4885     break;
4886   }
4887 
4888   return *AA;
4889 }
4890 
4891 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
4892   if (!containsOpenMP(M))
4893     return PreservedAnalyses::all();
4894   if (DisableOpenMPOptimizations)
4895     return PreservedAnalyses::all();
4896 
4897   FunctionAnalysisManager &FAM =
4898       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4899   KernelSet Kernels = getDeviceKernels(M);
4900 
4901   auto IsCalled = [&](Function &F) {
4902     if (Kernels.contains(&F))
4903       return true;
4904     for (const User *U : F.users())
4905       if (!isa<BlockAddress>(U))
4906         return true;
4907     return false;
4908   };
4909 
4910   auto EmitRemark = [&](Function &F) {
4911     auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4912     ORE.emit([&]() {
4913       OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F);
4914       return ORA << "Could not internalize function. "
4915                  << "Some optimizations may not be possible. [OMP140]";
4916     });
4917   };
4918 
4919   // Create internal copies of each function if this is a kernel Module. This
4920   // allows iterprocedural passes to see every call edge.
4921   DenseMap<Function *, Function *> InternalizedMap;
4922   if (isOpenMPDevice(M)) {
4923     SmallPtrSet<Function *, 16> InternalizeFns;
4924     for (Function &F : M)
4925       if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) &&
4926           !DisableInternalization) {
4927         if (Attributor::isInternalizable(F)) {
4928           InternalizeFns.insert(&F);
4929         } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) {
4930           EmitRemark(F);
4931         }
4932       }
4933 
4934     Attributor::internalizeFunctions(InternalizeFns, InternalizedMap);
4935   }
4936 
4937   // Look at every function in the Module unless it was internalized.
4938   SmallVector<Function *, 16> SCC;
4939   for (Function &F : M)
4940     if (!F.isDeclaration() && !InternalizedMap.lookup(&F))
4941       SCC.push_back(&F);
4942 
4943   if (SCC.empty())
4944     return PreservedAnalyses::all();
4945 
4946   AnalysisGetter AG(FAM);
4947 
4948   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
4949     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
4950   };
4951 
4952   BumpPtrAllocator Allocator;
4953   CallGraphUpdater CGUpdater;
4954 
4955   SetVector<Function *> Functions(SCC.begin(), SCC.end());
4956   OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels);
4957 
4958   unsigned MaxFixpointIterations =
4959       (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
4960   Attributor A(Functions, InfoCache, CGUpdater, nullptr, true, false,
4961                MaxFixpointIterations, OREGetter, DEBUG_TYPE);
4962 
4963   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
4964   bool Changed = OMPOpt.run(true);
4965 
4966   // Optionally inline device functions for potentially better performance.
4967   if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M))
4968     for (Function &F : M)
4969       if (!F.isDeclaration() && !Kernels.contains(&F) &&
4970           !F.hasFnAttribute(Attribute::NoInline))
4971         F.addFnAttr(Attribute::AlwaysInline);
4972 
4973   if (PrintModuleAfterOptimizations)
4974     LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M);
4975 
4976   if (Changed)
4977     return PreservedAnalyses::none();
4978 
4979   return PreservedAnalyses::all();
4980 }
4981 
4982 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
4983                                           CGSCCAnalysisManager &AM,
4984                                           LazyCallGraph &CG,
4985                                           CGSCCUpdateResult &UR) {
4986   if (!containsOpenMP(*C.begin()->getFunction().getParent()))
4987     return PreservedAnalyses::all();
4988   if (DisableOpenMPOptimizations)
4989     return PreservedAnalyses::all();
4990 
4991   SmallVector<Function *, 16> SCC;
4992   // If there are kernels in the module, we have to run on all SCC's.
4993   for (LazyCallGraph::Node &N : C) {
4994     Function *Fn = &N.getFunction();
4995     SCC.push_back(Fn);
4996   }
4997 
4998   if (SCC.empty())
4999     return PreservedAnalyses::all();
5000 
5001   Module &M = *C.begin()->getFunction().getParent();
5002 
5003   KernelSet Kernels = getDeviceKernels(M);
5004 
5005   FunctionAnalysisManager &FAM =
5006       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
5007 
5008   AnalysisGetter AG(FAM);
5009 
5010   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
5011     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
5012   };
5013 
5014   BumpPtrAllocator Allocator;
5015   CallGraphUpdater CGUpdater;
5016   CGUpdater.initialize(CG, C, AM, UR);
5017 
5018   SetVector<Function *> Functions(SCC.begin(), SCC.end());
5019   OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
5020                                 /*CGSCC*/ Functions, Kernels);
5021 
5022   unsigned MaxFixpointIterations =
5023       (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
5024   Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
5025                MaxFixpointIterations, OREGetter, DEBUG_TYPE);
5026 
5027   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
5028   bool Changed = OMPOpt.run(false);
5029 
5030   if (PrintModuleAfterOptimizations)
5031     LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);
5032 
5033   if (Changed)
5034     return PreservedAnalyses::none();
5035 
5036   return PreservedAnalyses::all();
5037 }
5038 
5039 namespace {
5040 
5041 struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass {
5042   CallGraphUpdater CGUpdater;
5043   static char ID;
5044 
5045   OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) {
5046     initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
5047   }
5048 
5049   void getAnalysisUsage(AnalysisUsage &AU) const override {
5050     CallGraphSCCPass::getAnalysisUsage(AU);
5051   }
5052 
5053   bool runOnSCC(CallGraphSCC &CGSCC) override {
5054     if (!containsOpenMP(CGSCC.getCallGraph().getModule()))
5055       return false;
5056     if (DisableOpenMPOptimizations || skipSCC(CGSCC))
5057       return false;
5058 
5059     SmallVector<Function *, 16> SCC;
5060     // If there are kernels in the module, we have to run on all SCC's.
5061     for (CallGraphNode *CGN : CGSCC) {
5062       Function *Fn = CGN->getFunction();
5063       if (!Fn || Fn->isDeclaration())
5064         continue;
5065       SCC.push_back(Fn);
5066     }
5067 
5068     if (SCC.empty())
5069       return false;
5070 
5071     Module &M = CGSCC.getCallGraph().getModule();
5072     KernelSet Kernels = getDeviceKernels(M);
5073 
5074     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
5075     CGUpdater.initialize(CG, CGSCC);
5076 
5077     // Maintain a map of functions to avoid rebuilding the ORE
5078     DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
5079     auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
5080       std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
5081       if (!ORE)
5082         ORE = std::make_unique<OptimizationRemarkEmitter>(F);
5083       return *ORE;
5084     };
5085 
5086     AnalysisGetter AG;
5087     SetVector<Function *> Functions(SCC.begin(), SCC.end());
5088     BumpPtrAllocator Allocator;
5089     OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG,
5090                                   Allocator,
5091                                   /*CGSCC*/ Functions, Kernels);
5092 
5093     unsigned MaxFixpointIterations =
5094         (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
5095     Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
5096                  MaxFixpointIterations, OREGetter, DEBUG_TYPE);
5097 
5098     OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
5099     bool Result = OMPOpt.run(false);
5100 
5101     if (PrintModuleAfterOptimizations)
5102       LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);
5103 
5104     return Result;
5105   }
5106 
5107   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
5108 };
5109 
5110 } // end anonymous namespace
5111 
5112 KernelSet llvm::omp::getDeviceKernels(Module &M) {
5113   // TODO: Create a more cross-platform way of determining device kernels.
5114   NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
5115   KernelSet Kernels;
5116 
5117   if (!MD)
5118     return Kernels;
5119 
5120   for (auto *Op : MD->operands()) {
5121     if (Op->getNumOperands() < 2)
5122       continue;
5123     MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
5124     if (!KindID || KindID->getString() != "kernel")
5125       continue;
5126 
5127     Function *KernelFn =
5128         mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
5129     if (!KernelFn)
5130       continue;
5131 
5132     ++NumOpenMPTargetRegionKernels;
5133 
5134     Kernels.insert(KernelFn);
5135   }
5136 
5137   return Kernels;
5138 }
5139 
5140 bool llvm::omp::containsOpenMP(Module &M) {
5141   Metadata *MD = M.getModuleFlag("openmp");
5142   if (!MD)
5143     return false;
5144 
5145   return true;
5146 }
5147 
5148 bool llvm::omp::isOpenMPDevice(Module &M) {
5149   Metadata *MD = M.getModuleFlag("openmp-device");
5150   if (!MD)
5151     return false;
5152 
5153   return true;
5154 }
5155 
5156 char OpenMPOptCGSCCLegacyPass::ID = 0;
5157 
5158 INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
5159                       "OpenMP specific optimizations", false, false)
5160 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
5161 INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
5162                     "OpenMP specific optimizations", false, false)
5163 
5164 Pass *llvm::createOpenMPOptCGSCCLegacyPass() {
5165   return new OpenMPOptCGSCCLegacyPass();
5166 }
5167