//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

/// GPU Configuration: This information can be derived from CUDA registers;
/// however, providing compile-time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
  /// and GV_Warp_Size_Log2_Mask.

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

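// For example, given a private item expression like 'a[i][j]' or an array
// section 'a[0:n]', the helper below peels off the subscripts/sections and
// returns the canonical declaration of the base variable 'a'.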
static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}

static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
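  // For illustration: an escaped parallel-region variable 'int x', with
  // BufSize == WarpSize == 32, becomes a field 'int x[32]' aligned to
  // max(alignof(int), GlobalMemoryAlignment), one slot per lane; a
  // teams-level variable keeps its scalar type and natural alignment.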
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
                                    0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
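    // E.g., a local 'int x' whose address is taken inside the region reaches
    // this point via VisitUnaryOperator's UO_AddrOf handling and, unless
    // filtered out below, is added to EscapedDecls for globalization.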
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
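/// E.g., with warp size 32 (log2 == 5), thread 70 is in warp 70 >> 5 == 2.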
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
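/// E.g., with lane mask 31 (0b11111), thread 70 has lane id 70 & 31 == 6.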
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
      llvm::omp::GV_Warp_Size_Log2_Mask);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}

/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
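/// E.g., in generic mode a CTA launched with 128 threads and warp size 32
/// reports a thread_limit of 128 - 32 == 96; the last warp is kept for the
/// master.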
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return IsInSPMDExecutionMode
             ? RT.getGPUNumThreads(CGF)
             : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
                                RT.getGPUWarpSize(CGF), "thread_limit");
}

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block.  Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
  // We assume that the warp size is a power of 2.
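  // Master tid = (NumThreads - 1) & ~(WarpSize - 1): clearing the low lane
  // bits of the last thread id selects lane 0 of the last warp.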
  llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}

CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  WorkerFn->setDoesNotRecurse();
}

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

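/// E.g., '#pragma omp target parallel for' runs in SPMD mode directly, while
/// a bare '#pragma omp target' supports SPMD mode only when its single child
/// is a parallel directive (see hasNestedSPMDDirective above).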
static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

/// Check if the directive is loop-based and either has no schedule clause at
/// all or uses static scheduling.
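/// E.g., a loop with no schedule clause or with schedule(static[, chunk])
/// qualifies; an 'ordered' clause or schedule(dynamic) does not.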
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}

/// Check for inner (nested) lightweight runtime construct, if any
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
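/// E.g., '#pragma omp target teams distribute parallel for schedule(static)'
/// qualifies, while schedule(dynamic) (or an 'ordered' clause) forces the
/// full runtime.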
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                           StringRef ParentName,
                                           llvm::Function *&OutlinedFn,
                                           llvm::Constant *&OutlinedFnID,
                                           bool IsOffloadEntry,
                                           const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getBeginLoc());
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;
    CGOpenMPRuntimeGPU::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
                         CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitNonSPMDEntryHeader(CGF, EST, WST);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::UndefValue::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));

  // Create the worker function
  emitWorkerFunction(WST);
}

// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                EntryFunctionState &EST,
                                                WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  llvm::Value *IsWorker =
      Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_kernel_init),
                      Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
      CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack));

  emitGenericVarsProlog(CGF, WST.Loc);
}

void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
                                                EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  emitGenericVarsEpilog(CGF);

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_kernel_deinit),
                      Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                        StringRef ParentName,
                                        llvm::Function *&OutlinedFn,
                                        llvm::Constant *&OutlinedFnID,
                                        bool IsOffloadEntry,
                                        const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryHeader(CGF, EST, D);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitSPMDEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::UndefValue::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  CGBuilderTy &Bld = CGF.Builder;

  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/
                         Bld.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_spmd_kernel_init),
                      Args);

  if (RequiresFullRuntime) {
    // For data sharing, we need to initialize the stack.
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
        CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack_spmd));
  }

  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);

  IsInTargetMasterThreadRegion = true;
}

void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
                                             EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
                         CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_spmd_kernel_deinit_v2),
                      Args);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

// Create a unique global variable to indicate the execution mode of this target
// region. The execution mode is either 'generic', or 'spmd' depending on the
// target directive. This variable is picked up by the offload library to setup
// the device appropriately before kernel launch. If the execution mode is
// 'generic', the runtime reserves one warp for the master, otherwise, all
// warps participate in parallel work.
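// For illustration (the exact symbol name comes from the offload entry), the
// emitted global looks roughly like:
//   @__omp_offloading_<...>_exec_mode = weak constant i8 1
// where 0 encodes SPMD ('spmd') mode and 1 encodes generic mode.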
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode =
      new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
                               llvm::GlobalValue::WeakAnyLinkage,
                               llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
                               Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}

void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
                    WST.Loc, WST.Loc);
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}

void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
                                        WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers.  The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads.  The activated workers load the variable arguments and
  // execute the parallel work.
  //
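  // In outline (illustrative pseudocode only):
  //   for (;;) {
  //     barrier();                      // wait for the master to post work
  //     active = __kmpc_kernel_parallel(&work_fn);
  //     if (work_fn == null) break;     // termination signal from the master
  //     if (active) work_fn(/*ParallelLevel=*/0, tid);
  //     barrier();                      // all workers rejoin after the region
  //   }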
1337 
1338   CGBuilderTy &Bld = CGF.Builder;
1339 
1340   llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
1341   llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
1342   llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
1343   llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
1344   llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
1345   llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1346 
1347   CGF.EmitBranch(AwaitBB);
1348 
1349   // Workers wait for work from master.
1350   CGF.EmitBlock(AwaitBB);
1351   // Wait for parallel work
1352   syncCTAThreads(CGF);
1353 
1354   Address WorkFn =
1355       CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
1356   Address ExecStatus =
1357       CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
1358   CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
1359   CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
1360 
1361   // TODO: Optimize runtime initialization and pass in correct value.
1362   llvm::Value *Args[] = {WorkFn.getPointer()};
1363   llvm::Value *Ret =
1364       CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1365                               CGM.getModule(), OMPRTL___kmpc_kernel_parallel),
1366                           Args);
1367   Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
1368 
1369   // On termination condition (workid == 0), exit loop.
1370   llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
1371   llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
1372   Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
1373 
1374   // Activate requested workers.
1375   CGF.EmitBlock(SelectWorkersBB);
1376   llvm::Value *IsActive =
1377       Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
1378   Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
1379 
1380   // Signal start of parallel region.
1381   CGF.EmitBlock(ExecuteBB);
1382   // Skip initialization.
1383   setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1384 
1385   // Process work items: outlined parallel functions.
1386   for (llvm::Function *W : Work) {
1387     // Try to match this outlined function.
1388     llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
1389 
1390     llvm::Value *WorkFnMatch =
1391         Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
1392 
1393     llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
1394     llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
1395     Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
1396 
1397     // Execute this outlined function.
1398     CGF.EmitBlock(ExecuteFNBB);
1399 
1400     // Insert call to work function via shared wrapper. The shared
1401     // wrapper takes two arguments:
1402     //   - the parallelism level;
1403     //   - the thread ID;
1404     emitCall(CGF, WST.Loc, W,
1405              {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1406 
1407     // Go to end of parallel region.
1408     CGF.EmitBranch(TerminateBB);
1409 
1410     CGF.EmitBlock(CheckNextBB);
1411   }
1412   // Default case: call the outlined function through a pointer, in case the
1413   // target region calls a declare-target function that may contain an
1414   // orphaned parallel directive.
1415   auto *ParallelFnTy =
1416       llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
1417                               /*isVarArg=*/false);
1418   llvm::Value *WorkFnCast =
1419       Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
1420   // Insert call to work function via shared wrapper. The shared
1421   // wrapper takes two arguments:
1422   //   - the parallelism level;
1423   //   - the thread ID;
1424   emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
1425            {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1426   // Go to end of parallel region.
1427   CGF.EmitBranch(TerminateBB);
1428 
1429   // Signal end of parallel region.
1430   CGF.EmitBlock(TerminateBB);
1431   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1432                           CGM.getModule(), OMPRTL___kmpc_kernel_end_parallel),
1433                       llvm::None);
1434   CGF.EmitBranch(BarrierBB);
1435 
1436   // All active and inactive workers wait at a barrier after parallel region.
1437   CGF.EmitBlock(BarrierBB);
1438   // Barrier after parallel region.
1439   syncCTAThreads(CGF);
1440   CGF.EmitBranch(AwaitBB);
1441 
1442   // Exit target region.
1443   CGF.EmitBlock(ExitBB);
1444   // Skip initialization.
1445   clearLocThreadIdInsertPt(CGF);
1446 }
1447 
1448 void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
1449                                               llvm::Constant *Addr,
1450                                               uint64_t Size, int32_t,
1451                                               llvm::GlobalValue::LinkageTypes) {
1452   // TODO: Add support for global variables on the device after declare target
1453   // support.
1454   if (!isa<llvm::Function>(Addr))
1455     return;
1456   llvm::Module &M = CGM.getModule();
1457   llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1458 
1459   // Get "nvvm.annotations" metadata node
1460   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1461 
1462   llvm::Metadata *MDVals[] = {
1463       llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1464       llvm::ConstantAsMetadata::get(
1465           llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1466   // Append metadata to nvvm.annotations
1467   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
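  // The module then carries, roughly (sketch):
  //   !nvvm.annotations = !{!0, ...}
  //   !0 = !{<kernel function>, !"kernel", i32 1}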
1468 }
1469 
1470 void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
1471     const OMPExecutableDirective &D, StringRef ParentName,
1472     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1473     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1474   if (!IsOffloadEntry) // Nothing to do.
1475     return;
1476 
1477   assert(!ParentName.empty() && "Invalid target region parent name!");
1478 
1479   bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1480   if (Mode)
1481     emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1482                    CodeGen);
1483   else
1484     emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1485                       CodeGen);
1486 
1487   setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1488 }
1489 
1490 namespace {
1491 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1492 /// Enum for accessing the reserved_2 field of the ident_t struct.
1493 enum ModeFlagsTy : unsigned {
1494   /// Bit set to 1 when in SPMD mode.
1495   KMP_IDENT_SPMD_MODE = 0x01,
1496   /// Bit set to 1 when a simplified runtime is used.
1497   KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1498   LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1499 };
1500 
1501 /// Special Undefined mode: non-SPMD mode combined with the simple runtime.
1502 static const ModeFlagsTy UndefinedMode =
1503     (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1504 } // anonymous namespace
1505 
1506 unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
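  // The resulting encodings are: SPMD + full runtime = 0x01, SPMD + simple
  // runtime = 0x03, non-SPMD + full runtime = 0x00, unknown = 0x02.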
1507   switch (getExecutionMode()) {
1508   case EM_SPMD:
1509     if (requiresFullRuntime())
1510       return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1511     return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1512   case EM_NonSPMD:
1513     assert(requiresFullRuntime() && "Expected full runtime.");
1514     return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1515   case EM_Unknown:
1516     return UndefinedMode;
1517   }
1518   llvm_unreachable("Unknown flags are requested.");
1519 }
1520 
1521 CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
1522     : CGOpenMPRuntime(CGM, "_", "$") {
1523   if (!CGM.getLangOpts().OpenMPIsDevice)
1524     llvm_unreachable("OpenMP NVPTX can only handle device code.");
1525 }
1526 
1527 void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
1528                                               ProcBindKind ProcBind,
1529                                               SourceLocation Loc) {
1530   // Do nothing in SPMD mode with an L0 parallel region.
1531   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1532     return;
1533 
1534   CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1535 }
1536 
1537 void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
1538                                                 llvm::Value *NumThreads,
1539                                                 SourceLocation Loc) {
1540   // Do nothing in SPMD mode with an L0 parallel region.
1541   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1542     return;
1543 
1544   CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1545 }
1546 
1547 void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
1548                                               const Expr *NumTeams,
1549                                               const Expr *ThreadLimit,
1550                                               SourceLocation Loc) {}
1551 
1552 llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
1553     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1554     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1555   // Emit target region as a standalone region.
1556   class NVPTXPrePostActionTy : public PrePostActionTy {
1557     bool &IsInParallelRegion;
1558     bool PrevIsInParallelRegion;
1559 
1560   public:
1561     NVPTXPrePostActionTy(bool &IsInParallelRegion)
1562         : IsInParallelRegion(IsInParallelRegion) {}
1563     void Enter(CodeGenFunction &CGF) override {
1564       PrevIsInParallelRegion = IsInParallelRegion;
1565       IsInParallelRegion = true;
1566     }
1567     void Exit(CodeGenFunction &CGF) override {
1568       IsInParallelRegion = PrevIsInParallelRegion;
1569     }
1570   } Action(IsInParallelRegion);
1571   CodeGen.setAction(Action);
1572   bool PrevIsInTTDRegion = IsInTTDRegion;
1573   IsInTTDRegion = false;
1574   bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1575   IsInTargetMasterThreadRegion = false;
1576   auto *OutlinedFun =
1577       cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1578           D, ThreadIDVar, InnermostKind, CodeGen));
1579   IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1580   IsInTTDRegion = PrevIsInTTDRegion;
1581   if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
1582       !IsInParallelRegion) {
1583     llvm::Function *WrapperFun =
1584         createParallelDataSharingWrapper(OutlinedFun, D);
1585     WrapperFunctionsMap[OutlinedFun] = WrapperFun;
1586   }
1587 
1588   return OutlinedFun;
1589 }
1590 
1591 /// Get list of lastprivate variables from the teams distribute ... or
1592 /// teams {distribute ...} directives.
1593 static void
1594 getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1595                              llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1596   assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1597          "expected teams directive.");
1598   const OMPExecutableDirective *Dir = &D;
1599   if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1600     if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
1601             Ctx,
1602             D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
1603                 /*IgnoreCaptured=*/true))) {
1604       Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
1605       if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
1606         Dir = nullptr;
1607     }
1608   }
1609   if (!Dir)
1610     return;
1611   for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
1612     for (const Expr *E : C->getVarRefs())
1613       Vars.push_back(getPrivateItem(E));
1614   }
1615 }
1616 
1617 /// Get list of reduction variables from the teams ... directives.
1618 static void
1619 getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1620                       llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1621   assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1622          "expected teams directive.");
1623   for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1624     for (const Expr *E : C->privates())
1625       Vars.push_back(getPrivateItem(E));
1626   }
1627 }
1628 
1629 llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1630     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1631     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1632   SourceLocation Loc = D.getBeginLoc();
1633 
1634   const RecordDecl *GlobalizedRD = nullptr;
1635   llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1636   llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1637   unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
1638   // Globalize team reduction variables in non-SPMD mode.
1639   if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1640     getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1641   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1642     getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1643     if (!LastPrivatesReductions.empty()) {
1644       GlobalizedRD = ::buildRecordForGlobalizedVars(
1645           CGM.getContext(), llvm::None, LastPrivatesReductions,
1646           MappedDeclsFields, WarpSize);
1647     }
1648   } else if (!LastPrivatesReductions.empty()) {
1649     assert(!TeamAndReductions.first &&
1650            "Previous team declaration is not expected.");
1651     TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1652     std::swap(TeamAndReductions.second, LastPrivatesReductions);
1653   }
1654 
1655   // Emit target region as a standalone region.
1656   class NVPTXPrePostActionTy : public PrePostActionTy {
1657     SourceLocation &Loc;
1658     const RecordDecl *GlobalizedRD;
1659     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1660         &MappedDeclsFields;
1661 
1662   public:
1663     NVPTXPrePostActionTy(
1664         SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1665         llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1666             &MappedDeclsFields)
1667         : Loc(Loc), GlobalizedRD(GlobalizedRD),
1668           MappedDeclsFields(MappedDeclsFields) {}
1669     void Enter(CodeGenFunction &CGF) override {
1670       auto &Rt =
1671           static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1672       if (GlobalizedRD) {
1673         auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1674         I->getSecond().GlobalRecord = GlobalizedRD;
1675         I->getSecond().MappedParams =
1676             std::make_unique<CodeGenFunction::OMPMapVars>();
1677         DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1678         for (const auto &Pair : MappedDeclsFields) {
1679           assert(Pair.getFirst()->isCanonicalDecl() &&
1680                  "Expected canonical declaration");
1681           Data.insert(std::make_pair(Pair.getFirst(),
1682                                      MappedVarData(Pair.getSecond(),
1683                                                    /*IsOnePerTeam=*/true)));
1684         }
1685       }
1686       Rt.emitGenericVarsProlog(CGF, Loc);
1687     }
1688     void Exit(CodeGenFunction &CGF) override {
1689       static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1690           .emitGenericVarsEpilog(CGF);
1691     }
1692   } Action(Loc, GlobalizedRD, MappedDeclsFields);
1693   CodeGen.setAction(Action);
1694   llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1695       D, ThreadIDVar, InnermostKind, CodeGen);
1696 
1697   return OutlinedFun;
1698 }
1699 
1700 void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1701                                                  SourceLocation Loc,
1702                                                  bool WithSPMDCheck) {
1703   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1704       getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1705     return;
1706 
1707   CGBuilderTy &Bld = CGF.Builder;
1708 
1709   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1710   if (I == FunctionGlobalizedDecls.end())
1711     return;
1712   if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
1713     QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
1714     QualType SecGlobalRecTy;
1715 
1716     // Recover pointer to this function's global record. The runtime will
1717     // handle the specifics of the allocation of the memory.
1718     // Use actual memory size of the record including the padding
1719     // for alignment purposes.
1720     unsigned Alignment =
1721         CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
1722     unsigned GlobalRecordSize =
1723         CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
1724     GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1725 
1726     llvm::PointerType *GlobalRecPtrTy =
1727         CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
1728     llvm::Value *GlobalRecCastAddr;
1729     llvm::Value *IsTTD = nullptr;
1730     if (!IsInTTDRegion &&
1731         (WithSPMDCheck ||
1732          getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
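      // The mode is not known at compile time, so dispatch at runtime (sketch):
      //   if (__kmpc_is_spmd_exec_mode())
      //     rec_ptr = null;               // SPMD: nothing to push here
      //   else
      //     rec_ptr = __kmpc_data_sharing_coalesced_push_stack(size, 0);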
1733       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1734       llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
1735       llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
1736       if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
1737         llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1738         llvm::Value *ThreadID = getThreadID(CGF, Loc);
1739         llvm::Value *PL = CGF.EmitRuntimeCall(
1740             OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
1741                                                   OMPRTL___kmpc_parallel_level),
1742             {RTLoc, ThreadID});
1743         IsTTD = Bld.CreateIsNull(PL);
1744       }
1745       llvm::Value *IsSPMD = Bld.CreateIsNotNull(
1746           CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1747               CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
1748       Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
1749       // There is no need to emit line number for unconditional branch.
1750       (void)ApplyDebugLocation::CreateEmpty(CGF);
1751       CGF.EmitBlock(SPMDBB);
1752       Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
1753                                CharUnits::fromQuantity(Alignment));
1754       CGF.EmitBranch(ExitBB);
1755       // There is no need to emit line number for unconditional branch.
1756       (void)ApplyDebugLocation::CreateEmpty(CGF);
1757       CGF.EmitBlock(NonSPMDBB);
1758       llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
1759       if (const RecordDecl *SecGlobalizedVarsRecord =
1760               I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
1761         SecGlobalRecTy =
1762             CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
1763 
1764         // Recover pointer to this function's global record. The runtime will
1765         // handle the specifics of the allocation of the memory.
1766         // Use actual memory size of the record including the padding
1767         // for alignment purposes.
1768         unsigned Alignment =
1769             CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
1770         unsigned GlobalRecordSize =
1771             CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
1772         GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1773         Size = Bld.CreateSelect(
1774             IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
1775       }
1776       // TODO: allow the usage of shared memory to be controlled by
1777       // the user; for now, default to global.
1778       llvm::Value *GlobalRecordSizeArg[] = {
1779           Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1780       llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1781           OMPBuilder.getOrCreateRuntimeFunction(
1782               CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1783           GlobalRecordSizeArg);
1784       GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1785           GlobalRecValue, GlobalRecPtrTy);
1786       CGF.EmitBlock(ExitBB);
1787       auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
1788                                 /*NumReservedValues=*/2, "_select_stack");
1789       Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
1790       Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
1791       GlobalRecCastAddr = Phi;
1792       I->getSecond().GlobalRecordAddr = Phi;
1793       I->getSecond().IsInSPMDModeFlag = IsSPMD;
1794     } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
1795       assert(GlobalizedRecords.back().Records.size() < 2 &&
1796              "Expected less than 2 globalized records: one for target and one "
1797              "for teams.");
1798       unsigned Offset = 0;
1799       for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
1800         QualType RDTy = CGM.getContext().getRecordType(RD);
1801         unsigned Alignment =
1802             CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
1803         unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
1804         Offset =
1805             llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
1806       }
1807       unsigned Alignment =
1808           CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
1809       Offset = llvm::alignTo(Offset, Alignment);
1810       GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
1811       ++GlobalizedRecords.back().RegionCounter;
1812       if (GlobalizedRecords.back().Records.size() == 1) {
1813         assert(KernelStaticGlobalized &&
1814                "Kernel static pointer must be initialized already.");
1815         auto *UseSharedMemory = new llvm::GlobalVariable(
1816             CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
1817             llvm::GlobalValue::InternalLinkage, nullptr,
1818             "_openmp_static_kernel$is_shared");
1819         UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1820         QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
1821             /*DestWidth=*/16, /*Signed=*/0);
1822         llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
1823             Address(UseSharedMemory,
1824                     CGM.getContext().getTypeAlignInChars(Int16Ty)),
1825             /*Volatile=*/false, Int16Ty, Loc);
1826         auto *StaticGlobalized = new llvm::GlobalVariable(
1827             CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
1828             llvm::GlobalValue::CommonLinkage, nullptr);
1829         auto *RecSize = new llvm::GlobalVariable(
1830             CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
1831             llvm::GlobalValue::InternalLinkage, nullptr,
1832             "_openmp_static_kernel$size");
1833         RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1834         llvm::Value *Ld = CGF.EmitLoadOfScalar(
1835             Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
1836             CGM.getContext().getSizeType(), Loc);
1837         llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1838             KernelStaticGlobalized, CGM.VoidPtrPtrTy);
1839         llvm::Value *GlobalRecordSizeArg[] = {
1840             llvm::ConstantInt::get(
1841                 CGM.Int16Ty,
1842                 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
1843             StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
1844         CGF.EmitRuntimeCall(
1845             OMPBuilder.getOrCreateRuntimeFunction(
1846                 CGM.getModule(), OMPRTL___kmpc_get_team_static_memory),
1847             GlobalRecordSizeArg);
1848         GlobalizedRecords.back().Buffer = StaticGlobalized;
1849         GlobalizedRecords.back().RecSize = RecSize;
1850         GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
1851         GlobalizedRecords.back().Loc = Loc;
1852       }
1853       assert(KernelStaticGlobalized && "Global address must be set already.");
1854       Address FrameAddr = CGF.EmitLoadOfPointer(
1855           Address(KernelStaticGlobalized, CGM.getPointerAlign()),
1856           CGM.getContext()
1857               .getPointerType(CGM.getContext().VoidPtrTy)
1858               .castAs<PointerType>());
1859       llvm::Value *GlobalRecValue =
1860           Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
1861       I->getSecond().GlobalRecordAddr = GlobalRecValue;
1862       I->getSecond().IsInSPMDModeFlag = nullptr;
1863       GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1864           GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
1865     } else {
1866       // TODO: allow the usage of shared memory to be controlled by
1867       // the user; for now, default to global.
1868       bool UseSharedMemory =
1869           IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
1870       llvm::Value *GlobalRecordSizeArg[] = {
1871           llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
1872           CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
1873       llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1874           OMPBuilder.getOrCreateRuntimeFunction(
1875               CGM.getModule(),
1876               IsInTTDRegion ? OMPRTL___kmpc_data_sharing_push_stack
1877                             : OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1878           GlobalRecordSizeArg);
1879       GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1880           GlobalRecValue, GlobalRecPtrTy);
1881       I->getSecond().GlobalRecordAddr = GlobalRecValue;
1882       I->getSecond().IsInSPMDModeFlag = nullptr;
1883     }
1884     LValue Base =
1885         CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
1886 
1887     // Emit the "global alloca" which is a GEP from the global declaration
1888     // record using the pointer returned by the runtime.
1889     LValue SecBase;
1890     decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
1891     if (IsTTD) {
1892       SecIt = I->getSecond().SecondaryLocalVarData->begin();
1893       llvm::PointerType *SecGlobalRecPtrTy =
1894           CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
1895       SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
1896           Bld.CreatePointerBitCastOrAddrSpaceCast(
1897               I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
1898           SecGlobalRecTy);
1899     }
1900     for (auto &Rec : I->getSecond().LocalVarData) {
1901       bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1902       llvm::Value *ParValue;
1903       if (EscapedParam) {
1904         const auto *VD = cast<VarDecl>(Rec.first);
1905         LValue ParLVal =
1906             CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1907         ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1908       }
1909       LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
1910       // Emit VarAddr basing on lane-id if required.
1911       QualType VarTy;
1912       if (Rec.second.IsOnePerTeam) {
1913         VarTy = Rec.second.FD->getType();
1914       } else {
1915         llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
1916             VarAddr.getAddress(CGF).getPointer(),
1917             {Bld.getInt32(0), getNVPTXLaneID(CGF)});
1918         VarTy =
1919             Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
1920         VarAddr = CGF.MakeAddrLValue(
1921             Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
1922             AlignmentSource::Decl);
1923       }
1924       Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1925       if (!IsInTTDRegion &&
1926           (WithSPMDCheck ||
1927            getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
1928         assert(I->getSecond().IsInSPMDModeFlag &&
1929                "Expected unknown execution mode or required SPMD check.");
1930         if (IsTTD) {
1931           assert(SecIt->second.IsOnePerTeam &&
1932                  "Secondary glob data must be one per team.");
1933           LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
1934           VarAddr.setAddress(
1935               Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
1936                                        VarAddr.getPointer(CGF)),
1937                       VarAddr.getAlignment()));
1938           Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1939         }
1940         Address GlobalPtr = Rec.second.PrivateAddr;
1941         Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
1942         Rec.second.PrivateAddr = Address(
1943             Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
1944                              LocalAddr.getPointer(), GlobalPtr.getPointer()),
1945             LocalAddr.getAlignment());
1946       }
1947       if (EscapedParam) {
1948         const auto *VD = cast<VarDecl>(Rec.first);
1949         CGF.EmitStoreOfScalar(ParValue, VarAddr);
1950         I->getSecond().MappedParams->setVarAddr(CGF, VD,
1951                                                 VarAddr.getAddress(CGF));
1952       }
1953       if (IsTTD)
1954         ++SecIt;
1955     }
1956   }
1957   for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
1958     // Recover pointer to this function's global record. The runtime will
1959     // handle the specifics of the allocation of the memory.
1960     // Use actual memory size of the record including the padding
1961     // for alignment purposes.
1962     CGBuilderTy &Bld = CGF.Builder;
1963     llvm::Value *Size = CGF.getTypeSize(VD->getType());
1964     CharUnits Align = CGM.getContext().getDeclAlign(VD);
1965     Size = Bld.CreateNUWAdd(
1966         Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1967     llvm::Value *AlignVal =
1968         llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1969     Size = Bld.CreateUDiv(Size, AlignVal);
1970     Size = Bld.CreateNUWMul(Size, AlignVal);
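    // The add/udiv/mul sequence above rounds Size up to a multiple of the
    // declared alignment: Size = ((Size + Align - 1) / Align) * Align.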
1971     // TODO: allow the usage of shared memory to be controlled by
1972     // the user; for now, default to global.
1973     llvm::Value *GlobalRecordSizeArg[] = {
1974         Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1975     llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1976         OMPBuilder.getOrCreateRuntimeFunction(
1977             CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1978         GlobalRecordSizeArg);
1979     llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1980         GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
1981     LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
1982                                      CGM.getContext().getDeclAlign(VD),
1983                                      AlignmentSource::Decl);
1984     I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1985                                             Base.getAddress(CGF));
1986     I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
1987   }
1988   I->getSecond().MappedParams->apply(CGF);
1989 }
1990 
1991 void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
1992                                                  bool WithSPMDCheck) {
1993   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1994       getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1995     return;
1996 
1997   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1998   if (I != FunctionGlobalizedDecls.end()) {
1999     I->getSecond().MappedParams->restore(CGF);
2000     if (!CGF.HaveInsertPoint())
2001       return;
2002     for (llvm::Value *Addr :
2003          llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
2004       CGF.EmitRuntimeCall(
2005           OMPBuilder.getOrCreateRuntimeFunction(
2006               CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2007           Addr);
2008     }
2009     if (I->getSecond().GlobalRecordAddr) {
2010       if (!IsInTTDRegion &&
2011           (WithSPMDCheck ||
2012            getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
2013         CGBuilderTy &Bld = CGF.Builder;
2014         llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2015         llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2016         Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
2017         // There is no need to emit line number for unconditional branch.
2018         (void)ApplyDebugLocation::CreateEmpty(CGF);
2019         CGF.EmitBlock(NonSPMDBB);
2020         CGF.EmitRuntimeCall(
2021             OMPBuilder.getOrCreateRuntimeFunction(
2022                 CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2023             CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
2024         CGF.EmitBlock(ExitBB);
2025       } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2026         assert(GlobalizedRecords.back().RegionCounter > 0 &&
2027                "region counter must be > 0.");
2028         --GlobalizedRecords.back().RegionCounter;
2029         // Emit the restore function only in the target region.
2030         if (GlobalizedRecords.back().RegionCounter == 0) {
2031           QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2032               /*DestWidth=*/16, /*Signed=*/0);
2033           llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2034               Address(GlobalizedRecords.back().UseSharedMemory,
2035                       CGM.getContext().getTypeAlignInChars(Int16Ty)),
2036               /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
2037           llvm::Value *Args[] = {
2038               llvm::ConstantInt::get(
2039                   CGM.Int16Ty,
2040                   getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
2041               IsInSharedMemory};
2042           CGF.EmitRuntimeCall(
2043               OMPBuilder.getOrCreateRuntimeFunction(
2044                   CGM.getModule(), OMPRTL___kmpc_restore_team_static_memory),
2045               Args);
2046         }
2047       } else {
2048         CGF.EmitRuntimeCall(
2049             OMPBuilder.getOrCreateRuntimeFunction(
2050                 CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2051             I->getSecond().GlobalRecordAddr);
2052       }
2053     }
2054   }
2055 }
2056 
2057 void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
2058                                          const OMPExecutableDirective &D,
2059                                          SourceLocation Loc,
2060                                          llvm::Function *OutlinedFn,
2061                                          ArrayRef<llvm::Value *> CapturedVars) {
2062   if (!CGF.HaveInsertPoint())
2063     return;
2064 
2065   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2066                                                       /*Name=*/".zero.addr");
2067   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2068   llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2069   OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
2070   OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2071   OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2072   emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2073 }
2074 
2075 void CGOpenMPRuntimeGPU::emitParallelCall(
2076     CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2077     ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2078   if (!CGF.HaveInsertPoint())
2079     return;
2080 
2081   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
2082     emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2083   else
2084     emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2085 }
2086 
2087 void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
2088     CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2089     ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2090   llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
2091 
2092   // Internalize the outlined function; it is only used from this module.
2093   Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2094 
2095   // Ensure we do not inline the function. This is trivially true for the ones
2096   // passed to __kmpc_fork_call but the ones called in serialized regions
2097   // could be inlined. This is not perfect but it is closer to the invariant
2098   // we want, namely, every data environment starts with a new function.
2099   // TODO: We should pass the if condition to the runtime function and do the
2100   //       handling there. Much cleaner code.
2101   cast<llvm::Function>(OutlinedFn)->addFnAttr(llvm::Attribute::NoInline);
2102 
2103   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2104                                                       /*Name=*/".zero.addr");
2105   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2106   // ThreadId for serialized parallels is 0.
2107   Address ThreadIDAddr = ZeroAddr;
2108   auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
2109                        CodeGenFunction &CGF, PrePostActionTy &Action) {
2110     Action.Enter(CGF);
2111 
2112     Address ZeroAddr =
2113         CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2114                                          /*Name=*/".bound.zero.addr");
2115     CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2116     llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2117     OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2118     OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2119     OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2120     emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
2121   };
2122   auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2123                                         PrePostActionTy &) {
2124 
2125     RegionCodeGenTy RCG(CodeGen);
2126     llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2127     llvm::Value *ThreadID = getThreadID(CGF, Loc);
2128     llvm::Value *Args[] = {RTLoc, ThreadID};
2129 
2130     NVPTXActionTy Action(
2131         OMPBuilder.getOrCreateRuntimeFunction(
2132             CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
2133         Args,
2134         OMPBuilder.getOrCreateRuntimeFunction(
2135             CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
2136         Args);
2137     RCG.setAction(Action);
2138     RCG(CGF);
2139   };
2140 
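  // The master-side sequence emitted below is, roughly (sketch):
  //   __kmpc_kernel_prepare_parallel(wrapper_fn);
  //   __kmpc_begin_sharing_variables(&args, nargs); // only with captured vars
  //   ... store captured variable addresses into args ...
  //   barrier();                                    // release the workers
  //   barrier();                                    // wait for them to finish
  //   __kmpc_end_sharing_variables();               // only with captured vars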
2141   auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
2142                                                   PrePostActionTy &Action) {
2143     CGBuilderTy &Bld = CGF.Builder;
2144     llvm::Function *WFn = WrapperFunctionsMap[Fn];
2145     assert(WFn && "Wrapper function does not exist!");
2146     llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
2147 
2148     // Prepare for parallel region. Indicate the outlined function.
2149     llvm::Value *Args[] = {ID};
2150     CGF.EmitRuntimeCall(
2151         OMPBuilder.getOrCreateRuntimeFunction(
2152             CGM.getModule(), OMPRTL___kmpc_kernel_prepare_parallel),
2153         Args);
2154 
2155     // Create a private scope that will globalize the arguments
2156     // passed from the outside of the target region.
2157     CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
2158 
2159     // There's something to share.
2160     if (!CapturedVars.empty()) {
2161       // Set up the buffer of references to the variables being shared.
2162       Address SharedArgs =
2163           CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
2164       llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
2165 
2166       llvm::Value *DataSharingArgs[] = {
2167           SharedArgsPtr,
2168           llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
2169       CGF.EmitRuntimeCall(
2170           OMPBuilder.getOrCreateRuntimeFunction(
2171               CGM.getModule(), OMPRTL___kmpc_begin_sharing_variables),
2172           DataSharingArgs);
2173 
2174       // Store variable address in a list of references to pass to workers.
2175       unsigned Idx = 0;
2176       ASTContext &Ctx = CGF.getContext();
2177       Address SharedArgListAddress = CGF.EmitLoadOfPointer(
2178           SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
2179                           .castAs<PointerType>());
2180       for (llvm::Value *V : CapturedVars) {
2181         Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
2182         llvm::Value *PtrV;
2183         if (V->getType()->isIntegerTy())
2184           PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
2185         else
2186           PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
2187         CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
2188                               Ctx.getPointerType(Ctx.VoidPtrTy));
2189         ++Idx;
2190       }
2191     }
2192 
2193     // Activate workers. This barrier is used by the master to signal
2194     // work for the workers.
2195     syncCTAThreads(CGF);
2196 
2197     // OpenMP [2.5, Parallel Construct, p.49]
2198     // There is an implied barrier at the end of a parallel region. After the
2199     // end of a parallel region, only the master thread of the team resumes
2200     // execution of the enclosing task region.
2201     //
2202     // The master waits at this barrier until all workers are done.
2203     syncCTAThreads(CGF);
2204 
2205     if (!CapturedVars.empty())
2206       CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2207           CGM.getModule(), OMPRTL___kmpc_end_sharing_variables));
2208 
2209     // Remember for post-processing in worker loop.
2210     Work.emplace_back(WFn);
2211   };
2212 
2213   auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
2214                              CodeGenFunction &CGF, PrePostActionTy &Action) {
2215     if (IsInParallelRegion) {
2216       SeqGen(CGF, Action);
2217     } else if (IsInTargetMasterThreadRegion) {
2218       L0ParallelGen(CGF, Action);
2219     } else {
2220       // Check for master and then parallelism:
2221       // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
2222       //   Serialized execution.
2223       // } else {
2224       //   Worker call.
2225       // }
2226       CGBuilderTy &Bld = CGF.Builder;
2227       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2228       llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
2229       llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
2230       llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
2231       llvm::Value *IsSPMD = Bld.CreateIsNotNull(
2232           CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2233               CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
2234       Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
2235       // There is no need to emit line number for unconditional branch.
2236       (void)ApplyDebugLocation::CreateEmpty(CGF);
2237       CGF.EmitBlock(ParallelCheckBB);
2238       llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2239       llvm::Value *ThreadID = getThreadID(CGF, Loc);
2240       llvm::Value *PL = CGF.EmitRuntimeCall(
2241           OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2242                                                 OMPRTL___kmpc_parallel_level),
2243           {RTLoc, ThreadID});
2244       llvm::Value *Res = Bld.CreateIsNotNull(PL);
2245       Bld.CreateCondBr(Res, SeqBB, MasterBB);
2246       CGF.EmitBlock(SeqBB);
2247       SeqGen(CGF, Action);
2248       CGF.EmitBranch(ExitBB);
2249       // There is no need to emit line number for unconditional branch.
2250       (void)ApplyDebugLocation::CreateEmpty(CGF);
2251       CGF.EmitBlock(MasterBB);
2252       L0ParallelGen(CGF, Action);
2253       CGF.EmitBranch(ExitBB);
2254       // There is no need to emit line number for unconditional branch.
2255       (void)ApplyDebugLocation::CreateEmpty(CGF);
2256       // Emit the continuation block for code after the if.
2257       CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2258     }
2259   };
2260 
2261   if (IfCond) {
2262     emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
2263   } else {
2264     CodeGenFunction::RunCleanupsScope Scope(CGF);
2265     RegionCodeGenTy ThenRCG(LNParallelGen);
2266     ThenRCG(CGF);
2267   }
2268 }
2269 
2270 void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
2271     CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2272     ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2273   // Just call the outlined function to execute the parallel region.
2274   // OutlinedFn(&GTid, &zero, CapturedStruct);
2275   //
2276   llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2277 
2278   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2279                                                       /*Name=*/".zero.addr");
2280   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2281   // ThreadId for serialized parallels is 0.
2282   Address ThreadIDAddr = ZeroAddr;
2283   auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
2284                        CodeGenFunction &CGF, PrePostActionTy &Action) {
2285     Action.Enter(CGF);
2286 
2287     Address ZeroAddr =
2288         CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2289                                          /*Name=*/".bound.zero.addr");
2290     CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2291     llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2292     OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2293     OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2294     OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2295     emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2296   };
2297   auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2298                                         PrePostActionTy &) {
2299 
2300     RegionCodeGenTy RCG(CodeGen);
2301     llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2302     llvm::Value *ThreadID = getThreadID(CGF, Loc);
2303     llvm::Value *Args[] = {RTLoc, ThreadID};
2304 
2305     NVPTXActionTy Action(
2306         OMPBuilder.getOrCreateRuntimeFunction(
2307             CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
2308         Args,
2309         OMPBuilder.getOrCreateRuntimeFunction(
2310             CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
2311         Args);
2312     RCG.setAction(Action);
2313     RCG(CGF);
2314   };
2315 
2316   if (IsInTargetMasterThreadRegion) {
2317     // In the target master thread region we need to use the real thread id.
2318     ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2319     RegionCodeGenTy RCG(CodeGen);
2320     RCG(CGF);
2321   } else {
2322     // If we are not in the target region, it is definitely L2 parallelism or
2323     // more, because in SPMD mode we always have an L1 parallel level, so we
2324     // don't need to check for orphaned directives.
2325     RegionCodeGenTy RCG(SeqGen);
2326     RCG(CGF);
2327   }
2328 }
2329 
2330 void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
2331   // Always emit simple barriers!
2332   if (!CGF.HaveInsertPoint())
2333     return;
2334   // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
2335   // This function does not use parameters, so we can emit just default values.
2336   llvm::Value *Args[] = {
2337       llvm::ConstantPointerNull::get(
2338           cast<llvm::PointerType>(getIdentTyPointerTy())),
2339       llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
2340   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2341                           CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
2342                       Args);
2343 }
2344 
2345 void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
2346                                            SourceLocation Loc,
2347                                            OpenMPDirectiveKind Kind, bool,
2348                                            bool) {
2349   // Always emit simple barriers!
2350   if (!CGF.HaveInsertPoint())
2351     return;
2352   // Build call __kmpc_barrier(loc, thread_id);
2353   unsigned Flags = getDefaultFlagsForBarriers(Kind);
2354   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2355                          getThreadID(CGF, Loc)};
2356 
2357   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2358                           CGM.getModule(), OMPRTL___kmpc_barrier),
2359                       Args);
2360 }
2361 
2362 void CGOpenMPRuntimeGPU::emitCriticalRegion(
2363     CodeGenFunction &CGF, StringRef CriticalName,
2364     const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2365     const Expr *Hint) {
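  // The region is serialized by looping over all threads in the team; in each
  // iteration exactly one thread executes the body (sketch):
  //   for (counter = 0; counter < num_threads; ++counter) {
  //     if (thread_id == counter)
  //       <critical region body>;
  //     __kmpc_syncwarp(mask);  // reconverge before the next iteration
  //   }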
2366   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2367   llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2368   llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2369   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2370   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2371 
2372   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2373 
2374   // Get the mask of active threads in the warp.
2375   llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2376       CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
2377   // Fetch team-local id of the thread.
2378   llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2379 
2380   // Get the width of the team.
2381   llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
2382 
2383   // Initialize the counter variable for the loop.
2384   QualType Int32Ty =
2385       CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2386   Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2387   LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2388   CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2389                         /*isInit=*/true);
2390 
2391   // Block checks whether the loop counter has reached the team width.
2392   CGF.EmitBlock(LoopBB);
2393   llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2394   llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2395   CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2396 
2397   // Block tests which single thread should execute the region next, while the
2398   // remaining threads go straight to the synchronisation point.
2399   CGF.EmitBlock(TestBB);
2400   CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2401   llvm::Value *CmpThreadToCounter =
2402       CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2403   CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2404 
2405   // Block emits the body of the critical region.
2406   CGF.EmitBlock(BodyBB);
2407 
2408   // Output the critical statement.
2409   CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
2410                                       Hint);
2411 
2412   // After the body surrounded by the critical region, the single executing
2413   // thread will jump to the synchronisation point.
2414   // There the active threads of the warp reconverge, then the counter
2415   // variable is incremented and control returns to the loop.
2416   CGF.EmitBlock(SyncBB);
2417   // Reconverge active threads in the warp.
2418   (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2419                                 CGM.getModule(), OMPRTL___kmpc_syncwarp),
2420                             Mask);
2421 
2422   llvm::Value *IncCounterVal =
2423       CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2424   CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2425   CGF.EmitBranch(LoopBB);
2426 
2427   // Block that is reached when all threads in the team complete the region.
2428   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2429 }
2430 
2431 /// Cast value to the specified type.
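/// Same-size values are bitcast (e.g. float to i32), integer-to-integer casts
/// extend or truncate, and anything else is stored to and reloaded from a
/// temporary in memory.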
2432 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2433                                     QualType ValTy, QualType CastTy,
2434                                     SourceLocation Loc) {
2435   assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2436          "Cast type must be sized.");
2437   assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2438          "Val type must be sized.");
2439   llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2440   if (ValTy == CastTy)
2441     return Val;
2442   if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2443       CGF.getContext().getTypeSizeInChars(CastTy))
2444     return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2445   if (CastTy->isIntegerType() && ValTy->isIntegerType())
2446     return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2447                                      CastTy->hasSignedIntegerRepresentation());
2448   Address CastItem = CGF.CreateMemTemp(CastTy);
2449   Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2450       CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2451   CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
2452                         LValueBaseInfo(AlignmentSource::Type),
2453                         TBAAAccessInfo());
2454   return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
2455                               LValueBaseInfo(AlignmentSource::Type),
2456                               TBAAAccessInfo());
2457 }
2458 
2459 /// This function creates calls to one of two shuffle functions to copy
2460 /// variables between lanes in a warp.
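/// For an element of at most 4 bytes this lowers, roughly, to (sketch):
///   %res = call i32 @__kmpc_shuffle_int32(i32 %elem, i16 %offset, i16 %width)
/// and to the __kmpc_shuffle_int64 variant for 5- to 8-byte elements.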
2461 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2462                                                  llvm::Value *Elem,
2463                                                  QualType ElemType,
2464                                                  llvm::Value *Offset,
2465                                                  SourceLocation Loc) {
2466   CodeGenModule &CGM = CGF.CGM;
2467   CGBuilderTy &Bld = CGF.Builder;
2468   CGOpenMPRuntimeGPU &RT =
2469       *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
2470   llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
2471 
2472   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2473   assert(Size.getQuantity() <= 8 &&
2474          "Unsupported bitwidth in shuffle instruction.");
2475 
2476   RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
2477                                   ? OMPRTL___kmpc_shuffle_int32
2478                                   : OMPRTL___kmpc_shuffle_int64;
2479 
2480   // Cast all types to 32- or 64-bit values before calling shuffle routines.
2481   QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2482       Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2483   llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2484   llvm::Value *WarpSize =
2485       Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2486 
2487   llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2488       OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
2489       {ElemCast, Offset, WarpSize});
2490 
2491   return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2492 }
2493 
2494 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2495                             Address DestAddr, QualType ElemType,
2496                             llvm::Value *Offset, SourceLocation Loc) {
2497   CGBuilderTy &Bld = CGF.Builder;
2498 
2499   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2500   // Create the loop over the big sized data.
2501   // ptr = (void*)Elem;
2502   // ptrEnd = (void*)(Elem + 1);
2503   // Step = 8;
2504   //   while (ptr + Step <= ptrEnd)
2505   //     shuffle((int64_t)*ptr);
2506   //   Step = 4;
2507   //   while (ptr + Step <= ptrEnd)
2508   //   shuffle((int32_t)*ptr);
2509   // ...
2510   Address ElemPtr = DestAddr;
2511   Address Ptr = SrcAddr;
2512   Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2513       Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
2514   for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2515     if (Size < CharUnits::fromQuantity(IntSize))
2516       continue;
2517     QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2518         CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2519         /*Signed=*/1);
2520     llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2521     Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2522     ElemPtr =
2523         Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2524     if (Size.getQuantity() / IntSize > 1) {
2525       llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2526       llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2527       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2528       llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2529       CGF.EmitBlock(PreCondBB);
2530       llvm::PHINode *PhiSrc =
2531           Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2532       PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2533       llvm::PHINode *PhiDest =
2534           Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2535       PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2536       Ptr = Address(PhiSrc, Ptr.getAlignment());
2537       ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2538       llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2539           PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2540                                    Ptr.getPointer(), CGF.VoidPtrTy));
2541       Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2542                        ThenBB, ExitBB);
2543       CGF.EmitBlock(ThenBB);
2544       llvm::Value *Res = createRuntimeShuffleFunction(
2545           CGF,
2546           CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2547                                LValueBaseInfo(AlignmentSource::Type),
2548                                TBAAAccessInfo()),
2549           IntType, Offset, Loc);
2550       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2551                             LValueBaseInfo(AlignmentSource::Type),
2552                             TBAAAccessInfo());
2553       Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
2554       Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2555       PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
2556       PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
2557       CGF.EmitBranch(PreCondBB);
2558       CGF.EmitBlock(ExitBB);
2559     } else {
2560       llvm::Value *Res = createRuntimeShuffleFunction(
2561           CGF,
2562           CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2563                                LValueBaseInfo(AlignmentSource::Type),
2564                                TBAAAccessInfo()),
2565           IntType, Offset, Loc);
2566       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2567                             LValueBaseInfo(AlignmentSource::Type),
2568                             TBAAAccessInfo());
2569       Ptr = Bld.CreateConstGEP(Ptr, 1);
2570       ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2571     }
2572     Size = Size % IntSize;
2573   }
2574 }
2575 
2576 namespace {
2577 enum CopyAction : unsigned {
2578   // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2579   // the warp using shuffle instructions.
2580   RemoteLaneToThread,
2581   // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2582   ThreadCopy,
2583   // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2584   ThreadToScratchpad,
2585   // ScratchpadToThread: Copy from a scratchpad array in global memory
2586   // containing team-reduced data to a thread's stack.
2587   ScratchpadToThread,
2588 };
2589 } // namespace
2590 
2591 struct CopyOptionsTy {
2592   llvm::Value *RemoteLaneOffset;
2593   llvm::Value *ScratchpadIndex;
2594   llvm::Value *ScratchpadWidth;
2595 };
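// For example, a RemoteLaneToThread copy passes
// {RemoteLaneOffset, nullptr, nullptr}, as emitShuffleAndReduceFunction does
// below, whereas the scratchpad copies supply the index and width instead.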
2596 
2597 /// Emit instructions to copy a Reduce list, which contains partially
2598 /// aggregated values, in the specified direction.
2599 static void emitReductionListCopy(
2600     CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2601     ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2602     CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2603 
2604   CodeGenModule &CGM = CGF.CGM;
2605   ASTContext &C = CGM.getContext();
2606   CGBuilderTy &Bld = CGF.Builder;
2607 
2608   llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
2609   llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
2610   llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
2611 
  // Iterate, element by element, through the source Reduce list and
  // make a copy.
2614   unsigned Idx = 0;
2615   unsigned Size = Privates.size();
2616   for (const Expr *Private : Privates) {
2617     Address SrcElementAddr = Address::invalid();
2618     Address DestElementAddr = Address::invalid();
2619     Address DestElementPtrAddr = Address::invalid();
2620     // Should we shuffle in an element from a remote lane?
2621     bool ShuffleInElement = false;
2622     // Set to true to update the pointer in the dest Reduce list to a
2623     // newly created element.
2624     bool UpdateDestListPtr = false;
2625     // Increment the src or dest pointer to the scratchpad, for each
2626     // new element.
2627     bool IncrScratchpadSrc = false;
2628     bool IncrScratchpadDest = false;
2629 
2630     switch (Action) {
2631     case RemoteLaneToThread: {
2632       // Step 1.1: Get the address for the src element in the Reduce list.
2633       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2634       SrcElementAddr = CGF.EmitLoadOfPointer(
2635           SrcElementPtrAddr,
2636           C.getPointerType(Private->getType())->castAs<PointerType>());
2637 
2638       // Step 1.2: Create a temporary to store the element in the destination
2639       // Reduce list.
2640       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2641       DestElementAddr =
2642           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2643       ShuffleInElement = true;
2644       UpdateDestListPtr = true;
2645       break;
2646     }
2647     case ThreadCopy: {
2648       // Step 1.1: Get the address for the src element in the Reduce list.
2649       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2650       SrcElementAddr = CGF.EmitLoadOfPointer(
2651           SrcElementPtrAddr,
2652           C.getPointerType(Private->getType())->castAs<PointerType>());
2653 
2654       // Step 1.2: Get the address for dest element.  The destination
2655       // element has already been created on the thread's stack.
2656       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2657       DestElementAddr = CGF.EmitLoadOfPointer(
2658           DestElementPtrAddr,
2659           C.getPointerType(Private->getType())->castAs<PointerType>());
2660       break;
2661     }
2662     case ThreadToScratchpad: {
2663       // Step 1.1: Get the address for the src element in the Reduce list.
2664       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2665       SrcElementAddr = CGF.EmitLoadOfPointer(
2666           SrcElementPtrAddr,
2667           C.getPointerType(Private->getType())->castAs<PointerType>());
2668 
2669       // Step 1.2: Get the address for dest element:
2670       // address = base + index * ElementSizeInChars.
2671       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2672       llvm::Value *CurrentOffset =
2673           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2674       llvm::Value *ScratchPadElemAbsolutePtrVal =
2675           Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
2676       ScratchPadElemAbsolutePtrVal =
2677           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2678       DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2679                                 C.getTypeAlignInChars(Private->getType()));
2680       IncrScratchpadDest = true;
2681       break;
2682     }
2683     case ScratchpadToThread: {
2684       // Step 1.1: Get the address for the src element in the scratchpad.
2685       // address = base + index * ElementSizeInChars.
2686       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2687       llvm::Value *CurrentOffset =
2688           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2689       llvm::Value *ScratchPadElemAbsolutePtrVal =
2690           Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
2691       ScratchPadElemAbsolutePtrVal =
2692           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2693       SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2694                                C.getTypeAlignInChars(Private->getType()));
2695       IncrScratchpadSrc = true;
2696 
2697       // Step 1.2: Create a temporary to store the element in the destination
2698       // Reduce list.
2699       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2700       DestElementAddr =
2701           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2702       UpdateDestListPtr = true;
2703       break;
2704     }
2705     }
2706 
    // Regardless of the src and dest of the copy, we emit the load of the
    // src element, as this is required in all directions.
2709     SrcElementAddr = Bld.CreateElementBitCast(
2710         SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
2711     DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
2712                                                SrcElementAddr.getElementType());
2713 
2714     // Now that all active lanes have read the element in the
2715     // Reduce list, shuffle over the value from the remote lane.
2716     if (ShuffleInElement) {
2717       shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
2718                       RemoteLaneOffset, Private->getExprLoc());
2719     } else {
2720       switch (CGF.getEvaluationKind(Private->getType())) {
2721       case TEK_Scalar: {
2722         llvm::Value *Elem = CGF.EmitLoadOfScalar(
2723             SrcElementAddr, /*Volatile=*/false, Private->getType(),
2724             Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
2725             TBAAAccessInfo());
2726         // Store the source element value to the dest element address.
2727         CGF.EmitStoreOfScalar(
2728             Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
2729             LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2730         break;
2731       }
2732       case TEK_Complex: {
2733         CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
2734             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2735             Private->getExprLoc());
2736         CGF.EmitStoreOfComplex(
2737             Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2738             /*isInit=*/false);
2739         break;
2740       }
2741       case TEK_Aggregate:
2742         CGF.EmitAggregateCopy(
2743             CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2744             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2745             Private->getType(), AggValueSlot::DoesNotOverlap);
2746         break;
2747       }
2748     }
2749 
    // Step 3.1: Modify the reference in the dest Reduce list as needed.
    // Modify the reference in the Reduce list to point to the newly
    // created element.  The element is live in the current function
    // scope and that of the functions it invokes (i.e., reduce_function).
2754     // RemoteReduceData[i] = (void*)&RemoteElem
2755     if (UpdateDestListPtr) {
2756       CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2757                                 DestElementAddr.getPointer(), CGF.VoidPtrTy),
2758                             DestElementPtrAddr, /*Volatile=*/false,
2759                             C.VoidPtrTy);
2760     }
2761 
2762     // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2763     // address of the next element in scratchpad memory, unless we're currently
2764     // processing the last one.  Memory alignment is also taken care of here.
2765     if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2766       llvm::Value *ScratchpadBasePtr =
2767           IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2768       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2769       ScratchpadBasePtr = Bld.CreateNUWAdd(
2770           ScratchpadBasePtr,
2771           Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2772 
2773       // Take care of global memory alignment for performance
2774       ScratchpadBasePtr = Bld.CreateNUWSub(
2775           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2776       ScratchpadBasePtr = Bld.CreateUDiv(
2777           ScratchpadBasePtr,
2778           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2779       ScratchpadBasePtr = Bld.CreateNUWAdd(
2780           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2781       ScratchpadBasePtr = Bld.CreateNUWMul(
2782           ScratchpadBasePtr,
2783           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2784 
2785       if (IncrScratchpadDest)
2786         DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2787       else /* IncrScratchpadSrc = true */
2788         SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2789     }
2790 
2791     ++Idx;
2792   }
2793 }
2794 
2795 /// This function emits a helper that gathers Reduce lists from the first
2796 /// lane of every active warp to lanes in the first warp.
2797 ///
/// void inter_warp_copy_func(void* reduce_data, int num_warps)
2799 ///   shared smem[warp_size];
2800 ///   For all data entries D in reduce_data:
2801 ///     sync
2802 ///     If (I am the first lane in each warp)
2803 ///       Copy my local D to smem[warp_id]
2804 ///     sync
2805 ///     if (I am the first warp)
2806 ///       Copy smem[thread_id] to my local D
2807 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2808                                               ArrayRef<const Expr *> Privates,
2809                                               QualType ReductionArrayTy,
2810                                               SourceLocation Loc) {
2811   ASTContext &C = CGM.getContext();
2812   llvm::Module &M = CGM.getModule();
2813 
2814   // ReduceList: thread local Reduce list.
2815   // At the stage of the computation when this function is called, partially
2816   // aggregated values reside in the first lane of every active warp.
2817   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2818                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2819   // NumWarps: number of warps active in the parallel region.  This could
2820   // be smaller than 32 (max warps in a CTA) for partial block reduction.
2821   ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2822                                 C.getIntTypeForBitwidth(32, /* Signed */ true),
2823                                 ImplicitParamDecl::Other);
2824   FunctionArgList Args;
2825   Args.push_back(&ReduceListArg);
2826   Args.push_back(&NumWarpsArg);
2827 
2828   const CGFunctionInfo &CGFI =
2829       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2830   auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2831                                     llvm::GlobalValue::InternalLinkage,
2832                                     "_omp_reduction_inter_warp_copy_func", &M);
2833   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2834   Fn->setDoesNotRecurse();
2835   CodeGenFunction CGF(CGM);
2836   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2837 
2838   CGBuilderTy &Bld = CGF.Builder;
2839 
2840   // This array is used as a medium to transfer, one reduce element at a time,
2841   // the data from the first lane of every warp to lanes in the first warp
2842   // in order to perform the final step of a reduction in a parallel region
2843   // (reduction across warps).  The array is placed in NVPTX __shared__ memory
2844   // for reduced latency, as well as to have a distinct copy for concurrently
  // executing target regions.  The array is declared with weak linkage so
2846   // as to be shared across compilation units.
2847   StringRef TransferMediumName =
2848       "__openmp_nvptx_data_transfer_temporary_storage";
2849   llvm::GlobalVariable *TransferMedium =
2850       M.getGlobalVariable(TransferMediumName);
2851   unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
2852   if (!TransferMedium) {
2853     auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2854     unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2855     TransferMedium = new llvm::GlobalVariable(
2856         M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2857         llvm::UndefValue::get(Ty), TransferMediumName,
2858         /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2859         SharedAddressSpace);
2860     CGM.addCompilerUsedGlobal(TransferMedium);
2861   }
2862 
2863   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2864   // Get the CUDA thread id of the current OpenMP thread on the GPU.
2865   llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2866   // nvptx_lane_id = nvptx_id % warpsize
2867   llvm::Value *LaneID = getNVPTXLaneID(CGF);
2868   // nvptx_warp_id = nvptx_id / warpsize
2869   llvm::Value *WarpID = getNVPTXWarpID(CGF);
2870 
2871   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2872   Address LocalReduceList(
2873       Bld.CreatePointerBitCastOrAddrSpaceCast(
2874           CGF.EmitLoadOfScalar(
2875               AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2876               LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2877           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2878       CGF.getPointerAlign());
2879 
2880   unsigned Idx = 0;
2881   for (const Expr *Private : Privates) {
2882     //
2883     // Warp master copies reduce element to transfer medium in __shared__
2884     // memory.
2885     //
2886     unsigned RealTySize =
2887         C.getTypeSizeInChars(Private->getType())
2888             .alignTo(C.getTypeAlignInChars(Private->getType()))
2889             .getQuantity();
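    // Transfer the element through the medium in naturally aligned chunks
    // of 4, 2 and 1 bytes; e.g. (a sketch with a hypothetical size) a
    // 6-byte element aligned to 2 bytes goes through as one 32-bit copy
    // followed by one 16-bit copy.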
    for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
2891       unsigned NumIters = RealTySize / TySize;
2892       if (NumIters == 0)
2893         continue;
2894       QualType CType = C.getIntTypeForBitwidth(
2895           C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2896       llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2897       CharUnits Align = CharUnits::fromQuantity(TySize);
2898       llvm::Value *Cnt = nullptr;
2899       Address CntAddr = Address::invalid();
2900       llvm::BasicBlock *PrecondBB = nullptr;
2901       llvm::BasicBlock *ExitBB = nullptr;
2902       if (NumIters > 1) {
2903         CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2904         CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2905                               /*Volatile=*/false, C.IntTy);
2906         PrecondBB = CGF.createBasicBlock("precond");
2907         ExitBB = CGF.createBasicBlock("exit");
2908         llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2909         // There is no need to emit line number for unconditional branch.
2910         (void)ApplyDebugLocation::CreateEmpty(CGF);
2911         CGF.EmitBlock(PrecondBB);
2912         Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2913         llvm::Value *Cmp =
2914             Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2915         Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2916         CGF.EmitBlock(BodyBB);
2917       }
2918       // kmpc_barrier.
2919       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2920                                              /*EmitChecks=*/false,
2921                                              /*ForceSimpleCall=*/true);
2922       llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2923       llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2924       llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2925 
2926       // if (lane_id == 0)
2927       llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2928       Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2929       CGF.EmitBlock(ThenBB);
2930 
2931       // Reduce element = LocalReduceList[i]
2932       Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2933       llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2934           ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2935       // elemptr = ((CopyType*)(elemptrptr)) + I
2936       Address ElemPtr = Address(ElemPtrPtr, Align);
2937       ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2938       if (NumIters > 1) {
2939         ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
2940                           ElemPtr.getAlignment());
2941       }
2942 
2943       // Get pointer to location in transfer medium.
2944       // MediumPtr = &medium[warp_id]
2945       llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2946           TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2947       Address MediumPtr(MediumPtrVal, Align);
2948       // Casting to actual data type.
2949       // MediumPtr = (CopyType*)MediumPtrAddr;
2950       MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
2951 
2952       // elem = *elemptr
      // *MediumPtr = elem
2954       llvm::Value *Elem = CGF.EmitLoadOfScalar(
2955           ElemPtr, /*Volatile=*/false, CType, Loc,
2956           LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2957       // Store the source element value to the dest element address.
2958       CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2959                             LValueBaseInfo(AlignmentSource::Type),
2960                             TBAAAccessInfo());
2961 
2962       Bld.CreateBr(MergeBB);
2963 
2964       CGF.EmitBlock(ElseBB);
2965       Bld.CreateBr(MergeBB);
2966 
2967       CGF.EmitBlock(MergeBB);
2968 
2969       // kmpc_barrier.
2970       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2971                                              /*EmitChecks=*/false,
2972                                              /*ForceSimpleCall=*/true);
2973 
2974       //
2975       // Warp 0 copies reduce element from transfer medium.
2976       //
2977       llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2978       llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2979       llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2980 
2981       Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2982       llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2983           AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2984 
2985       // Up to 32 threads in warp 0 are active.
2986       llvm::Value *IsActiveThread =
2987           Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2988       Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2989 
2990       CGF.EmitBlock(W0ThenBB);
2991 
2992       // SrcMediumPtr = &medium[tid]
2993       llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2994           TransferMedium,
2995           {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2996       Address SrcMediumPtr(SrcMediumPtrVal, Align);
2997       // SrcMediumVal = *SrcMediumPtr;
2998       SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
2999 
3000       // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3001       Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3002       llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3003           TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3004       Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3005       TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3006       if (NumIters > 1) {
3007         TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3008                                 TargetElemPtr.getAlignment());
3009       }
3010 
3011       // *TargetElemPtr = SrcMediumVal;
3012       llvm::Value *SrcMediumValue =
3013           CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3014       CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3015                             CType);
3016       Bld.CreateBr(W0MergeBB);
3017 
3018       CGF.EmitBlock(W0ElseBB);
3019       Bld.CreateBr(W0MergeBB);
3020 
3021       CGF.EmitBlock(W0MergeBB);
3022 
3023       if (NumIters > 1) {
3024         Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3025         CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3026         CGF.EmitBranch(PrecondBB);
3027         (void)ApplyDebugLocation::CreateEmpty(CGF);
3028         CGF.EmitBlock(ExitBB);
3029       }
3030       RealTySize %= TySize;
3031     }
3032     ++Idx;
3033   }
3034 
3035   CGF.FinishFunction();
3036   return Fn;
3037 }
3038 
3039 /// Emit a helper that reduces data across two OpenMP threads (lanes)
3040 /// in the same warp.  It uses shuffle instructions to copy over data from
3041 /// a remote lane's stack.  The reduction algorithm performed is specified
3042 /// by the fourth parameter.
3043 ///
3044 /// Algorithm Versions.
3045 /// Full Warp Reduce (argument value 0):
3046 ///   This algorithm assumes that all 32 lanes are active and gathers
3047 ///   data from these 32 lanes, producing a single resultant value.
3048 /// Contiguous Partial Warp Reduce (argument value 1):
3049 ///   This algorithm assumes that only a *contiguous* subset of lanes
3050 ///   are active.  This happens for the last warp in a parallel region
3051 ///   when the user specified num_threads is not an integer multiple of
3052 ///   32.  This contiguous subset always starts with the zeroth lane.
3053 /// Partial Warp Reduce (argument value 2):
3054 ///   This algorithm gathers data from any number of lanes at any position.
3055 /// All reduced values are stored in the lowest possible lane.  The set
/// of problems every algorithm addresses is a superset of those
/// addressable by algorithms with a lower version number.  Overhead
/// increases as the algorithm version increases.
3059 ///
3060 /// Terminology
3061 /// Reduce element:
3062 ///   Reduce element refers to the individual data field with primitive
3063 ///   data types to be combined and reduced across threads.
3064 /// Reduce list:
3065 ///   Reduce list refers to a collection of local, thread-private
3066 ///   reduce elements.
3067 /// Remote Reduce list:
3068 ///   Remote Reduce list refers to a collection of remote (relative to
3069 ///   the current thread) reduce elements.
3070 ///
3071 /// We distinguish between three states of threads that are important to
3072 /// the implementation of this function.
3073 /// Alive threads:
3074 ///   Threads in a warp executing the SIMT instruction, as distinguished from
3075 ///   threads that are inactive due to divergent control flow.
3076 /// Active threads:
3077 ///   The minimal set of threads that has to be alive upon entry to this
3078 ///   function.  The computation is correct iff active threads are alive.
3079 ///   Some threads are alive but they are not active because they do not
3080 ///   contribute to the computation in any useful manner.  Turning them off
3081 ///   may introduce control flow overheads without any tangible benefits.
3082 /// Effective threads:
3083 ///   In order to comply with the argument requirements of the shuffle
3084 ///   function, we must keep all lanes holding data alive.  But at most
3085 ///   half of them perform value aggregation; we refer to this half of
///   threads as effective.  The other half simply hands off its data.
3088 ///
3089 /// Procedure
3090 /// Value shuffle:
3091 ///   In this step active threads transfer data from higher lane positions
3092 ///   in the warp to lower lane positions, creating Remote Reduce list.
3093 /// Value aggregation:
3094 ///   In this step, effective threads combine their thread local Reduce list
3095 ///   with Remote Reduce list and store the result in the thread local
3096 ///   Reduce list.
3097 /// Value copy:
3098 ///   In this step, we deal with the assumption made by algorithm 2
3099 ///   (i.e. contiguity assumption).  When we have an odd number of lanes
3100 ///   active, say 2k+1, only k threads will be effective and therefore k
3101 ///   new values will be produced.  However, the Reduce list owned by the
3102 ///   (2k+1)th thread is ignored in the value aggregation.  Therefore
3103 ///   we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
3104 ///   that the contiguity assumption still holds.
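///
/// As an illustrative sketch (the offset sequence is what the runtime
/// typically supplies; it is not enforced by this helper): a full-warp
/// reduce (version 0) over 32 lanes invokes this function log2(32) = 5
/// times with halving offsets 16, 8, 4, 2 and 1, after which lane 0 holds
/// the fully combined Reduce list.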
3105 static llvm::Function *emitShuffleAndReduceFunction(
3106     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3107     QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3108   ASTContext &C = CGM.getContext();
3109 
3110   // Thread local Reduce list used to host the values of data to be reduced.
3111   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3112                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3113   // Current lane id; could be logical.
3114   ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3115                               ImplicitParamDecl::Other);
3116   // Offset of the remote source lane relative to the current lane.
3117   ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3118                                         C.ShortTy, ImplicitParamDecl::Other);
3119   // Algorithm version.  This is expected to be known at compile time.
3120   ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3121                                C.ShortTy, ImplicitParamDecl::Other);
3122   FunctionArgList Args;
3123   Args.push_back(&ReduceListArg);
3124   Args.push_back(&LaneIDArg);
3125   Args.push_back(&RemoteLaneOffsetArg);
3126   Args.push_back(&AlgoVerArg);
3127 
3128   const CGFunctionInfo &CGFI =
3129       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3130   auto *Fn = llvm::Function::Create(
3131       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3132       "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3133   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3134   Fn->setDoesNotRecurse();
3135 
3136   CodeGenFunction CGF(CGM);
3137   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3138 
3139   CGBuilderTy &Bld = CGF.Builder;
3140 
3141   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3142   Address LocalReduceList(
3143       Bld.CreatePointerBitCastOrAddrSpaceCast(
3144           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3145                                C.VoidPtrTy, SourceLocation()),
3146           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3147       CGF.getPointerAlign());
3148 
3149   Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3150   llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3151       AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3152 
3153   Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3154   llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3155       AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3156 
3157   Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3158   llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3159       AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3160 
3161   // Create a local thread-private variable to host the Reduce list
3162   // from a remote lane.
3163   Address RemoteReduceList =
3164       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3165 
3166   // This loop iterates through the list of reduce elements and copies,
3167   // element by element, from a remote lane in the warp to RemoteReduceList,
3168   // hosted on the thread's stack.
3169   emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3170                         LocalReduceList, RemoteReduceList,
3171                         {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3172                          /*ScratchpadIndex=*/nullptr,
3173                          /*ScratchpadWidth=*/nullptr});
3174 
  // The actions to be performed on the Remote Reduce list depend
3176   // on the algorithm version.
3177   //
  //  if ((AlgoVer==0) || (AlgoVer==1 && LaneId < Offset) ||
  //      (AlgoVer==2 && LaneId % 2 == 0 && Offset > 0)):
3180   //    do the reduction value aggregation
3181   //
3182   //  The thread local variable Reduce list is mutated in place to host the
3183   //  reduced data, which is the aggregated value produced from local and
3184   //  remote lanes.
3185   //
3186   //  Note that AlgoVer is expected to be a constant integer known at compile
3187   //  time.
  //  When AlgoVer==0, the first conjunction evaluates to true, making
  //    the entire predicate true at compile time.
  //  When AlgoVer==1, only the lane-id test of the second conjunction must
  //    be evaluated at runtime; the other conjunctions evaluate to false
  //    at compile time.
  //  When AlgoVer==2, only the lane and offset tests of the third
  //    conjunction must be evaluated at runtime; the other conjunctions
  //    evaluate to false at compile time.
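  //
  //  E.g. (a sketch; the values are hypothetical): with AlgoVer==1 and
  //  Offset==4, lanes 0-3 aggregate the shuffled-in values, while lanes
  //  4-7 merely donate their data and later copy the remote Reduce list
  //  (see CondCopy below).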
3196   llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3197 
3198   llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3199   llvm::Value *CondAlgo1 = Bld.CreateAnd(
3200       Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3201 
3202   llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3203   llvm::Value *CondAlgo2 = Bld.CreateAnd(
3204       Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3205   CondAlgo2 = Bld.CreateAnd(
3206       CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3207 
3208   llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3209   CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3210 
3211   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3212   llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3213   llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3214   Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3215 
3216   CGF.EmitBlock(ThenBB);
3217   // reduce_function(LocalReduceList, RemoteReduceList)
3218   llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3219       LocalReduceList.getPointer(), CGF.VoidPtrTy);
3220   llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3221       RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3222   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3223       CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3224   Bld.CreateBr(MergeBB);
3225 
3226   CGF.EmitBlock(ElseBB);
3227   Bld.CreateBr(MergeBB);
3228 
3229   CGF.EmitBlock(MergeBB);
3230 
3231   // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3232   // Reduce list.
3233   Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3234   llvm::Value *CondCopy = Bld.CreateAnd(
3235       Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3236 
3237   llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3238   llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3239   llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3240   Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3241 
3242   CGF.EmitBlock(CpyThenBB);
3243   emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3244                         RemoteReduceList, LocalReduceList);
3245   Bld.CreateBr(CpyMergeBB);
3246 
3247   CGF.EmitBlock(CpyElseBB);
3248   Bld.CreateBr(CpyMergeBB);
3249 
3250   CGF.EmitBlock(CpyMergeBB);
3251 
3252   CGF.FinishFunction();
3253   return Fn;
3254 }
3255 
3256 /// This function emits a helper that copies all the reduction variables from
3257 /// the team into the provided global buffer for the reduction variables.
3258 ///
3259 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3260 ///   For all data entries D in reduce_data:
3261 ///     Copy local D to buffer.D[Idx]
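///
/// In C-like pseudocode the emitted body is roughly (a sketch; D0..DN and
/// T0..TN stand for the reduction fields and their types):
///   buffer->D0[Idx] = *(T0 *)reduce_data[0];
///   ...
///   buffer->DN[Idx] = *(TN *)reduce_data[N];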
3262 static llvm::Value *emitListToGlobalCopyFunction(
3263     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3264     QualType ReductionArrayTy, SourceLocation Loc,
3265     const RecordDecl *TeamReductionRec,
3266     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3267         &VarFieldMap) {
3268   ASTContext &C = CGM.getContext();
3269 
3270   // Buffer: global reduction buffer.
3271   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3272                               C.VoidPtrTy, ImplicitParamDecl::Other);
3273   // Idx: index of the buffer.
3274   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3275                            ImplicitParamDecl::Other);
3276   // ReduceList: thread local Reduce list.
3277   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3278                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3279   FunctionArgList Args;
3280   Args.push_back(&BufferArg);
3281   Args.push_back(&IdxArg);
3282   Args.push_back(&ReduceListArg);
3283 
3284   const CGFunctionInfo &CGFI =
3285       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3286   auto *Fn = llvm::Function::Create(
3287       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3288       "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3289   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3290   Fn->setDoesNotRecurse();
3291   CodeGenFunction CGF(CGM);
3292   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3293 
3294   CGBuilderTy &Bld = CGF.Builder;
3295 
3296   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3297   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3298   Address LocalReduceList(
3299       Bld.CreatePointerBitCastOrAddrSpaceCast(
3300           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3301                                C.VoidPtrTy, Loc),
3302           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3303       CGF.getPointerAlign());
3304   QualType StaticTy = C.getRecordType(TeamReductionRec);
3305   llvm::Type *LLVMReductionsBufferTy =
3306       CGM.getTypes().ConvertTypeForMem(StaticTy);
3307   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3308       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3309       LLVMReductionsBufferTy->getPointerTo());
3310   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3311                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3312                                               /*Volatile=*/false, C.IntTy,
3313                                               Loc)};
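  // {0, Idx} selects element Idx of a variable's array field within the
  // team reduction buffer record.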
3314   unsigned Idx = 0;
3315   for (const Expr *Private : Privates) {
3316     // Reduce element = LocalReduceList[i]
3317     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3318     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3319         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
    // elemptr = (ElemType*)elemptrptr
3321     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3322         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3323     Address ElemPtr =
3324         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3325     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3326     // Global = Buffer.VD[Idx];
3327     const FieldDecl *FD = VarFieldMap.lookup(VD);
3328     LValue GlobLVal = CGF.EmitLValueForField(
3329         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3330     llvm::Value *BufferPtr =
3331         Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3332     GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3333     switch (CGF.getEvaluationKind(Private->getType())) {
3334     case TEK_Scalar: {
3335       llvm::Value *V = CGF.EmitLoadOfScalar(
3336           ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
3337           LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3338       CGF.EmitStoreOfScalar(V, GlobLVal);
3339       break;
3340     }
3341     case TEK_Complex: {
3342       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3343           CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3344       CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3345       break;
3346     }
3347     case TEK_Aggregate:
3348       CGF.EmitAggregateCopy(GlobLVal,
3349                             CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3350                             Private->getType(), AggValueSlot::DoesNotOverlap);
3351       break;
3352     }
3353     ++Idx;
3354   }
3355 
3356   CGF.FinishFunction();
3357   return Fn;
3358 }
3359 
3360 /// This function emits a helper that reduces all the reduction variables from
/// the team into the provided global buffer.
3362 ///
3363 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3364 ///  void *GlobPtrs[];
3365 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
3366 ///  ...
3367 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
3368 ///  reduce_function(GlobPtrs, reduce_data);
3369 static llvm::Value *emitListToGlobalReduceFunction(
3370     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3371     QualType ReductionArrayTy, SourceLocation Loc,
3372     const RecordDecl *TeamReductionRec,
3373     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3374         &VarFieldMap,
3375     llvm::Function *ReduceFn) {
3376   ASTContext &C = CGM.getContext();
3377 
3378   // Buffer: global reduction buffer.
3379   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3380                               C.VoidPtrTy, ImplicitParamDecl::Other);
3381   // Idx: index of the buffer.
3382   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3383                            ImplicitParamDecl::Other);
3384   // ReduceList: thread local Reduce list.
3385   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3386                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3387   FunctionArgList Args;
3388   Args.push_back(&BufferArg);
3389   Args.push_back(&IdxArg);
3390   Args.push_back(&ReduceListArg);
3391 
3392   const CGFunctionInfo &CGFI =
3393       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3394   auto *Fn = llvm::Function::Create(
3395       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3396       "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3397   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3398   Fn->setDoesNotRecurse();
3399   CodeGenFunction CGF(CGM);
3400   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3401 
3402   CGBuilderTy &Bld = CGF.Builder;
3403 
3404   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3405   QualType StaticTy = C.getRecordType(TeamReductionRec);
3406   llvm::Type *LLVMReductionsBufferTy =
3407       CGM.getTypes().ConvertTypeForMem(StaticTy);
3408   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3409       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3410       LLVMReductionsBufferTy->getPointerTo());
3411 
3412   // 1. Build a list of reduction variables.
3413   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3414   Address ReductionList =
3415       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3416   auto IPriv = Privates.begin();
3417   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3418                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3419                                               /*Volatile=*/false, C.IntTy,
3420                                               Loc)};
3421   unsigned Idx = 0;
3422   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3423     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3424     // Global = Buffer.VD[Idx];
3425     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3426     const FieldDecl *FD = VarFieldMap.lookup(VD);
3427     LValue GlobLVal = CGF.EmitLValueForField(
3428         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3429     llvm::Value *BufferPtr =
3430         Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3431     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3432     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3433     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3434       // Store array size.
3435       ++Idx;
3436       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3437       llvm::Value *Size = CGF.Builder.CreateIntCast(
3438           CGF.getVLASize(
3439                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3440               .NumElts,
3441           CGF.SizeTy, /*isSigned=*/false);
3442       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3443                               Elem);
3444     }
3445   }
3446 
3447   // Call reduce_function(GlobalReduceList, ReduceList)
3448   llvm::Value *GlobalReduceList =
3449       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3450   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3451   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3452       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3453   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3454       CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3455   CGF.FinishFunction();
3456   return Fn;
3457 }
3458 
/// This function emits a helper that copies all the reduction variables from
/// the provided global buffer into the team's local reduce list.
///
/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3463 ///   For all data entries D in reduce_data:
3464 ///     Copy buffer.D[Idx] to local D;
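///
/// I.e. per entry, roughly (a sketch; D and T stand for a reduction field
/// and its type): *(T *)reduce_data[i] = buffer->D[Idx];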
3465 static llvm::Value *emitGlobalToListCopyFunction(
3466     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3467     QualType ReductionArrayTy, SourceLocation Loc,
3468     const RecordDecl *TeamReductionRec,
3469     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3470         &VarFieldMap) {
3471   ASTContext &C = CGM.getContext();
3472 
3473   // Buffer: global reduction buffer.
3474   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3475                               C.VoidPtrTy, ImplicitParamDecl::Other);
3476   // Idx: index of the buffer.
3477   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3478                            ImplicitParamDecl::Other);
3479   // ReduceList: thread local Reduce list.
3480   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3481                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3482   FunctionArgList Args;
3483   Args.push_back(&BufferArg);
3484   Args.push_back(&IdxArg);
3485   Args.push_back(&ReduceListArg);
3486 
3487   const CGFunctionInfo &CGFI =
3488       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3489   auto *Fn = llvm::Function::Create(
3490       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3491       "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3492   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3493   Fn->setDoesNotRecurse();
3494   CodeGenFunction CGF(CGM);
3495   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3496 
3497   CGBuilderTy &Bld = CGF.Builder;
3498 
3499   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3500   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3501   Address LocalReduceList(
3502       Bld.CreatePointerBitCastOrAddrSpaceCast(
3503           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3504                                C.VoidPtrTy, Loc),
3505           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3506       CGF.getPointerAlign());
3507   QualType StaticTy = C.getRecordType(TeamReductionRec);
3508   llvm::Type *LLVMReductionsBufferTy =
3509       CGM.getTypes().ConvertTypeForMem(StaticTy);
3510   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3511       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3512       LLVMReductionsBufferTy->getPointerTo());
3513 
3514   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3515                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3516                                               /*Volatile=*/false, C.IntTy,
3517                                               Loc)};
3518   unsigned Idx = 0;
3519   for (const Expr *Private : Privates) {
3520     // Reduce element = LocalReduceList[i]
3521     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3522     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3523         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
    // elemptr = (ElemType*)elemptrptr
3525     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3526         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3527     Address ElemPtr =
3528         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3529     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3530     // Global = Buffer.VD[Idx];
3531     const FieldDecl *FD = VarFieldMap.lookup(VD);
3532     LValue GlobLVal = CGF.EmitLValueForField(
3533         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3534     llvm::Value *BufferPtr =
3535         Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3536     GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3537     switch (CGF.getEvaluationKind(Private->getType())) {
3538     case TEK_Scalar: {
3539       llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3540       CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
3541                             LValueBaseInfo(AlignmentSource::Type),
3542                             TBAAAccessInfo());
3543       break;
3544     }
3545     case TEK_Complex: {
3546       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3547       CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3548                              /*isInit=*/false);
3549       break;
3550     }
3551     case TEK_Aggregate:
3552       CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3553                             GlobLVal, Private->getType(),
3554                             AggValueSlot::DoesNotOverlap);
3555       break;
3556     }
3557     ++Idx;
3558   }
3559 
3560   CGF.FinishFunction();
3561   return Fn;
3562 }
3563 
/// This function emits a helper that reduces the reduction variables in the
/// provided global buffer into the team's local reduce list.
3566 ///
3567 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3568 ///  void *GlobPtrs[];
3569 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
3570 ///  ...
3571 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
3572 ///  reduce_function(reduce_data, GlobPtrs);
3573 static llvm::Value *emitGlobalToListReduceFunction(
3574     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3575     QualType ReductionArrayTy, SourceLocation Loc,
3576     const RecordDecl *TeamReductionRec,
3577     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3578         &VarFieldMap,
3579     llvm::Function *ReduceFn) {
3580   ASTContext &C = CGM.getContext();
3581 
3582   // Buffer: global reduction buffer.
3583   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3584                               C.VoidPtrTy, ImplicitParamDecl::Other);
3585   // Idx: index of the buffer.
3586   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3587                            ImplicitParamDecl::Other);
3588   // ReduceList: thread local Reduce list.
3589   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3590                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3591   FunctionArgList Args;
3592   Args.push_back(&BufferArg);
3593   Args.push_back(&IdxArg);
3594   Args.push_back(&ReduceListArg);
3595 
3596   const CGFunctionInfo &CGFI =
3597       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3598   auto *Fn = llvm::Function::Create(
3599       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3600       "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
3601   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3602   Fn->setDoesNotRecurse();
3603   CodeGenFunction CGF(CGM);
3604   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3605 
3606   CGBuilderTy &Bld = CGF.Builder;
3607 
3608   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3609   QualType StaticTy = C.getRecordType(TeamReductionRec);
3610   llvm::Type *LLVMReductionsBufferTy =
3611       CGM.getTypes().ConvertTypeForMem(StaticTy);
3612   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3613       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3614       LLVMReductionsBufferTy->getPointerTo());
3615 
3616   // 1. Build a list of reduction variables.
3617   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3618   Address ReductionList =
3619       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3620   auto IPriv = Privates.begin();
3621   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3622                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3623                                               /*Volatile=*/false, C.IntTy,
3624                                               Loc)};
3625   unsigned Idx = 0;
3626   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3627     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3628     // Global = Buffer.VD[Idx];
3629     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3630     const FieldDecl *FD = VarFieldMap.lookup(VD);
3631     LValue GlobLVal = CGF.EmitLValueForField(
3632         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3633     llvm::Value *BufferPtr =
3634         Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3635     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3636     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3637     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3638       // Store array size.
3639       ++Idx;
3640       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3641       llvm::Value *Size = CGF.Builder.CreateIntCast(
3642           CGF.getVLASize(
3643                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3644               .NumElts,
3645           CGF.SizeTy, /*isSigned=*/false);
3646       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3647                               Elem);
3648     }
3649   }
3650 
3651   // Call reduce_function(ReduceList, GlobalReduceList)
3652   llvm::Value *GlobalReduceList =
3653       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3654   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3655   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3656       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3657   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3658       CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
3659   CGF.FinishFunction();
3660   return Fn;
3661 }
3662 
3663 ///
3664 /// Design of OpenMP reductions on the GPU
3665 ///
3666 /// Consider a typical OpenMP program with one or more reduction
3667 /// clauses:
3668 ///
3669 /// float foo;
3670 /// double bar;
3671 /// #pragma omp target teams distribute parallel for \
3672 ///             reduction(+:foo) reduction(*:bar)
3673 /// for (int i = 0; i < N; i++) {
3674 ///   foo += A[i]; bar *= B[i];
3675 /// }
3676 ///
3677 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
3678 /// all teams.  In our OpenMP implementation on the NVPTX device an
3679 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
3680 /// within a team are mapped to CUDA threads within a threadblock.
3681 /// Our goal is to efficiently aggregate values across all OpenMP
3682 /// threads such that:
3683 ///
3684 ///   - the compiler and runtime are logically concise, and
3685 ///   - the reduction is performed efficiently in a hierarchical
3686 ///     manner as follows: within OpenMP threads in the same warp,
3687 ///     across warps in a threadblock, and finally across teams on
3688 ///     the NVPTX device.
3689 ///
3690 /// Introduction to Decoupling
3691 ///
3692 /// We would like to decouple the compiler and the runtime so that the
3693 /// latter is ignorant of the reduction variables (number, data types)
3694 /// and the reduction operators.  This allows a simpler interface
3695 /// and implementation while still attaining good performance.
3696 ///
3697 /// Pseudocode for the aforementioned OpenMP program generated by the
3698 /// compiler is as follows:
3699 ///
3700 /// 1. Create private copies of reduction variables on each OpenMP
3701 ///    thread: 'foo_private', 'bar_private'
3702 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
3703 ///    to it and writes the result in 'foo_private' and 'bar_private'
3704 ///    respectively.
3705 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
3706 ///    and store the result on the team master:
3707 ///
3708 ///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
3709 ///        reduceData, shuffleReduceFn, interWarpCpyFn)
3710 ///
3711 ///     where:
3712 ///       struct ReduceData {
3713 ///         float *foo;
3714 ///         double *bar;
3715 ///       } reduceData;
3716 ///       reduceData.foo = &foo_private;
3717 ///       reduceData.bar = &bar_private;
3718 ///
3719 ///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
3720 ///     auxiliary functions generated by the compiler that operate on
3721 ///     variables of type 'ReduceData'.  They help the runtime perform
3722 ///     algorithmic steps in a data-agnostic way (a sketch follows the list).
3723 ///
3724 ///     'shuffleReduceFn' is a pointer to a function that reduces data
3725 ///     of type 'ReduceData' across two OpenMP threads (lanes) in the
3726 ///     same warp.  It takes the following arguments as input:
3727 ///
3728 ///     a. variable of type 'ReduceData' on the calling lane,
3729 ///     b. its lane_id,
3730 ///     c. an offset relative to the current lane_id to generate a
3731 ///        remote_lane_id.  The remote lane contains the second
3732 ///        variable of type 'ReduceData' that is to be reduced.
3733 ///     d. an algorithm version parameter determining which reduction
3734 ///        algorithm to use.
3735 ///
3736 ///     'shuffleReduceFn' retrieves data from the remote lane using
3737 ///     efficient GPU shuffle intrinsics and reduces, using the
3738 ///     algorithm specified by the 4th parameter, the two operands
3739 ///     element-wise.  The result is written to the first operand.
3740 ///
3741 ///     Different reduction algorithms are implemented in different
3742 ///     runtime functions, all calling 'shuffleReduceFn' to perform
3743 ///     the essential reduction step.  Therefore, based on the 4th
3744 ///     parameter, this function behaves slightly differently to
3745 ///     cooperate with the runtime to ensure correctness under
3746 ///     different circumstances.
3747 ///
3748 ///     'interWarpCpyFn' is a pointer to a function that transfers
3749 ///     reduced variables across warps.  It tunnels, through CUDA
3750 ///     shared memory, the thread-private data of type 'ReduceData'
3751 ///     from lane 0 of each warp to a lane in the first warp.
3752 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3753 ///    The last team writes the global reduced value to memory.
3754 ///
3755 ///     ret = __kmpc_nvptx_teams_reduce_nowait(...,
3756 ///             reduceData, shuffleReduceFn, interWarpCpyFn,
3757 ///             scratchpadCopyFn, loadAndReduceFn)
3758 ///
3759 ///     'scratchpadCopyFn' is a helper that stores reduced
3760 ///     data from the team master to a scratchpad array in
3761 ///     global memory.
3762 ///
3763 ///     'loadAndReduceFn' is a helper that loads data from
3764 ///     the scratchpad array and reduces it with the input
3765 ///     operand.
3766 ///
3767 ///     These compiler generated functions hide address
3768 ///     calculation and alignment information from the runtime.
3769 /// 5. if ret == 1:
3770 ///     The team master of the last team stores the reduced
3771 ///     result to the globals in memory.
3772 ///     foo += reduceData.foo; bar *= reduceData.bar
3773 ///
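/// As a concrete illustration of the interface above -- a hedged sketch in
/// CUDA-style pseudocode for the foo/bar example, not the exact code the
/// compiler emits -- 'shuffleReduceFn' could look like:
///
///   void shuffleReduceFn(ReduceData *local, int16_t lane_id,
///                        int16_t offset, int16_t algo_version) {
///     // Fetch the remote lane's elements with warp shuffle intrinsics.
///     float remote_foo = __shfl_down_sync(0xffffffffu, *local->foo, offset);
///     double remote_bar = __shfl_down_sync(0xffffffffu, *local->bar, offset);
///     // Version 0 (full warp) always combines; the partial-warp versions
///     // conditionally copy the remote value instead (see the per-element
///     // descriptions below).
///     *local->foo += remote_foo;
///     *local->bar *= remote_bar;
///   }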
3774 ///
3775 /// Warp Reduction Algorithms
3776 ///
3777 /// On the warp level, we have three algorithms implemented in the
3778 /// OpenMP runtime depending on the number of active lanes:
3779 ///
3780 /// Full Warp Reduction
3781 ///
3782 /// The reduce algorithm within a warp where all lanes are active
3783 /// is implemented in the runtime as follows:
3784 ///
3785 /// full_warp_reduce(void *reduce_data,
3786 ///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3787 ///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3788 ///     ShuffleReduceFn(reduce_data, 0, offset, 0);
3789 /// }
3790 ///
3791 /// The algorithm completes in log2(WARPSIZE) steps (five when WARPSIZE is 32).
3792 ///
3793 /// 'ShuffleReduceFn' is called here with lane_id set to 0 because this
3794 /// algorithm does not use lane_id; we thereby save instructions by not
3795 /// retrieving it from the corresponding special registers.  The 4th
3796 /// parameter, which represents the version of the algorithm being used,
3797 /// is set to 0 to signify full warp reduction.
3798 ///
3799 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3800 ///
3801 /// #reduce_elem refers to an element in the local lane's data structure
3802 /// #remote_elem is retrieved from a remote lane
3803 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3804 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3805 ///
3806 /// Contiguous Partial Warp Reduction
3807 ///
3808 /// This reduce algorithm is used within a warp where only the first
3809 /// 'n' (n <= WARPSIZE) lanes are active.  It is typically used when the
3810 /// number of OpenMP threads in a parallel region is not a multiple of
3811 /// WARPSIZE.  The algorithm is implemented in the runtime as follows:
3812 ///
3813 /// void
3814 /// contiguous_partial_reduce(void *reduce_data,
3815 ///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
3816 ///                           int size, int lane_id) {
3817 ///   int curr_size;
3818 ///   int offset;
3819 ///   curr_size = size;
3820 ///   offset = curr_size/2;
3821 ///   while (offset > 0) {
3822 ///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3823 ///     curr_size = (curr_size+1)/2;
3824 ///     offset = curr_size/2;
3825 ///   }
3826 /// }
3827 ///
3828 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3829 ///
3830 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3831 /// if (lane_id < offset)
3832 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3833 /// else
3834 ///     reduce_elem = remote_elem
3835 ///
3836 /// This algorithm assumes that the data to be reduced are located in a
3837 /// contiguous subset of lanes starting from the first.  When there is
3838 /// an odd number of active lanes, the data in the last lane is not
3839 /// aggregated with any other lane's data but is instead copied over.
3840 ///
3841 /// Dispersed Partial Warp Reduction
3842 ///
3843 /// This algorithm is used within a warp when any discontiguous subset of
3844 /// lanes are active.  It is used to implement the reduction operation
3845 /// across lanes in an OpenMP simd region or in a nested parallel region.
3846 ///
3847 /// void
3848 /// dispersed_partial_reduce(void *reduce_data,
3849 ///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3850 ///   int size, remote_id;
3851 ///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
3852 ///   do {
3853 ///       remote_id = next_active_lane_id_right_after_me();
3854 ///       # the above function returns 0 if no active lane
3855 ///       # is present right after the current lane.
3856 ///       size = number_of_active_lanes_in_this_warp();
3857 ///       logical_lane_id /= 2;
3858 ///       ShuffleReduceFn(reduce_data, logical_lane_id,
3859 ///                       remote_id-1-threadIdx.x, 2);
3860 ///   } while (logical_lane_id % 2 == 0 && size > 1);
3861 /// }
3862 ///
3863 /// There is no assumption made about the initial state of the reduction.
3864 /// Any number of lanes (>=1) could be active at any position.  The reduction
3865 /// result is returned in the first active lane.
3866 ///
3867 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3868 ///
3869 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3870 /// if (lane_id % 2 == 0 && offset > 0)
3871 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3872 /// else
3873 ///     reduce_elem = remote_elem
3874 ///
3875 ///
3876 /// Intra-Team Reduction
3877 ///
3878 /// This function, as implemented in the runtime call
3879 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3880 /// threads in a team.  It first reduces within a warp using the
3881 /// aforementioned algorithms, and then gathers all such reduced
3882 /// values at the first warp.
3883 ///
3884 /// The runtime makes use of the function 'interWarpCpyFn', which copies
3885 /// data from each "warp master" (the zeroth lane of each warp, where the
3886 /// warp-reduced data is held) to the zeroth warp.  This step reduces (in
3887 /// a mathematical sense) the problem of reduction across warp masters in
3888 /// a block to the problem of warp reduction.
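///
/// A sketch of that copy step (pseudocode; the generated function moves
/// the data one 32-bit chunk at a time and synchronizes with barriers
/// between the write and the read):
///
///   __shared__ uint32_t transfer_medium[WARPSIZE];
///   for (each 32-bit chunk of reduce_data) {
///     if (lane_id == 0)               // each warp master writes its chunk
///       transfer_medium[warp_id] = chunk;
///     barrier();
///     if (thread_id < num_warps)      // lanes of the first warp read them
///       chunk = transfer_medium[thread_id];
///     barrier();
///   }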
3889 ///
3890 ///
3891 /// Inter-Team Reduction
3892 ///
3893 /// Once a team has reduced its data to a single value, it is stored in
3894 /// a global scratchpad array.  Since each team has a distinct slot, this
3895 /// can be done without locking.
3896 ///
3897 /// The last team to write to the scratchpad array proceeds to reduce the
3898 /// scratchpad array.  One or more workers in the last team use the helper
3899 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
3900 /// the k'th worker reduces every k'th element.
3901 ///
3902 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3903 /// reduce across workers and compute a globally reduced value.
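///
/// In rough pseudocode (a sketch matching the description above, not the
/// runtime's exact implementation):
///
///   scratchpad[team_id] = reduceData;         // distinct slots, no locking
///   if (atomic_inc(&teams_done) == num_teams - 1) {
///     // Last team: each worker strides over the scratchpad slots.
///     for (i = thread_id; i < num_teams; i += num_threads)
///       loadAndReduceFn(reduceData, scratchpad, i);
///     // Then reduce across the workers of this team.
///     __kmpc_nvptx_parallel_reduce_nowait_v2(..., reduceData, ...);
///   }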
3904 ///
3905 void CGOpenMPRuntimeGPU::emitReduction(
3906     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3907     ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3908     ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3909   if (!CGF.HaveInsertPoint())
3910     return;
3911 
3912   bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3913 #ifndef NDEBUG
3914   bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3915 #endif
3916 
3917   if (Options.SimpleReduction) {
3918     assert(!TeamsReduction && !ParallelReduction &&
3919            "Invalid reduction selection in emitReduction.");
3920     CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3921                                    ReductionOps, Options);
3922     return;
3923   }
3924 
3925   assert((TeamsReduction || ParallelReduction) &&
3926          "Invalid reduction selection in emitReduction.");
3927 
3928   // Build res = __kmpc_nvptx_parallel_reduce_nowait_v2(<loc>, <gtid>, <n>,
3929   //     sizeof(RedList), RedList, shuffle_reduce_func, interwarp_copy_func);
3930   // or, for teams reductions,
3931   // res = __kmpc_nvptx_teams_reduce_nowait_v2(<loc>, <gtid>, buffer, ...);
3932   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3933   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3934 
3935   llvm::Value *Res;
3936   ASTContext &C = CGM.getContext();
3937   // 1. Build a list of reduction variables.
3938   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3939   auto Size = RHSExprs.size();
3940   for (const Expr *E : Privates) {
3941     if (E->getType()->isVariablyModifiedType())
3942       // Reserve place for array size.
3943       ++Size;
3944   }
3945   llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3946   QualType ReductionArrayTy =
3947       C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3948                              /*IndexTypeQuals=*/0);
3949   Address ReductionList =
3950       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3951   auto IPriv = Privates.begin();
3952   unsigned Idx = 0;
3953   for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3954     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3955     CGF.Builder.CreateStore(
3956         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3957             CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3958         Elem);
3959     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3960       // Store array size.
3961       ++Idx;
3962       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3963       llvm::Value *Size = CGF.Builder.CreateIntCast(
3964           CGF.getVLASize(
3965                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3966               .NumElts,
3967           CGF.SizeTy, /*isSigned=*/false);
3968       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3969                               Elem);
3970     }
3971   }
3972 
3973   llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3974       ReductionList.getPointer(), CGF.VoidPtrTy);
3975   llvm::Function *ReductionFn = emitReductionFunction(
3976       Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
3977       LHSExprs, RHSExprs, ReductionOps);
3978   llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3979   llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3980       CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3981   llvm::Value *InterWarpCopyFn =
3982       emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3983 
3984   if (ParallelReduction) {
3985     llvm::Value *Args[] = {RTLoc,
3986                            ThreadId,
3987                            CGF.Builder.getInt32(RHSExprs.size()),
3988                            ReductionArrayTySize,
3989                            RL,
3990                            ShuffleAndReduceFn,
3991                            InterWarpCopyFn};
3992 
3993     Res = CGF.EmitRuntimeCall(
3994         OMPBuilder.getOrCreateRuntimeFunction(
3995             CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3996         Args);
3997   } else {
3998     assert(TeamsReduction && "expected teams reduction.");
3999     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4000     llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4001     int Cnt = 0;
4002     for (const Expr *DRE : Privates) {
4003       PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4004       ++Cnt;
4005     }
4006     const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4007         CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4008         C.getLangOpts().OpenMPCUDAReductionBufNum);
4009     TeamsReductions.push_back(TeamReductionRec);
4010     if (!KernelTeamsReductionPtr) {
4011       KernelTeamsReductionPtr = new llvm::GlobalVariable(
4012           CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4013           llvm::GlobalValue::InternalLinkage, nullptr,
4014           "_openmp_teams_reductions_buffer_$_$ptr");
4015     }
4016     llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4017         Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4018         /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4019     llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4020         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4021     llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4022         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4023         ReductionFn);
4024     llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4025         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4026     llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4027         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4028         ReductionFn);
4029 
4030     llvm::Value *Args[] = {
4031         RTLoc,
4032         ThreadId,
4033         GlobalBufferPtr,
4034         CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4035         RL,
4036         ShuffleAndReduceFn,
4037         InterWarpCopyFn,
4038         GlobalToBufferCpyFn,
4039         GlobalToBufferRedFn,
4040         BufferToGlobalCpyFn,
4041         BufferToGlobalRedFn};
4042 
4043     Res = CGF.EmitRuntimeCall(
4044         OMPBuilder.getOrCreateRuntimeFunction(
4045             CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
4046         Args);
4047   }
4048 
4049   // 5. Build if (res == 1)
4050   llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4051   llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4052   llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4053       Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4054   CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4055 
4056   // 6. Build then branch: where we have reduced values in the master
4057   //    thread in each team.
4058   //    __kmpc_end_reduce{_nowait}(<gtid>);
4059   //    break;
4060   CGF.EmitBlock(ThenBB);
4061 
4062   // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4063   auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4064                     this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4065     auto IPriv = Privates.begin();
4066     auto ILHS = LHSExprs.begin();
4067     auto IRHS = RHSExprs.begin();
4068     for (const Expr *E : ReductionOps) {
4069       emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4070                                   cast<DeclRefExpr>(*IRHS));
4071       ++IPriv;
4072       ++ILHS;
4073       ++IRHS;
4074     }
4075   };
4076   llvm::Value *EndArgs[] = {ThreadId};
4077   RegionCodeGenTy RCG(CodeGen);
4078   NVPTXActionTy Action(
4079       nullptr, llvm::None,
4080       OMPBuilder.getOrCreateRuntimeFunction(
4081           CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
4082       EndArgs);
4083   RCG.setAction(Action);
4084   RCG(CGF);
4085   // There is no need to emit line number for unconditional branch.
4086   (void)ApplyDebugLocation::CreateEmpty(CGF);
4087   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4088 }
4089 
4090 const VarDecl *
4091 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
4092                                        const VarDecl *NativeParam) const {
4093   if (!NativeParam->getType()->isReferenceType())
4094     return NativeParam;
4095   QualType ArgType = NativeParam->getType();
4096   QualifierCollector QC;
4097   const Type *NonQualTy = QC.strip(ArgType);
4098   QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4099   if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4100     if (Attr->getCaptureKind() == OMPC_map) {
4101       PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4102                                                         LangAS::opencl_global);
4103     } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4104                PointeeTy.isConstant(CGM.getContext())) {
4105       PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4106                                                         LangAS::opencl_generic);
4107     }
4108   }
4109   ArgType = CGM.getContext().getPointerType(PointeeTy);
4110   QC.addRestrict();
4111   enum { NVPTX_local_addr = 5 };
4112   QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4113   ArgType = QC.apply(CGM.getContext(), ArgType);
4114   if (isa<ImplicitParamDecl>(NativeParam))
4115     return ImplicitParamDecl::Create(
4116         CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4117         NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4118   return ParmVarDecl::Create(
4119       CGM.getContext(),
4120       const_cast<DeclContext *>(NativeParam->getDeclContext()),
4121       NativeParam->getBeginLoc(), NativeParam->getLocation(),
4122       NativeParam->getIdentifier(), ArgType,
4123       /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4124 }
4125 
4126 Address
4127 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
4128                                           const VarDecl *NativeParam,
4129                                           const VarDecl *TargetParam) const {
4130   assert(NativeParam != TargetParam &&
4131          NativeParam->getType()->isReferenceType() &&
4132          "Native arg must not be the same as target arg.");
4133   Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4134   QualType NativeParamType = NativeParam->getType();
4135   QualifierCollector QC;
4136   const Type *NonQualTy = QC.strip(NativeParamType);
4137   QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4138   unsigned NativePointeeAddrSpace =
4139       CGF.getContext().getTargetAddressSpace(NativePointeeTy);
4140   QualType TargetTy = TargetParam->getType();
4141   llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4142       LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4143   // First cast to generic.
4144   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4145       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4146                       /*AddrSpace=*/0));
4147   // Cast from generic to native address space.
4148   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4149       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4150                       NativePointeeAddrSpace));
4151   Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4152   CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4153                         NativeParamType);
4154   return NativeParamAddr;
4155 }
4156 
4157 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
4158     CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
4159     ArrayRef<llvm::Value *> Args) const {
4160   SmallVector<llvm::Value *, 4> TargetArgs;
4161   TargetArgs.reserve(Args.size());
4162   auto *FnType = OutlinedFn.getFunctionType();
4163   for (unsigned I = 0, E = Args.size(); I < E; ++I) {
4164     if (FnType->isVarArg() && FnType->getNumParams() <= I) {
4165       TargetArgs.append(std::next(Args.begin(), I), Args.end());
4166       break;
4167     }
4168     llvm::Type *TargetType = FnType->getParamType(I);
4169     llvm::Value *NativeArg = Args[I];
4170     if (!TargetType->isPointerTy()) {
4171       TargetArgs.emplace_back(NativeArg);
4172       continue;
4173     }
4174     llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4175         NativeArg,
4176         NativeArg->getType()->getPointerElementType()->getPointerTo());
4177     TargetArgs.emplace_back(
4178         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
4179   }
4180   CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
4181 }
4182 
4183 /// Emit a function that wraps the outlined parallel region and
4184 /// controls the arguments which are passed to it.  The wrapper
4185 /// ensures that the outlined function is called with the correct
4186 /// arguments when data is shared.
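///
/// In rough pseudocode (a sketch of the code emitted below, with
/// hypothetical names; 'outlined' stands for the outlined parallel fn):
///
///   void outlined_wrapper(uint16_t parallel_level, uint32_t wrapper_tid) {
///     uint32_t zero = 0;
///     void **global_args;
///     __kmpc_get_shared_variables(&global_args);
///     // Loop bounds (if shared) and each captured variable are loaded
///     // from the shared-argument list and forwarded to the outlined fn.
///     outlined(&wrapper_tid, &zero, load(global_args[0]), ...);
///   }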
4187 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
4188     llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
4189   ASTContext &Ctx = CGM.getContext();
4190   const auto &CS = *D.getCapturedStmt(OMPD_parallel);
4191 
4192   // Create a function that takes as argument the source thread.
4193   FunctionArgList WrapperArgs;
4194   QualType Int16QTy =
4195       Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
4196   QualType Int32QTy =
4197       Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
4198   ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4199                                      /*Id=*/nullptr, Int16QTy,
4200                                      ImplicitParamDecl::Other);
4201   ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4202                                /*Id=*/nullptr, Int32QTy,
4203                                ImplicitParamDecl::Other);
4204   WrapperArgs.emplace_back(&ParallelLevelArg);
4205   WrapperArgs.emplace_back(&WrapperArg);
4206 
4207   const CGFunctionInfo &CGFI =
4208       CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
4209 
4210   auto *Fn = llvm::Function::Create(
4211       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4212       Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
4213   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4214   Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
4215   Fn->setDoesNotRecurse();
4216 
4217   CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
4218   CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
4219                     D.getBeginLoc(), D.getBeginLoc());
4220 
4221   const auto *RD = CS.getCapturedRecordDecl();
4222   auto CurField = RD->field_begin();
4223 
4224   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
4225                                                       /*Name=*/".zero.addr");
4226   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
4227   // Get the array of arguments.
4228   SmallVector<llvm::Value *, 8> Args;
4229 
4230   Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
4231   Args.emplace_back(ZeroAddr.getPointer());
4232 
4233   CGBuilderTy &Bld = CGF.Builder;
4234   auto CI = CS.capture_begin();
4235 
4236   // Use global memory for data sharing.
4237   // Handle passing of global args to workers.
4238   Address GlobalArgs =
4239       CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
4240   llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
4241   llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
4242   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4243                           CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
4244                       DataSharingArgs);
4245 
4246   // Retrieve the shared variables from the list of references returned
4247   // by the runtime. Pass the variables to the outlined function.
4248   Address SharedArgListAddress = Address::invalid();
4249   if (CS.capture_size() > 0 ||
4250       isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4251     SharedArgListAddress = CGF.EmitLoadOfPointer(
4252         GlobalArgs, CGF.getContext()
4253                         .getPointerType(CGF.getContext().getPointerType(
4254                             CGF.getContext().VoidPtrTy))
4255                         .castAs<PointerType>());
4256   }
4257   unsigned Idx = 0;
4258   if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4259     Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4260     Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4261         Src, CGF.SizeTy->getPointerTo());
4262     llvm::Value *LB = CGF.EmitLoadOfScalar(
4263         TypedAddress,
4264         /*Volatile=*/false,
4265         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4266         cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
4267     Args.emplace_back(LB);
4268     ++Idx;
4269     Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4270     TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4271         Src, CGF.SizeTy->getPointerTo());
4272     llvm::Value *UB = CGF.EmitLoadOfScalar(
4273         TypedAddress,
4274         /*Volatile=*/false,
4275         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4276         cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
4277     Args.emplace_back(UB);
4278     ++Idx;
4279   }
4280   if (CS.capture_size() > 0) {
4281     ASTContext &CGFContext = CGF.getContext();
4282     for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
4283       QualType ElemTy = CurField->getType();
4284       Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
4285       Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4286           Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
4287       llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
4288                                               /*Volatile=*/false,
4289                                               CGFContext.getPointerType(ElemTy),
4290                                               CI->getLocation());
4291       if (CI->capturesVariableByCopy() &&
4292           !CI->getCapturedVar()->getType()->isAnyPointerType()) {
4293         Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
4294                               CI->getLocation());
4295       }
4296       Args.emplace_back(Arg);
4297     }
4298   }
4299 
4300   emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
4301   CGF.FinishFunction();
4302   return Fn;
4303 }
4304 
4305 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
4306                                               const Decl *D) {
4307   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4308     return;
4309 
4310   assert(D && "Expected function or captured|block decl.");
4311   assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
4312          "Function is registered already.");
4313   assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
4314          "Team is set but not processed.");
4315   const Stmt *Body = nullptr;
4316   bool NeedToDelayGlobalization = false;
4317   if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4318     Body = FD->getBody();
4319   } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
4320     Body = BD->getBody();
4321   } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
4322     Body = CD->getBody();
4323     NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
4324     if (NeedToDelayGlobalization &&
4325         getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
4326       return;
4327   }
4328   if (!Body)
4329     return;
4330   CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
4331   VarChecker.Visit(Body);
4332   const RecordDecl *GlobalizedVarsRecord =
4333       VarChecker.getGlobalizedRecord(IsInTTDRegion);
4334   TeamAndReductions.first = nullptr;
4335   TeamAndReductions.second.clear();
4336   ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
4337       VarChecker.getEscapedVariableLengthDecls();
4338   if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
4339     return;
4340   auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
4341   I->getSecond().MappedParams =
4342       std::make_unique<CodeGenFunction::OMPMapVars>();
4343   I->getSecond().GlobalRecord = GlobalizedVarsRecord;
4344   I->getSecond().EscapedParameters.insert(
4345       VarChecker.getEscapedParameters().begin(),
4346       VarChecker.getEscapedParameters().end());
4347   I->getSecond().EscapedVariableLengthDecls.append(
4348       EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
4349   DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
4350   for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4351     assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4352     const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4353     Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
4354   }
4355   if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
4356     CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
4357     VarChecker.Visit(Body);
4358     I->getSecond().SecondaryGlobalRecord =
4359         VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
4360     I->getSecond().SecondaryLocalVarData.emplace();
4361     DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
4362     for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4363       assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4364       const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4365       Data.insert(
4366           std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
4367     }
4368   }
4369   if (!NeedToDelayGlobalization) {
4370     emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
4371     struct GlobalizationScope final : EHScopeStack::Cleanup {
4372       GlobalizationScope() = default;
4373 
4374       void Emit(CodeGenFunction &CGF, Flags flags) override {
4375         static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
4376             .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
4377       }
4378     };
4379     CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
4380   }
4381 }
4382 
4383 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
4384                                                         const VarDecl *VD) {
4385   if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
4386     const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4387     auto AS = LangAS::Default;
4388     switch (A->getAllocatorType()) {
4389       // Use the default allocator here as by default local vars are
4390       // thread-local.
4391     case OMPAllocateDeclAttr::OMPNullMemAlloc:
4392     case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4393     case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4394     case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4395     case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4396       // Follow the user decision - use default allocation.
4397       return Address::invalid();
4398     case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4399       // TODO: implement support for user-defined allocators.
4400       return Address::invalid();
4401     case OMPAllocateDeclAttr::OMPConstMemAlloc:
4402       AS = LangAS::cuda_constant;
4403       break;
4404     case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4405       AS = LangAS::cuda_shared;
4406       break;
4407     case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4408     case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4409       break;
4410     }
4411     llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4412     auto *GV = new llvm::GlobalVariable(
4413         CGM.getModule(), VarTy, /*isConstant=*/false,
4414         llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
4415         VD->getName(),
4416         /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4417         CGM.getContext().getTargetAddressSpace(AS));
4418     CharUnits Align = CGM.getContext().getDeclAlign(VD);
4419     GV->setAlignment(Align.getAsAlign());
4420     return Address(
4421         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4422             GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
4423                     VD->getType().getAddressSpace()))),
4424         Align);
4425   }
4426 
4427   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4428     return Address::invalid();
4429 
4430   VD = VD->getCanonicalDecl();
4431   auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
4432   if (I == FunctionGlobalizedDecls.end())
4433     return Address::invalid();
4434   auto VDI = I->getSecond().LocalVarData.find(VD);
4435   if (VDI != I->getSecond().LocalVarData.end())
4436     return VDI->second.PrivateAddr;
4437   if (VD->hasAttrs()) {
4438     for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
4439          E(VD->attr_end());
4440          IT != E; ++IT) {
4441       auto VDI = I->getSecond().LocalVarData.find(
4442           cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
4443               ->getCanonicalDecl());
4444       if (VDI != I->getSecond().LocalVarData.end())
4445         return VDI->second.PrivateAddr;
4446     }
4447   }
4448 
4449   return Address::invalid();
4450 }
4451 
4452 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
4453   FunctionGlobalizedDecls.erase(CGF.CurFn);
4454   CGOpenMPRuntime::functionFinished(CGF);
4455 }
4456 
4457 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
4458     CodeGenFunction &CGF, const OMPLoopDirective &S,
4459     OpenMPDistScheduleClauseKind &ScheduleKind,
4460     llvm::Value *&Chunk) const {
4461   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
4462   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
4463     ScheduleKind = OMPC_DIST_SCHEDULE_static;
4464     Chunk = CGF.EmitScalarConversion(
4465         RT.getGPUNumThreads(CGF),
4466         CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4467         S.getIterationVariable()->getType(), S.getBeginLoc());
4468     return;
4469   }
4470   CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
4471       CGF, S, ScheduleKind, Chunk);
4472 }
4473 
4474 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
4475     CodeGenFunction &CGF, const OMPLoopDirective &S,
4476     OpenMPScheduleClauseKind &ScheduleKind,
4477     const Expr *&ChunkExpr) const {
4478   ScheduleKind = OMPC_SCHEDULE_static;
4479   // Chunk size is 1 in this case.
4480   llvm::APInt ChunkSize(32, 1);
4481   ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
4482       CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4483       SourceLocation());
4484 }
4485 
4486 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
4487     CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
4488   assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
4489          " Expected target-based directive.");
4490   const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
4491   for (const CapturedStmt::Capture &C : CS->captures()) {
4492     // Capture variables captured by reference in lambdas for target-based
4493     // directives.
4494     if (!C.capturesVariable())
4495       continue;
4496     const VarDecl *VD = C.getCapturedVar();
4497     const auto *RD = VD->getType()
4498                          .getCanonicalType()
4499                          .getNonReferenceType()
4500                          ->getAsCXXRecordDecl();
4501     if (!RD || !RD->isLambda())
4502       continue;
4503     Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4504     LValue VDLVal;
4505     if (VD->getType().getCanonicalType()->isReferenceType())
4506       VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
4507     else
4508       VDLVal = CGF.MakeAddrLValue(
4509           VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
4510     llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4511     FieldDecl *ThisCapture = nullptr;
4512     RD->getCaptureFields(Captures, ThisCapture);
4513     if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
4514       LValue ThisLVal =
4515           CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
4516       llvm::Value *CXXThis = CGF.LoadCXXThis();
4517       CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
4518     }
4519     for (const LambdaCapture &LC : RD->captures()) {
4520       if (LC.getCaptureKind() != LCK_ByRef)
4521         continue;
4522       const VarDecl *VD = LC.getCapturedVar();
4523       if (!CS->capturesVariable(VD))
4524         continue;
4525       auto It = Captures.find(VD);
4526       assert(It != Captures.end() && "Found lambda capture without field.");
4527       LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
4528       Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4529       if (VD->getType().getCanonicalType()->isReferenceType())
4530         VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
4531                                                VD->getType().getCanonicalType())
4532                      .getAddress(CGF);
4533       CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
4534     }
4535   }
4536 }
4537 
4538 unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
4539   return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
4540 }
4541 
4542 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
4543                                                             LangAS &AS) {
4544   if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
4545     return false;
4546   const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4547   switch(A->getAllocatorType()) {
4548   case OMPAllocateDeclAttr::OMPNullMemAlloc:
4549   case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4550   // Not supported; fall back to the default memory space.
4551   case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4552   case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4553   case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4554   case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4555   case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4556     AS = LangAS::Default;
4557     return true;
4558   case OMPAllocateDeclAttr::OMPConstMemAlloc:
4559     AS = LangAS::cuda_constant;
4560     return true;
4561   case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4562     AS = LangAS::cuda_shared;
4563     return true;
4564   case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4565     llvm_unreachable("Expected predefined allocator for the variables with the "
4566                      "static storage.");
4567   }
4568   return false;
4569 }
4570 
4571 // Get current CudaArch and ignore any unknown values
4572 static CudaArch getCudaArch(CodeGenModule &CGM) {
4573   if (!CGM.getTarget().hasFeature("ptx"))
4574     return CudaArch::UNKNOWN;
4575   for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
4576     if (Feature.getValue()) {
4577       CudaArch Arch = StringToCudaArch(Feature.getKey());
4578       if (Arch != CudaArch::UNKNOWN)
4579         return Arch;
4580     }
4581   }
4582   return CudaArch::UNKNOWN;
4583 }
4584 
4585 /// Check to see if target architecture supports unified addressing which is
4586 /// a restriction for OpenMP requires clause "unified_shared_memory".
4587 void CGOpenMPRuntimeGPU::processRequiresDirective(
4588     const OMPRequiresDecl *D) {
4589   for (const OMPClause *Clause : D->clauselists()) {
4590     if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
4591       CudaArch Arch = getCudaArch(CGM);
4592       switch (Arch) {
4593       case CudaArch::SM_20:
4594       case CudaArch::SM_21:
4595       case CudaArch::SM_30:
4596       case CudaArch::SM_32:
4597       case CudaArch::SM_35:
4598       case CudaArch::SM_37:
4599       case CudaArch::SM_50:
4600       case CudaArch::SM_52:
4601       case CudaArch::SM_53:
4602       case CudaArch::SM_60:
4603       case CudaArch::SM_61:
4604       case CudaArch::SM_62: {
4605         SmallString<256> Buffer;
4606         llvm::raw_svector_ostream Out(Buffer);
4607         Out << "Target architecture " << CudaArchToString(Arch)
4608             << " does not support unified addressing";
4609         CGM.Error(Clause->getBeginLoc(), Out.str());
4610         return;
4611       }
4612       case CudaArch::SM_70:
4613       case CudaArch::SM_72:
4614       case CudaArch::SM_75:
4615       case CudaArch::SM_80:
4616       case CudaArch::GFX600:
4617       case CudaArch::GFX601:
4618       case CudaArch::GFX602:
4619       case CudaArch::GFX700:
4620       case CudaArch::GFX701:
4621       case CudaArch::GFX702:
4622       case CudaArch::GFX703:
4623       case CudaArch::GFX704:
4624       case CudaArch::GFX705:
4625       case CudaArch::GFX801:
4626       case CudaArch::GFX802:
4627       case CudaArch::GFX803:
4628       case CudaArch::GFX805:
4629       case CudaArch::GFX810:
4630       case CudaArch::GFX900:
4631       case CudaArch::GFX902:
4632       case CudaArch::GFX904:
4633       case CudaArch::GFX906:
4634       case CudaArch::GFX908:
4635       case CudaArch::GFX909:
4636       case CudaArch::GFX90c:
4637       case CudaArch::GFX1010:
4638       case CudaArch::GFX1011:
4639       case CudaArch::GFX1012:
4640       case CudaArch::GFX1030:
4641       case CudaArch::GFX1031:
4642       case CudaArch::GFX1032:
4643       case CudaArch::GFX1033:
4644       case CudaArch::UNUSED:
4645       case CudaArch::UNKNOWN:
4646         break;
4647       case CudaArch::LAST:
4648         llvm_unreachable("Unexpected Cuda arch.");
4649       }
4650     }
4651   }
4652   CGOpenMPRuntime::processRequiresDirective(D);
4653 }
4654 
4655 /// Get number of SMs and number of blocks per SM.
4656 static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
4657   std::pair<unsigned, unsigned> Data;
4658   if (CGM.getLangOpts().OpenMPCUDANumSMs)
4659     Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
4660   if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
4661     Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
4662   if (Data.first && Data.second)
4663     return Data;
4664   switch (getCudaArch(CGM)) {
4665   case CudaArch::SM_20:
4666   case CudaArch::SM_21:
4667   case CudaArch::SM_30:
4668   case CudaArch::SM_32:
4669   case CudaArch::SM_35:
4670   case CudaArch::SM_37:
4671   case CudaArch::SM_50:
4672   case CudaArch::SM_52:
4673   case CudaArch::SM_53:
4674     return {16, 16};
4675   case CudaArch::SM_60:
4676   case CudaArch::SM_61:
4677   case CudaArch::SM_62:
4678     return {56, 32};
4679   case CudaArch::SM_70:
4680   case CudaArch::SM_72:
4681   case CudaArch::SM_75:
4682   case CudaArch::SM_80:
4683     return {84, 32};
4684   case CudaArch::GFX600:
4685   case CudaArch::GFX601:
4686   case CudaArch::GFX602:
4687   case CudaArch::GFX700:
4688   case CudaArch::GFX701:
4689   case CudaArch::GFX702:
4690   case CudaArch::GFX703:
4691   case CudaArch::GFX704:
4692   case CudaArch::GFX705:
4693   case CudaArch::GFX801:
4694   case CudaArch::GFX802:
4695   case CudaArch::GFX803:
4696   case CudaArch::GFX805:
4697   case CudaArch::GFX810:
4698   case CudaArch::GFX900:
4699   case CudaArch::GFX902:
4700   case CudaArch::GFX904:
4701   case CudaArch::GFX906:
4702   case CudaArch::GFX908:
4703   case CudaArch::GFX909:
4704   case CudaArch::GFX90c:
4705   case CudaArch::GFX1010:
4706   case CudaArch::GFX1011:
4707   case CudaArch::GFX1012:
4708   case CudaArch::GFX1030:
4709   case CudaArch::GFX1031:
4710   case CudaArch::GFX1032:
4711   case CudaArch::GFX1033:
4712   case CudaArch::UNUSED:
4713   case CudaArch::UNKNOWN:
4714     break;
4715   case CudaArch::LAST:
4716     llvm_unreachable("Unexpected Cuda arch.");
4717   }
4718   llvm_unreachable("Unexpected NVPTX target without ptx feature.");
4719 }
4720 
4721 void CGOpenMPRuntimeGPU::clear() {
4722   if (!GlobalizedRecords.empty() &&
4723       !CGM.getLangOpts().OpenMPCUDATargetParallel) {
4724     ASTContext &C = CGM.getContext();
4725     llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
4726     llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
4727     RecordDecl *StaticRD = C.buildImplicitRecord(
4728         "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
4729     StaticRD->startDefinition();
4730     RecordDecl *SharedStaticRD = C.buildImplicitRecord(
4731         "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
4732     SharedStaticRD->startDefinition();
4733     for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
4734       if (Records.Records.empty())
4735         continue;
4736       unsigned Size = 0;
4737       unsigned RecAlignment = 0;
4738       for (const RecordDecl *RD : Records.Records) {
4739         QualType RDTy = C.getRecordType(RD);
4740         unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
4741         RecAlignment = std::max(RecAlignment, Alignment);
4742         unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
4743         Size =
4744             llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
4745       }
4746       Size = llvm::alignTo(Size, RecAlignment);
4747       llvm::APInt ArySize(/*numBits=*/64, Size);
4748       QualType SubTy = C.getConstantArrayType(
4749           C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4750       const bool UseSharedMemory = Size <= SharedMemorySize;
4751       auto *Field =
4752           FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
4753                             SourceLocation(), SourceLocation(), nullptr, SubTy,
4754                             C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
4755                             /*BW=*/nullptr, /*Mutable=*/false,
4756                             /*InitStyle=*/ICIS_NoInit);
4757       Field->setAccess(AS_public);
4758       if (UseSharedMemory) {
4759         SharedStaticRD->addDecl(Field);
4760         SharedRecs.push_back(&Records);
4761       } else {
4762         StaticRD->addDecl(Field);
4763         GlobalRecs.push_back(&Records);
4764       }
4765       Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
4766       Records.UseSharedMemory->setInitializer(
4767           llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
4768     }
4769     // Allocate SharedMemorySize buffer for the shared memory.
4770     // FIXME: nvlink does not handle weak linkage correctly (objects with
4771     // different sizes are reported as erroneous).
4772     // Restore this code as soon as nvlink is fixed.
4773     if (!SharedStaticRD->field_empty()) {
4774       llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
4775       QualType SubTy = C.getConstantArrayType(
4776           C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4777       auto *Field = FieldDecl::Create(
4778           C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
4779           C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
4780           /*BW=*/nullptr, /*Mutable=*/false,
4781           /*InitStyle=*/ICIS_NoInit);
4782       Field->setAccess(AS_public);
4783       SharedStaticRD->addDecl(Field);
4784     }
4785     SharedStaticRD->completeDefinition();
4786     if (!SharedStaticRD->field_empty()) {
4787       QualType StaticTy = C.getRecordType(SharedStaticRD);
4788       llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
4789       auto *GV = new llvm::GlobalVariable(
4790           CGM.getModule(), LLVMStaticTy,
4791           /*isConstant=*/false, llvm::GlobalValue::WeakAnyLinkage,
4792           llvm::UndefValue::get(LLVMStaticTy),
4793           "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
4794           llvm::GlobalValue::NotThreadLocal,
4795           C.getTargetAddressSpace(LangAS::cuda_shared));
4796       auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
4797           GV, CGM.VoidPtrTy);
4798       for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
4799         Rec->Buffer->replaceAllUsesWith(Replacement);
4800         Rec->Buffer->eraseFromParent();
4801       }
4802     }
4803     StaticRD->completeDefinition();
4804     if (!StaticRD->field_empty()) {
4805       QualType StaticTy = C.getRecordType(StaticRD);
4806       std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
4807       llvm::APInt Size1(32, SMsBlockPerSM.second);
4808       QualType Arr1Ty =
4809           C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
4810                                  /*IndexTypeQuals=*/0);
4811       llvm::APInt Size2(32, SMsBlockPerSM.first);
4812       QualType Arr2Ty =
4813           C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
4814                                  /*IndexTypeQuals=*/0);
4815       llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
4816       // FIXME: nvlink does not handle weak linkage correctly (objects with
4817       // different sizes are reported as erroneous).
4818       // Restore CommonLinkage as soon as nvlink is fixed.
4819       auto *GV = new llvm::GlobalVariable(
4820           CGM.getModule(), LLVMArr2Ty,
4821           /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
4822           llvm::Constant::getNullValue(LLVMArr2Ty),
4823           "_openmp_static_glob_rd_$_");
4824       auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
4825           GV, CGM.VoidPtrTy);
4826       for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
4827         Rec->Buffer->replaceAllUsesWith(Replacement);
4828         Rec->Buffer->eraseFromParent();
4829       }
4830     }
4831   }
4832   if (!TeamsReductions.empty()) {
4833     ASTContext &C = CGM.getContext();
4834     RecordDecl *StaticRD = C.buildImplicitRecord(
4835         "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
4836     StaticRD->startDefinition();
4837     for (const RecordDecl *TeamReductionRec : TeamsReductions) {
4838       QualType RecTy = C.getRecordType(TeamReductionRec);
4839       auto *Field = FieldDecl::Create(
4840           C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
4841           C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
4842           /*BW=*/nullptr, /*Mutable=*/false,
4843           /*InitStyle=*/ICIS_NoInit);
4844       Field->setAccess(AS_public);
4845       StaticRD->addDecl(Field);
4846     }
4847     StaticRD->completeDefinition();
4848     QualType StaticTy = C.getRecordType(StaticRD);
4849     llvm::Type *LLVMReductionsBufferTy =
4850         CGM.getTypes().ConvertTypeForMem(StaticTy);
4851     // FIXME: nvlink does not handle weak linkage correctly (objects with
4852     // different sizes are reported as erroneous).
4853     // Restore CommonLinkage as soon as nvlink is fixed.
4854     auto *GV = new llvm::GlobalVariable(
4855         CGM.getModule(), LLVMReductionsBufferTy,
4856         /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
4857         llvm::Constant::getNullValue(LLVMReductionsBufferTy),
4858         "_openmp_teams_reductions_buffer_$_");
4859     KernelTeamsReductionPtr->setInitializer(
4860         llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
4861                                                              CGM.VoidPtrTy));
4862   }
4863   CGOpenMPRuntime::clear();
4864 }
4865