//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/MathExtras.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when code-generating directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by nested directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

/// GPU Configuration: This information can be derived from CUDA registers;
/// however, providing compile-time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Slot_Size

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximum size of the shared memory buffer.
  SharedMemorySize = 128,
};

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}


static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /*  globalized vars  */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /*  globalized vars  */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
                                    0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
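// Illustration (a sketch, not verbatim compiler output): with BufSize ==
// WarpSize == 32, an escaped parallel-region variable 'int x', and a
// teams-region variable 'double d', the record built above is roughly
//
//   struct _globalized_locals_ty {
//     int x[32] __attribute__((aligned(128)));  // parallel: one slot per lane
//     double d;                                 // teams: single slot
//   };
//
// The stable_sort above lays fields out in decreasing alignment order, and
// only the non-teams (parallel) variables receive the [BufSize] array form.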

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check whether the variable is privatized in the combined
          // construct; such private copies must be shared in the inner
          // parallel directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace
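// An illustrative (assumed, not from this file) example of "escaping": in
//
//   #pragma omp target teams
//   {
//     int x = 0;
//     #pragma omp parallel
//     x += 1; // 'x' is captured by reference by the inner parallel region
//   }
//
// the visitor above marks 'x' as escaped, so it is given a slot in the
// _globalized_locals_ty record instead of staying in thread-private storage.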

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
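// Worked example for the two helpers above, assuming GV_Warp_Size == 32:
// LaneIDBits == 5 and LaneIDMask == 0x1f, so GPU thread 37 gets
// nvptx_warp_id == 37 >> 5 == 1 and nvptx_lane_id == 37 & 0x1f == 5.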

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for an inner (nested) SPMD construct, if any.
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
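// Illustrative case for the detection above (an assumed example):
//
//   #pragma omp target
//   #pragma omp teams
//   #pragma omp parallel for
//   for (int i = 0; i < N; ++i) ...
//
// The nested walk finds the parallel directive under 'teams' and returns
// true, so the kernel can run in SPMD mode; a 'target' region containing
// only sequential statements returns false and stays in generic mode.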

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
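// In short: combined directives that already contain 'parallel' or 'simd'
// (e.g. 'target parallel for' or 'target simd') support SPMD mode by
// construction, 'target teams distribute' never does, and plain 'target' or
// 'target teams' regions depend on the nested-directive analysis above.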

/// Check if the directive is loop-based and either has no schedule clause at
/// all or has static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}
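// For example, '#pragma omp target parallel for' (no schedule clause) and
// '... schedule(static)' both count as static scheduling here, while a
// 'schedule(dynamic)' clause or an 'ordered' clause makes this return false.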

/// Check for an inner (nested) lightweight runtime construct, if any.
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be an SPMD
/// construct with an inner loop-based construct that has static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
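// Illustrative case: '#pragma omp target teams distribute parallel for
// schedule(static)' can use the lightweight (simplified) runtime, whereas
// changing it to 'schedule(dynamic)' forces the full runtime even though
// the kernel itself is still emitted in SPMD mode.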

void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
    }
  } Action(EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
                                        EntryFunctionState &EST, bool IsSPMD) {
  CGBuilderTy &Bld = CGF.Builder;
  Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
  IsInTargetMasterThreadRegion = IsSPMD;
  if (!IsSPMD)
    emitGenericVarsProlog(CGF, EST.Loc);
}

void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
                                          EntryFunctionState &EST,
                                          bool IsSPMD) {
  if (!IsSPMD)
    emitGenericVarsEpilog(CGF);

  CGBuilderTy &Bld = CGF.Builder;
  OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
}

void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : RT(RT), EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
    }
  } Action(*this, EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic' or 'spmd', depending
// on the target directive. This variable is picked up by the offload library
// to set up the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master; otherwise,
// all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode = new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
                                              : OMP_TGT_EXEC_MODE_GENERIC),
      Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
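// Illustration (approximate IR; the enum values come from
// llvm/Frontend/OpenMP/OMPConstants.h): for an SPMD kernel named
// '__omp_offloading_<...>_foo_l1' this emits roughly
//
//   @__omp_offloading_<...>_foo_l1_exec_mode = weak constant i8 2
//
// where 2 is OMP_TGT_EXEC_MODE_SPMD and 1 would be OMP_TGT_EXEC_MODE_GENERIC.
// addCompilerUsedGlobal keeps the variable from being optimized away.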

void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t,
                                              llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
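// The added annotation marks the outlined function as a kernel entry point
// for the NVPTX backend, roughly:
//
//   !nvvm.annotations = !{!0}
//   !0 = !{<ref to Addr>, !"kernel", i32 1}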

void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special mode Undefined: the combination of non-SPMD mode and the simple
/// runtime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}
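// Given the two flag bits above (the bitmask-enum operator~ masks to the
// declared bits), the possible reserved_2 values are:
//   SPMD + full runtime     -> 0x01 (KMP_IDENT_SPMD_MODE only)
//   SPMD + simple runtime   -> 0x03 (both bits)
//   non-SPMD (full runtime) -> 0x00 (neither bit)
//   unknown                 -> 0x02 (UndefinedMode)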

CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP can only handle device code.");

  llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
  if (CGM.getLangOpts().OpenMPTargetNewRuntime) {
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
                                "__omp_rtl_debug_kind");
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
                                "__omp_rtl_assume_teams_oversubscription");
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
                                "__omp_rtl_assume_threads_oversubscription");
  }
}

void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
                                              ProcBindKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}

void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get the list of lastprivate variables from the 'teams distribute ...' or
/// 'teams {distribute ...}' directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}
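// For example (illustrative): given '#pragma omp target teams' whose single
// child is '#pragma omp distribute parallel for lastprivate(x)', this
// collects 'x'; if the child is not a distribute-based directive, nothing
// is collected.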

/// Get the list of reduction variables from the 'teams ...' directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
  // Globalize team reduction variables unconditionally in all modes.
1323   if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1324     getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1325   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1326     getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1327     if (!LastPrivatesReductions.empty()) {
1328       GlobalizedRD = ::buildRecordForGlobalizedVars(
1329           CGM.getContext(), llvm::None, LastPrivatesReductions,
1330           MappedDeclsFields, WarpSize);
1331     }
1332   } else if (!LastPrivatesReductions.empty()) {
1333     assert(!TeamAndReductions.first &&
1334            "Previous team declaration is not expected.");
1335     TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1336     std::swap(TeamAndReductions.second, LastPrivatesReductions);
1337   }
1338 
1339   // Emit target region as a standalone region.
1340   class NVPTXPrePostActionTy : public PrePostActionTy {
1341     SourceLocation &Loc;
1342     const RecordDecl *GlobalizedRD;
1343     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1344         &MappedDeclsFields;
1345 
1346   public:
1347     NVPTXPrePostActionTy(
1348         SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1349         llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1350             &MappedDeclsFields)
1351         : Loc(Loc), GlobalizedRD(GlobalizedRD),
1352           MappedDeclsFields(MappedDeclsFields) {}
1353     void Enter(CodeGenFunction &CGF) override {
1354       auto &Rt =
1355           static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1356       if (GlobalizedRD) {
1357         auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1358         I->getSecond().MappedParams =
1359             std::make_unique<CodeGenFunction::OMPMapVars>();
1360         DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1361         for (const auto &Pair : MappedDeclsFields) {
1362           assert(Pair.getFirst()->isCanonicalDecl() &&
1363                  "Expected canonical declaration");
1364           Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
1365         }
1366       }
1367       Rt.emitGenericVarsProlog(CGF, Loc);
1368     }
1369     void Exit(CodeGenFunction &CGF) override {
1370       static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1371           .emitGenericVarsEpilog(CGF);
1372     }
1373   } Action(Loc, GlobalizedRD, MappedDeclsFields);
1374   CodeGen.setAction(Action);
1375   llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1376       D, ThreadIDVar, InnermostKind, CodeGen);
1377 
1378   return OutlinedFun;
1379 }
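// Illustrative note (a sketch, not part of the original source): for a
// directive such as
//   #pragma omp target teams reduction(+ : sum)
// the non-SPMD path above stashes 'sum' in TeamAndReductions so it can be
// globalized when the kernel is emitted, while the SPMD path eagerly builds
// the globalized record via buildRecordForGlobalizedVars and maps each
// variable to its field through MappedDeclsFields in the Enter() action.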
1380 
1381 void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1382                                                  SourceLocation Loc,
1383                                                  bool WithSPMDCheck) {
1384   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1385       getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1386     return;
1387 
1388   CGBuilderTy &Bld = CGF.Builder;
1389 
1390   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1391   if (I == FunctionGlobalizedDecls.end())
1392     return;
1393 
1394   for (auto &Rec : I->getSecond().LocalVarData) {
1395     const auto *VD = cast<VarDecl>(Rec.first);
1396     bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1397     QualType VarTy = VD->getType();
1398 
1399     // Get the local allocation of a firstprivate variable before sharing
1400     llvm::Value *ParValue;
1401     if (EscapedParam) {
1402       LValue ParLVal =
1403           CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1404       ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1405     }
1406 
1407     // Allocate space for the variable to be globalized
1408     llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1409     llvm::Instruction *VoidPtr =
1410         CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1411                                 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1412                             AllocArgs, VD->getName());
1413 
1414     // Cast the void pointer and get the address of the globalized variable.
1415     llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
1416     llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1417         VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
1418     LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
1419     Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1420     Rec.second.GlobalizedVal = VoidPtr;
1421 
1422     // Assign the local allocation to the newly globalized location.
1423     if (EscapedParam) {
1424       CGF.EmitStoreOfScalar(ParValue, VarAddr);
1425       I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
1426     }
1427     if (auto *DI = CGF.getDebugInfo())
1428       VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
1429   }
1430   for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
1431     // Use actual memory size of the VLA object including the padding
1432     // for alignment purposes.
1433     llvm::Value *Size = CGF.getTypeSize(VD->getType());
1434     CharUnits Align = CGM.getContext().getDeclAlign(VD);
1435     Size = Bld.CreateNUWAdd(
1436         Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1437     llvm::Value *AlignVal =
1438         llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1439 
1440     Size = Bld.CreateUDiv(Size, AlignVal);
1441     Size = Bld.CreateNUWMul(Size, AlignVal);
1442 
1443     // Allocate space for this VLA object to be globalized (padded Size).
1444     llvm::Value *AllocArgs[] = {Size};
1445     llvm::Instruction *VoidPtr =
1446         CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1447                                 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1448                             AllocArgs, VD->getName());
1449 
1450     // Record the padded Size so the matching __kmpc_free_shared call in the
1451     // epilog frees exactly what was allocated.
1452     I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
1452         std::pair<llvm::Value *, llvm::Value *>({VoidPtr, Size}));
1453     LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
1454                                      CGM.getContext().getDeclAlign(VD),
1455                                      AlignmentSource::Decl);
1456     I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1457                                             Base.getAddress(CGF));
1458   }
1459   I->getSecond().MappedParams->apply(CGF);
1460 }
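// A minimal sketch of the IR produced by the prolog above for a globalized
// 'int x' (assuming the usual device runtime declarations):
//   %x = call i8* @__kmpc_alloc_shared(i64 4)
//   %x_on_stack = bitcast i8* %x to i32*
//   ; escaped uses of 'x' are redirected through %x_on_stack via MappedParams
// The matching deallocation is emitted by emitGenericVarsEpilog below.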
1461 
1462 void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
1463                                                  bool WithSPMDCheck) {
1464   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1465       getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1466     return;
1467 
1468   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1469   if (I != FunctionGlobalizedDecls.end()) {
1470     // Deallocate the memory for each globalized VLA object
1471     for (auto AddrSizePair :
1472          llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
1473       CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1474                               CGM.getModule(), OMPRTL___kmpc_free_shared),
1475                           {AddrSizePair.first, AddrSizePair.second});
1476     }
1477     // Deallocate the memory for each globalized value
1478     for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
1479       const auto *VD = cast<VarDecl>(Rec.first);
1480       I->getSecond().MappedParams->restore(CGF);
1481 
1482       llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
1483                                  CGF.getTypeSize(VD->getType())};
1484       CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1485                               CGM.getModule(), OMPRTL___kmpc_free_shared),
1486                           FreeArgs);
1487     }
1488   }
1489 }
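// Note (sketch): frees are emitted in reverse allocation order, mirroring the
// llvm::reverse loops above, e.g.
//   call void @__kmpc_free_shared(i8* %x, i64 4)
// where the size operand must match the one passed to __kmpc_alloc_shared.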
1490 
1491 void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
1492                                          const OMPExecutableDirective &D,
1493                                          SourceLocation Loc,
1494                                          llvm::Function *OutlinedFn,
1495                                          ArrayRef<llvm::Value *> CapturedVars) {
1496   if (!CGF.HaveInsertPoint())
1497     return;
1498 
1499   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
1500                                                       /*Name=*/".zero.addr");
1501   CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
1502   llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1503   OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
1504   OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1505   OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1506   emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1507 }
1508 
1509 void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
1510                                           SourceLocation Loc,
1511                                           llvm::Function *OutlinedFn,
1512                                           ArrayRef<llvm::Value *> CapturedVars,
1513                                           const Expr *IfCond) {
1514   if (!CGF.HaveInsertPoint())
1515     return;
1516 
1517   auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
1518                         IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
1519     CGBuilderTy &Bld = CGF.Builder;
1520     llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
1521     llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
1522     if (WFn)
1523       ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
1524     llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
1525 
1526     // Create a private scope that will globalize the arguments
1527     // passed from the outside of the target region.
1528     // TODO: Is that needed?
1529     CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
1530 
1531     Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
1532         llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
1533         "captured_vars_addrs");
1534     // There's something to share.
1535     if (!CapturedVars.empty()) {
1536       // Prepare for parallel region. Indicate the outlined function.
1537       ASTContext &Ctx = CGF.getContext();
1538       unsigned Idx = 0;
1539       for (llvm::Value *V : CapturedVars) {
1540         Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
1541         llvm::Value *PtrV;
1542         if (V->getType()->isIntegerTy())
1543           PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
1544         else
1545           PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
1546         CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
1547                               Ctx.getPointerType(Ctx.VoidPtrTy));
1548         ++Idx;
1549       }
1550     }
1551 
1552     llvm::Value *IfCondVal = nullptr;
1553     if (IfCond)
1554       IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
1555                                     /* isSigned */ false);
1556     else
1557       IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
1558 
1559     assert(IfCondVal && "Expected a value");
1560     llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1561     llvm::Value *Args[] = {
1562         RTLoc,
1563         getThreadID(CGF, Loc),
1564         IfCondVal,
1565         llvm::ConstantInt::get(CGF.Int32Ty, -1),
1566         llvm::ConstantInt::get(CGF.Int32Ty, -1),
1567         FnPtr,
1568         ID,
1569         Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
1570                                    CGF.VoidPtrPtrTy),
1571         llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
1572     CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1573                             CGM.getModule(), OMPRTL___kmpc_parallel_51),
1574                         Args);
1575   };
1576 
1577   RegionCodeGenTy RCG(ParallelGen);
1578   RCG(CGF);
1579 }
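// For reference, the parallel entry point targeted above is declared in the
// OpenMP device runtime as (signature reproduced here as a sketch):
//   void __kmpc_parallel_51(ident_t *loc, kmp_int32 gtid, kmp_int32 if_expr,
//                           kmp_int32 num_threads, kmp_int32 proc_bind,
//                           void *fn, void *wrapper_fn, void **args,
//                           size_t nargs);
// The two ConstantInt -1 arguments above leave num_threads and proc_bind
// unspecified.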
1580 
1581 void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
1582   // Always emit simple barriers!
1583   if (!CGF.HaveInsertPoint())
1584     return;
1585   // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
1586   // The callee does not use its parameters, so we can just emit default values.
1587   llvm::Value *Args[] = {
1588       llvm::ConstantPointerNull::get(
1589           cast<llvm::PointerType>(getIdentTyPointerTy())),
1590       llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
1591   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1592                           CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
1593                       Args);
1594 }
1595 
1596 void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
1597                                            SourceLocation Loc,
1598                                            OpenMPDirectiveKind Kind, bool,
1599                                            bool) {
1600   // Always emit simple barriers!
1601   if (!CGF.HaveInsertPoint())
1602     return;
1603   // Build call __kmpc_barrier(loc, thread_id);
1604   unsigned Flags = getDefaultFlagsForBarriers(Kind);
1605   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
1606                          getThreadID(CGF, Loc)};
1607 
1608   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1609                           CGM.getModule(), OMPRTL___kmpc_barrier),
1610                       Args);
1611 }
1612 
1613 void CGOpenMPRuntimeGPU::emitCriticalRegion(
1614     CodeGenFunction &CGF, StringRef CriticalName,
1615     const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
1616     const Expr *Hint) {
1617   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
1618   llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
1619   llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
1620   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
1621   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
1622 
1623   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1624 
1625   // Get the mask of active threads in the warp.
1626   llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1627       CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
1628   // Fetch team-local id of the thread.
1629   llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1630 
1631   // Get the width of the team.
1632   llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
1633 
1634   // Initialize the counter variable for the loop.
1635   QualType Int32Ty =
1636       CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
1637   Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
1638   LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
1639   CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
1640                         /*isInit=*/true);
1641 
1642   // Block checks if loop counter exceeds upper bound.
1643   CGF.EmitBlock(LoopBB);
1644   llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1645   llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
1646   CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
1647 
1648   // Block tests which single thread should execute the region, and which
1649   // threads should go straight to the synchronisation point.
1650   CGF.EmitBlock(TestBB);
1651   CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1652   llvm::Value *CmpThreadToCounter =
1653       CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
1654   CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
1655 
1656   // Block emits the body of the critical region.
1657   CGF.EmitBlock(BodyBB);
1658 
1659   // Output the critical statement.
1660   CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
1661                                       Hint);
1662 
1663   // After the body surrounded by the critical region, the single executing
1664   // thread will jump to the synchronisation point.
1665   // Block waits for all threads in the current team to finish, then
1666   // increments the counter variable and returns to the loop.
1667   CGF.EmitBlock(SyncBB);
1668   // Reconverge active threads in the warp.
1669   (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1670                                 CGM.getModule(), OMPRTL___kmpc_syncwarp),
1671                             Mask);
1672 
1673   llvm::Value *IncCounterVal =
1674       CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
1675   CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
1676   CGF.EmitBranch(LoopBB);
1677 
1678   // Block that is reached when all threads in the team complete the region.
1679   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1680 }
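// The block structure built above serializes the critical region across the
// team; in pseudo-code (an illustrative sketch):
//   mask = __kmpc_warp_active_thread_mask();
//   for (counter = 0; counter < team_width; ++counter) {
//     if (thread_id == counter)
//       <critical region body>;        // exactly one thread per iteration
//     __kmpc_syncwarp(mask);           // reconverge before the next round
//   }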
1681 
1682 /// Cast value to the specified type.
1683 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1684                                     QualType ValTy, QualType CastTy,
1685                                     SourceLocation Loc) {
1686   assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1687          "Cast type must be sized.");
1688   assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1689          "Val type must be sized.");
1690   llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1691   if (ValTy == CastTy)
1692     return Val;
1693   if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1694       CGF.getContext().getTypeSizeInChars(CastTy))
1695     return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1696   if (CastTy->isIntegerType() && ValTy->isIntegerType())
1697     return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1698                                      CastTy->hasSignedIntegerRepresentation());
1699   Address CastItem = CGF.CreateMemTemp(CastTy);
1700   Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1701       CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
1702   CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1703                         LValueBaseInfo(AlignmentSource::Type),
1704                         TBAAAccessInfo());
1705   return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1706                               LValueBaseInfo(AlignmentSource::Type),
1707                               TBAAAccessInfo());
1708 }
1709 
1710 /// This function creates calls to one of two shuffle functions to copy
1711 /// variables between lanes in a warp.
1712 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1713                                                  llvm::Value *Elem,
1714                                                  QualType ElemType,
1715                                                  llvm::Value *Offset,
1716                                                  SourceLocation Loc) {
1717   CodeGenModule &CGM = CGF.CGM;
1718   CGBuilderTy &Bld = CGF.Builder;
1719   CGOpenMPRuntimeGPU &RT =
1720       *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1721   llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1722 
1723   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1724   assert(Size.getQuantity() <= 8 &&
1725          "Unsupported bitwidth in shuffle instruction.");
1726 
1727   RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1728                                   ? OMPRTL___kmpc_shuffle_int32
1729                                   : OMPRTL___kmpc_shuffle_int64;
1730 
1731   // Cast all types to 32- or 64-bit values before calling shuffle routines.
1732   QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1733       Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1734   llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1735   llvm::Value *WarpSize =
1736       Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1737 
1738   llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1739       OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1740       {ElemCast, Offset, WarpSize});
1741 
1742   return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1743 }
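// The shuffle helpers used above take the value, the lane delta and the warp
// size (a sketch of their declarations, mirroring how they are called here):
//   int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size);
//   int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size);
// Values narrower than 32 bits are first widened by castValueToType and
// narrowed back after the shuffle.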
1744 
1745 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1746                             Address DestAddr, QualType ElemType,
1747                             llvm::Value *Offset, SourceLocation Loc) {
1748   CGBuilderTy &Bld = CGF.Builder;
1749 
1750   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1751   // Create the loop over the big-sized data:
1752   // ptr = (void*)Elem;
1753   // ptrEnd = (void*)(Elem + 1);
1754   // Step = 8;
1755   // while (ptr + Step < ptrEnd)
1756   //   shuffle((int64_t)*ptr);
1757   // Step = 4;
1758   // while (ptr + Step < ptrEnd)
1759   //   shuffle((int32_t)*ptr);
1760   // ...
1761   Address ElemPtr = DestAddr;
1762   Address Ptr = SrcAddr;
1763   Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
1764       Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
1765   for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
1766     if (Size < CharUnits::fromQuantity(IntSize))
1767       continue;
1768     QualType IntType = CGF.getContext().getIntTypeForBitwidth(
1769         CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
1770         /*Signed=*/1);
1771     llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
1772     Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
1773     ElemPtr =
1774         Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
1775     if (Size.getQuantity() / IntSize > 1) {
1776       llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
1777       llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
1778       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
1779       llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
1780       CGF.EmitBlock(PreCondBB);
1781       llvm::PHINode *PhiSrc =
1782           Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
1783       PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
1784       llvm::PHINode *PhiDest =
1785           Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
1786       PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
1787       Ptr = Address(PhiSrc, Ptr.getAlignment());
1788       ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
1789       llvm::Value *PtrDiff = Bld.CreatePtrDiff(
1790           PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
1791                                    Ptr.getPointer(), CGF.VoidPtrTy));
1792       Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
1793                        ThenBB, ExitBB);
1794       CGF.EmitBlock(ThenBB);
1795       llvm::Value *Res = createRuntimeShuffleFunction(
1796           CGF,
1797           CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1798                                LValueBaseInfo(AlignmentSource::Type),
1799                                TBAAAccessInfo()),
1800           IntType, Offset, Loc);
1801       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1802                             LValueBaseInfo(AlignmentSource::Type),
1803                             TBAAAccessInfo());
1804       Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
1805       Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1806       PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
1807       PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
1808       CGF.EmitBranch(PreCondBB);
1809       CGF.EmitBlock(ExitBB);
1810     } else {
1811       llvm::Value *Res = createRuntimeShuffleFunction(
1812           CGF,
1813           CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1814                                LValueBaseInfo(AlignmentSource::Type),
1815                                TBAAAccessInfo()),
1816           IntType, Offset, Loc);
1817       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1818                             LValueBaseInfo(AlignmentSource::Type),
1819                             TBAAAccessInfo());
1820       Ptr = Bld.CreateConstGEP(Ptr, 1);
1821       ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1822     }
1823     Size = Size % IntSize;
1824   }
1825 }
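// Worked example (illustrative): for a 12-byte element the loop above emits a
// single 8-byte shuffle (IntSize == 8, leaving Size = 12 % 8 == 4) followed by
// a single 4-byte shuffle (leaving Size == 0); the PHI-based inner loop is
// only generated when Size.getQuantity() / IntSize > 1, e.g. for a 16-byte
// element at IntSize == 8.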
1826 
1827 namespace {
1828 enum CopyAction : unsigned {
1829   // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1830   // the warp using shuffle instructions.
1831   RemoteLaneToThread,
1832   // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1833   ThreadCopy,
1834   // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
1835   ThreadToScratchpad,
1836   // ScratchpadToThread: Copy from a scratchpad array in global memory
1837   // containing team-reduced data to a thread's stack.
1838   ScratchpadToThread,
1839 };
1840 } // namespace
1841 
1842 struct CopyOptionsTy {
1843   llvm::Value *RemoteLaneOffset;
1844   llvm::Value *ScratchpadIndex;
1845   llvm::Value *ScratchpadWidth;
1846 };
1847 
1848 /// Emit instructions to copy a Reduce list, which contains partially
1849 /// aggregated values, in the specified direction.
1850 static void emitReductionListCopy(
1851     CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1852     ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1853     CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
1854 
1855   CodeGenModule &CGM = CGF.CGM;
1856   ASTContext &C = CGM.getContext();
1857   CGBuilderTy &Bld = CGF.Builder;
1858 
1859   llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1860   llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1861   llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1862 
1863   // Iterates, element-by-element, through the source Reduce list and
1864   // makes a copy.
1865   unsigned Idx = 0;
1866   unsigned Size = Privates.size();
1867   for (const Expr *Private : Privates) {
1868     Address SrcElementAddr = Address::invalid();
1869     Address DestElementAddr = Address::invalid();
1870     Address DestElementPtrAddr = Address::invalid();
1871     // Should we shuffle in an element from a remote lane?
1872     bool ShuffleInElement = false;
1873     // Set to true to update the pointer in the dest Reduce list to a
1874     // newly created element.
1875     bool UpdateDestListPtr = false;
1876     // Increment the src or dest pointer to the scratchpad, for each
1877     // new element.
1878     bool IncrScratchpadSrc = false;
1879     bool IncrScratchpadDest = false;
1880 
1881     switch (Action) {
1882     case RemoteLaneToThread: {
1883       // Step 1.1: Get the address for the src element in the Reduce list.
1884       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1885       SrcElementAddr = CGF.EmitLoadOfPointer(
1886           SrcElementPtrAddr,
1887           C.getPointerType(Private->getType())->castAs<PointerType>());
1888 
1889       // Step 1.2: Create a temporary to store the element in the destination
1890       // Reduce list.
1891       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1892       DestElementAddr =
1893           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1894       ShuffleInElement = true;
1895       UpdateDestListPtr = true;
1896       break;
1897     }
1898     case ThreadCopy: {
1899       // Step 1.1: Get the address for the src element in the Reduce list.
1900       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1901       SrcElementAddr = CGF.EmitLoadOfPointer(
1902           SrcElementPtrAddr,
1903           C.getPointerType(Private->getType())->castAs<PointerType>());
1904 
1905       // Step 1.2: Get the address for dest element.  The destination
1906       // element has already been created on the thread's stack.
1907       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1908       DestElementAddr = CGF.EmitLoadOfPointer(
1909           DestElementPtrAddr,
1910           C.getPointerType(Private->getType())->castAs<PointerType>());
1911       break;
1912     }
1913     case ThreadToScratchpad: {
1914       // Step 1.1: Get the address for the src element in the Reduce list.
1915       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1916       SrcElementAddr = CGF.EmitLoadOfPointer(
1917           SrcElementPtrAddr,
1918           C.getPointerType(Private->getType())->castAs<PointerType>());
1919 
1920       // Step 1.2: Get the address for dest element:
1921       // address = base + index * ElementSizeInChars.
1922       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1923       llvm::Value *CurrentOffset =
1924           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1925       llvm::Value *ScratchPadElemAbsolutePtrVal =
1926           Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
1927       ScratchPadElemAbsolutePtrVal =
1928           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1929       DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
1930                                 C.getTypeAlignInChars(Private->getType()));
1931       IncrScratchpadDest = true;
1932       break;
1933     }
1934     case ScratchpadToThread: {
1935       // Step 1.1: Get the address for the src element in the scratchpad.
1936       // address = base + index * ElementSizeInChars.
1937       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1938       llvm::Value *CurrentOffset =
1939           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1940       llvm::Value *ScratchPadElemAbsolutePtrVal =
1941           Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
1942       ScratchPadElemAbsolutePtrVal =
1943           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1944       SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
1945                                C.getTypeAlignInChars(Private->getType()));
1946       IncrScratchpadSrc = true;
1947 
1948       // Step 1.2: Create a temporary to store the element in the destination
1949       // Reduce list.
1950       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1951       DestElementAddr =
1952           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1953       UpdateDestListPtr = true;
1954       break;
1955     }
1956     }
1957 
1958     // Regardless of the direction of the copy, we emit the load of the src
1959     // element, as this is required in all directions.
1960     SrcElementAddr = Bld.CreateElementBitCast(
1961         SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
1962     DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
1963                                                SrcElementAddr.getElementType());
1964 
1965     // Now that all active lanes have read the element in the
1966     // Reduce list, shuffle over the value from the remote lane.
1967     if (ShuffleInElement) {
1968       shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
1969                       RemoteLaneOffset, Private->getExprLoc());
1970     } else {
1971       switch (CGF.getEvaluationKind(Private->getType())) {
1972       case TEK_Scalar: {
1973         llvm::Value *Elem = CGF.EmitLoadOfScalar(
1974             SrcElementAddr, /*Volatile=*/false, Private->getType(),
1975             Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
1976             TBAAAccessInfo());
1977         // Store the source element value to the dest element address.
1978         CGF.EmitStoreOfScalar(
1979             Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
1980             LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
1981         break;
1982       }
1983       case TEK_Complex: {
1984         CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
1985             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1986             Private->getExprLoc());
1987         CGF.EmitStoreOfComplex(
1988             Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1989             /*isInit=*/false);
1990         break;
1991       }
1992       case TEK_Aggregate:
1993         CGF.EmitAggregateCopy(
1994             CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1995             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1996             Private->getType(), AggValueSlot::DoesNotOverlap);
1997         break;
1998       }
1999     }
2000 
2001     // Step 3.1: Modify reference in dest Reduce list as needed.
2002     // Modifying the reference in Reduce list to point to the newly
2003     // created element.  The element is live in the current function
2004     // scope and that of functions it invokes (i.e., reduce_function).
2005     // RemoteReduceData[i] = (void*)&RemoteElem
2006     if (UpdateDestListPtr) {
2007       CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2008                                 DestElementAddr.getPointer(), CGF.VoidPtrTy),
2009                             DestElementPtrAddr, /*Volatile=*/false,
2010                             C.VoidPtrTy);
2011     }
2012 
2013     // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2014     // address of the next element in scratchpad memory, unless we're currently
2015     // processing the last one.  Memory alignment is also taken care of here.
2016     if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2017       llvm::Value *ScratchpadBasePtr =
2018           IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2019       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2020       ScratchpadBasePtr = Bld.CreateNUWAdd(
2021           ScratchpadBasePtr,
2022           Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2023 
2024       // Take care of global memory alignment for performance
2025       ScratchpadBasePtr = Bld.CreateNUWSub(
2026           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2027       ScratchpadBasePtr = Bld.CreateUDiv(
2028           ScratchpadBasePtr,
2029           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2030       ScratchpadBasePtr = Bld.CreateNUWAdd(
2031           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2032       ScratchpadBasePtr = Bld.CreateNUWMul(
2033           ScratchpadBasePtr,
2034           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2035 
2036       if (IncrScratchpadDest)
2037         DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2038       else /* IncrScratchpadSrc = true */
2039         SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2040     }
2041 
2042     ++Idx;
2043   }
2044 }
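// For example (a sketch): a RemoteLaneToThread copy of a Reduce list holding
// one 'double' behaves roughly as
//   remote_elem = alloca double
//   *remote_elem = __kmpc_shuffle_int64(*reduce_list[0], offset, warpsize)
//   remote_reduce_list[0] = (void*)remote_elem   // UpdateDestListPtr
// while ThreadCopy loads from the source element and stores into the
// already-existing destination element without rewriting the list pointers.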
2045 
2046 /// This function emits a helper that gathers Reduce lists from the first
2047 /// lane of every active warp to lanes in the first warp.
2048 ///
2049 /// void inter_warp_copy_func(void* reduce_data, int num_warps)
2050 ///   shared smem[warp_size];
2051 ///   For all data entries D in reduce_data:
2052 ///     sync
2053 ///     If (I am the first lane in each warp)
2054 ///       Copy my local D to smem[warp_id]
2055 ///     sync
2056 ///     if (I am the first warp)
2057 ///       Copy smem[thread_id] to my local D
2058 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2059                                               ArrayRef<const Expr *> Privates,
2060                                               QualType ReductionArrayTy,
2061                                               SourceLocation Loc) {
2062   ASTContext &C = CGM.getContext();
2063   llvm::Module &M = CGM.getModule();
2064 
2065   // ReduceList: thread local Reduce list.
2066   // At the stage of the computation when this function is called, partially
2067   // aggregated values reside in the first lane of every active warp.
2068   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2069                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2070   // NumWarps: number of warps active in the parallel region.  This could
2071   // be smaller than 32 (max warps in a CTA) for partial block reduction.
2072   ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2073                                 C.getIntTypeForBitwidth(32, /* Signed */ true),
2074                                 ImplicitParamDecl::Other);
2075   FunctionArgList Args;
2076   Args.push_back(&ReduceListArg);
2077   Args.push_back(&NumWarpsArg);
2078 
2079   const CGFunctionInfo &CGFI =
2080       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2081   auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2082                                     llvm::GlobalValue::InternalLinkage,
2083                                     "_omp_reduction_inter_warp_copy_func", &M);
2084   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2085   Fn->setDoesNotRecurse();
2086   CodeGenFunction CGF(CGM);
2087   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2088 
2089   CGBuilderTy &Bld = CGF.Builder;
2090 
2091   // This array is used as a medium to transfer, one reduce element at a time,
2092   // the data from the first lane of every warp to lanes in the first warp
2093   // in order to perform the final step of a reduction in a parallel region
2094   // (reduction across warps).  The array is placed in NVPTX __shared__ memory
2095   // for reduced latency, as well as to have a distinct copy for concurrently
2096   // executing target regions.  The array is declared with weak linkage so
2097   // that a single copy is shared across compilation units.
2098   StringRef TransferMediumName =
2099       "__openmp_nvptx_data_transfer_temporary_storage";
2100   llvm::GlobalVariable *TransferMedium =
2101       M.getGlobalVariable(TransferMediumName);
2102   unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
2103   if (!TransferMedium) {
2104     auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2105     unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2106     TransferMedium = new llvm::GlobalVariable(
2107         M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2108         llvm::UndefValue::get(Ty), TransferMediumName,
2109         /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2110         SharedAddressSpace);
2111     CGM.addCompilerUsedGlobal(TransferMedium);
2112   }
2113 
2114   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2115   // Get the CUDA thread id of the current OpenMP thread on the GPU.
2116   llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2117   // nvptx_lane_id = nvptx_id % warpsize
2118   llvm::Value *LaneID = getNVPTXLaneID(CGF);
2119   // nvptx_warp_id = nvptx_id / warpsize
2120   llvm::Value *WarpID = getNVPTXWarpID(CGF);
2121 
2122   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2123   Address LocalReduceList(
2124       Bld.CreatePointerBitCastOrAddrSpaceCast(
2125           CGF.EmitLoadOfScalar(
2126               AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2127               LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2128           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2129       CGF.getPointerAlign());
2130 
2131   unsigned Idx = 0;
2132   for (const Expr *Private : Privates) {
2133     //
2134     // Warp master copies reduce element to transfer medium in __shared__
2135     // memory.
2136     //
2137     unsigned RealTySize =
2138         C.getTypeSizeInChars(Private->getType())
2139             .alignTo(C.getTypeAlignInChars(Private->getType()))
2140             .getQuantity();
2141     for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
2142       unsigned NumIters = RealTySize / TySize;
2143       if (NumIters == 0)
2144         continue;
2145       QualType CType = C.getIntTypeForBitwidth(
2146           C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2147       llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2148       CharUnits Align = CharUnits::fromQuantity(TySize);
2149       llvm::Value *Cnt = nullptr;
2150       Address CntAddr = Address::invalid();
2151       llvm::BasicBlock *PrecondBB = nullptr;
2152       llvm::BasicBlock *ExitBB = nullptr;
2153       if (NumIters > 1) {
2154         CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2155         CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2156                               /*Volatile=*/false, C.IntTy);
2157         PrecondBB = CGF.createBasicBlock("precond");
2158         ExitBB = CGF.createBasicBlock("exit");
2159         llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2160         // There is no need to emit line number for unconditional branch.
2161         (void)ApplyDebugLocation::CreateEmpty(CGF);
2162         CGF.EmitBlock(PrecondBB);
2163         Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2164         llvm::Value *Cmp =
2165             Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2166         Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2167         CGF.EmitBlock(BodyBB);
2168       }
2169       // kmpc_barrier.
2170       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2171                                              /*EmitChecks=*/false,
2172                                              /*ForceSimpleCall=*/true);
2173       llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2174       llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2175       llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2176 
2177       // if (lane_id == 0)
2178       llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2179       Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2180       CGF.EmitBlock(ThenBB);
2181 
2182       // Reduce element = LocalReduceList[i]
2183       Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2184       llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2185           ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2186       // elemptr = ((CopyType*)(elemptrptr)) + I
2187       Address ElemPtr = Address(ElemPtrPtr, Align);
2188       ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2189       if (NumIters > 1) {
2190         ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(),
2191                                         ElemPtr.getPointer(), Cnt),
2192                           ElemPtr.getAlignment());
2193       }
2194 
2195       // Get pointer to location in transfer medium.
2196       // MediumPtr = &medium[warp_id]
2197       llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2198           TransferMedium->getValueType(), TransferMedium,
2199           {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2200       Address MediumPtr(MediumPtrVal, Align);
2201       // Casting to actual data type.
2202       // MediumPtr = (CopyType*)MediumPtrAddr;
2203       MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
2204 
2205       // elem = *elemptr
2206       // *MediumPtr = elem
2207       llvm::Value *Elem = CGF.EmitLoadOfScalar(
2208           ElemPtr, /*Volatile=*/false, CType, Loc,
2209           LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2210       // Store the source element value to the dest element address.
2211       CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2212                             LValueBaseInfo(AlignmentSource::Type),
2213                             TBAAAccessInfo());
2214 
2215       Bld.CreateBr(MergeBB);
2216 
2217       CGF.EmitBlock(ElseBB);
2218       Bld.CreateBr(MergeBB);
2219 
2220       CGF.EmitBlock(MergeBB);
2221 
2222       // kmpc_barrier.
2223       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2224                                              /*EmitChecks=*/false,
2225                                              /*ForceSimpleCall=*/true);
2226 
2227       //
2228       // Warp 0 copies reduce element from transfer medium.
2229       //
2230       llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2231       llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2232       llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2233 
2234       Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2235       llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2236           AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2237 
2238       // Up to 32 threads in warp 0 are active.
2239       llvm::Value *IsActiveThread =
2240           Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2241       Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2242 
2243       CGF.EmitBlock(W0ThenBB);
2244 
2245       // SrcMediumPtr = &medium[tid]
2246       llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2247           TransferMedium->getValueType(), TransferMedium,
2248           {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2249       Address SrcMediumPtr(SrcMediumPtrVal, Align);
2250       // SrcMediumVal = *SrcMediumPtr;
2251       SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
2252 
2253       // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
2254       Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2255       llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2256           TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
2257       Address TargetElemPtr = Address(TargetElemPtrVal, Align);
2258       TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
2259       if (NumIters > 1) {
2260         TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(),
2261                                               TargetElemPtr.getPointer(), Cnt),
2262                                 TargetElemPtr.getAlignment());
2263       }
2264 
2265       // *TargetElemPtr = SrcMediumVal;
2266       llvm::Value *SrcMediumValue =
2267           CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
2268       CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2269                             CType);
2270       Bld.CreateBr(W0MergeBB);
2271 
2272       CGF.EmitBlock(W0ElseBB);
2273       Bld.CreateBr(W0MergeBB);
2274 
2275       CGF.EmitBlock(W0MergeBB);
2276 
2277       if (NumIters > 1) {
2278         Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
2279         CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
2280         CGF.EmitBranch(PrecondBB);
2281         (void)ApplyDebugLocation::CreateEmpty(CGF);
2282         CGF.EmitBlock(ExitBB);
2283       }
2284       RealTySize %= TySize;
2285     }
2286     ++Idx;
2287   }
2288 
2289   CGF.FinishFunction();
2290   return Fn;
2291 }
2292 
2293 /// Emit a helper that reduces data across two OpenMP threads (lanes)
2294 /// in the same warp.  It uses shuffle instructions to copy over data from
2295 /// a remote lane's stack.  The reduction algorithm performed is specified
2296 /// by the fourth parameter.
2297 ///
2298 /// Algorithm Versions.
2299 /// Full Warp Reduce (argument value 0):
2300 ///   This algorithm assumes that all 32 lanes are active and gathers
2301 ///   data from these 32 lanes, producing a single resultant value.
2302 /// Contiguous Partial Warp Reduce (argument value 1):
2303 ///   This algorithm assumes that only a *contiguous* subset of lanes
2304 ///   are active.  This happens for the last warp in a parallel region
2305 ///   when the user specified num_threads is not an integer multiple of
2306 ///   32.  This contiguous subset always starts with the zeroth lane.
2307 /// Partial Warp Reduce (argument value 2):
2308 ///   This algorithm gathers data from any number of lanes at any position.
2309 /// All reduced values are stored in the lowest possible lane.  The set
2310 /// of problems every algorithm addresses is a superset of those
2311 /// addressable by algorithms with a lower version number.  Overhead
2312 /// increases as algorithm version increases.
2313 ///
2314 /// Terminology
2315 /// Reduce element:
2316 ///   Reduce element refers to the individual data field with primitive
2317 ///   data types to be combined and reduced across threads.
2318 /// Reduce list:
2319 ///   Reduce list refers to a collection of local, thread-private
2320 ///   reduce elements.
2321 /// Remote Reduce list:
2322 ///   Remote Reduce list refers to a collection of remote (relative to
2323 ///   the current thread) reduce elements.
2324 ///
2325 /// We distinguish between three states of threads that are important to
2326 /// the implementation of this function.
2327 /// Alive threads:
2328 ///   Threads in a warp executing the SIMT instruction, as distinguished from
2329 ///   threads that are inactive due to divergent control flow.
2330 /// Active threads:
2331 ///   The minimal set of threads that has to be alive upon entry to this
2332 ///   function.  The computation is correct iff active threads are alive.
2333 ///   Some threads are alive but they are not active because they do not
2334 ///   contribute to the computation in any useful manner.  Turning them off
2335 ///   may introduce control flow overheads without any tangible benefits.
2336 /// Effective threads:
2337 ///   In order to comply with the argument requirements of the shuffle
2338 ///   function, we must keep all lanes holding data alive.  But at most
2339 ///   half of them perform value aggregation; we refer to this half of
2340 ///   threads as effective. The other half is simply handing off their
2341 ///   data.
2342 ///
2343 /// Procedure
2344 /// Value shuffle:
2345 ///   In this step active threads transfer data from higher lane positions
2346 ///   in the warp to lower lane positions, creating Remote Reduce list.
2347 /// Value aggregation:
2348 ///   In this step, effective threads combine their thread local Reduce list
2349 ///   with Remote Reduce list and store the result in the thread local
2350 ///   Reduce list.
2351 /// Value copy:
2352 ///   In this step, we deal with the assumption made by algorithm 2
2353 ///   (i.e. contiguity assumption).  When we have an odd number of lanes
2354 ///   active, say 2k+1, only k threads will be effective and therefore k
2355 ///   new values will be produced.  However, the Reduce list owned by the
2356 ///   (2k+1)th thread is ignored in the value aggregation.  Therefore
2357 ///   we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2358 ///   that the contiguity assumption still holds.
2359 static llvm::Function *emitShuffleAndReduceFunction(
2360     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2361     QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
2362   ASTContext &C = CGM.getContext();
2363 
2364   // Thread local Reduce list used to host the values of data to be reduced.
2365   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2366                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2367   // Current lane id; could be logical.
2368   ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2369                               ImplicitParamDecl::Other);
2370   // Offset of the remote source lane relative to the current lane.
2371   ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2372                                         C.ShortTy, ImplicitParamDecl::Other);
2373   // Algorithm version.  This is expected to be known at compile time.
2374   ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2375                                C.ShortTy, ImplicitParamDecl::Other);
2376   FunctionArgList Args;
2377   Args.push_back(&ReduceListArg);
2378   Args.push_back(&LaneIDArg);
2379   Args.push_back(&RemoteLaneOffsetArg);
2380   Args.push_back(&AlgoVerArg);
2381 
2382   const CGFunctionInfo &CGFI =
2383       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2384   auto *Fn = llvm::Function::Create(
2385       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2386       "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2387   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2388   Fn->setDoesNotRecurse();
2389 
2390   CodeGenFunction CGF(CGM);
2391   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2392 
2393   CGBuilderTy &Bld = CGF.Builder;
2394 
2395   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2396   Address LocalReduceList(
2397       Bld.CreatePointerBitCastOrAddrSpaceCast(
2398           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2399                                C.VoidPtrTy, SourceLocation()),
2400           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2401       CGF.getPointerAlign());
2402 
2403   Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2404   llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2405       AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2406 
2407   Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2408   llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2409       AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2410 
2411   Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2412   llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2413       AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2414 
2415   // Create a local thread-private variable to host the Reduce list
2416   // from a remote lane.
2417   Address RemoteReduceList =
2418       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2419 
2420   // This loop iterates through the list of reduce elements and copies,
2421   // element by element, from a remote lane in the warp to RemoteReduceList,
2422   // hosted on the thread's stack.
2423   emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2424                         LocalReduceList, RemoteReduceList,
2425                         {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2426                          /*ScratchpadIndex=*/nullptr,
2427                          /*ScratchpadWidth=*/nullptr});
2428 
2429   // The actions to be performed on the Remote Reduce list are dependent
2430   // on the algorithm version.
2431   //
2432   //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2433   //  LaneId % 2 == 0 && Offset > 0):
2434   //    do the reduction value aggregation
2435   //
2436   //  The thread-local Reduce list is mutated in place to host the
2437   //  reduced data, which is the aggregated value produced from local and
2438   //  remote lanes.
2439   //
2440   //  Note that AlgoVer is expected to be a constant integer known at compile
2441   //  time.
2442   //  When AlgoVer==0, the first conjunction evaluates to true, making
2443   //    the entire predicate true at compile time.
2444   //  When AlgoVer==1, only the second part of the second conjunction needs
2445   //    to be evaluated at run time.  The other conjunctions evaluate to
2446   //    false at compile time.
2447   //  When AlgoVer==2, only the second part of the third conjunction needs
2448   //    to be evaluated at run time.  The other conjunctions evaluate to
2449   //    false at compile time.
2450   llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
2451 
2452   llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2453   llvm::Value *CondAlgo1 = Bld.CreateAnd(
2454       Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
2455 
2456   llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
2457   llvm::Value *CondAlgo2 = Bld.CreateAnd(
2458       Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
2459   CondAlgo2 = Bld.CreateAnd(
2460       CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
2461 
2462   llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
2463   CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
2464 
2465   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2466   llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2467   llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2468   Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2469 
2470   CGF.EmitBlock(ThenBB);
2471   // reduce_function(LocalReduceList, RemoteReduceList)
2472   llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2473       LocalReduceList.getPointer(), CGF.VoidPtrTy);
2474   llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2475       RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2476   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2477       CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
2478   Bld.CreateBr(MergeBB);
2479 
2480   CGF.EmitBlock(ElseBB);
2481   Bld.CreateBr(MergeBB);
2482 
2483   CGF.EmitBlock(MergeBB);
2484 
2485   // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
2486   // Reduce list.
2487   Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2488   llvm::Value *CondCopy = Bld.CreateAnd(
2489       Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
2490 
2491   llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
2492   llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
2493   llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
2494   Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
2495 
2496   CGF.EmitBlock(CpyThenBB);
2497   emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2498                         RemoteReduceList, LocalReduceList);
2499   Bld.CreateBr(CpyMergeBB);
2500 
2501   CGF.EmitBlock(CpyElseBB);
2502   Bld.CreateBr(CpyMergeBB);
2503 
2504   CGF.EmitBlock(CpyMergeBB);
2505 
2506   CGF.FinishFunction();
2507   return Fn;
2508 }
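// Concrete reading of the predicates above (illustrative): with AlgoVer == 2,
// Offset == 1 and lanes {0, 1, 2} active, lanes 0 and 2 satisfy
// 'LaneId % 2 == 0 && Offset > 0' and aggregate the shuffled-in data, while
// lane 1 merely hands its Reduce list off.  With AlgoVer == 1 and Offset == 2,
// lanes with LaneId >= 2 take the CondCopy branch instead and overwrite their
// local Reduce list with the Remote Reduce list, preserving the contiguity
// assumption.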
2509 
2510 /// This function emits a helper that copies all the reduction variables from
2511 /// the team into the provided global buffer for the reduction variables.
2512 ///
2513 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2514 ///   For all data entries D in reduce_data:
2515 ///     Copy local D to buffer.D[Idx]
2516 static llvm::Value *emitListToGlobalCopyFunction(
2517     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2518     QualType ReductionArrayTy, SourceLocation Loc,
2519     const RecordDecl *TeamReductionRec,
2520     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2521         &VarFieldMap) {
2522   ASTContext &C = CGM.getContext();
2523 
2524   // Buffer: global reduction buffer.
2525   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2526                               C.VoidPtrTy, ImplicitParamDecl::Other);
2527   // Idx: index of the buffer.
2528   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2529                            ImplicitParamDecl::Other);
2530   // ReduceList: thread local Reduce list.
2531   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2532                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2533   FunctionArgList Args;
2534   Args.push_back(&BufferArg);
2535   Args.push_back(&IdxArg);
2536   Args.push_back(&ReduceListArg);
2537 
2538   const CGFunctionInfo &CGFI =
2539       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2540   auto *Fn = llvm::Function::Create(
2541       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2542       "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
2543   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2544   Fn->setDoesNotRecurse();
2545   CodeGenFunction CGF(CGM);
2546   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2547 
2548   CGBuilderTy &Bld = CGF.Builder;
2549 
2550   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2551   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2552   Address LocalReduceList(
2553       Bld.CreatePointerBitCastOrAddrSpaceCast(
2554           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2555                                C.VoidPtrTy, Loc),
2556           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2557       CGF.getPointerAlign());
2558   QualType StaticTy = C.getRecordType(TeamReductionRec);
2559   llvm::Type *LLVMReductionsBufferTy =
2560       CGM.getTypes().ConvertTypeForMem(StaticTy);
2561   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2562       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2563       LLVMReductionsBufferTy->getPointerTo());
2564   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2565                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2566                                               /*Volatile=*/false, C.IntTy,
2567                                               Loc)};
2568   unsigned Idx = 0;
2569   for (const Expr *Private : Privates) {
2570     // Reduce element = LocalReduceList[i]
2571     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2572     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2573         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2574     // elemptr = ((CopyType*)(elemptrptr)) + I
2575     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2576         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
2577     Address ElemPtr =
2578         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2579     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2580     // Global = Buffer.VD[Idx];
2581     const FieldDecl *FD = VarFieldMap.lookup(VD);
2582     LValue GlobLVal = CGF.EmitLValueForField(
2583         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2584     Address GlobAddr = GlobLVal.getAddress(CGF);
2585     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2586         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2587     GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
2588     switch (CGF.getEvaluationKind(Private->getType())) {
2589     case TEK_Scalar: {
2590       llvm::Value *V = CGF.EmitLoadOfScalar(
2591           ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
2592           LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2593       CGF.EmitStoreOfScalar(V, GlobLVal);
2594       break;
2595     }
2596     case TEK_Complex: {
2597       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
2598           CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
2599       CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
2600       break;
2601     }
2602     case TEK_Aggregate:
2603       CGF.EmitAggregateCopy(GlobLVal,
2604                             CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2605                             Private->getType(), AggValueSlot::DoesNotOverlap);
2606       break;
2607     }
2608     ++Idx;
2609   }
2610 
2611   CGF.FinishFunction();
2612   return Fn;
2613 }
2614 
2615 /// This function emits a helper that reduces all the reduction variables from
2616 /// the team into the provided global buffer for the reduction variables.
2617 ///
2618 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2619 ///  void *GlobPtrs[];
2620 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
2621 ///  ...
2622 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
2623 ///  reduce_function(GlobPtrs, reduce_data);
2624 static llvm::Value *emitListToGlobalReduceFunction(
2625     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2626     QualType ReductionArrayTy, SourceLocation Loc,
2627     const RecordDecl *TeamReductionRec,
2628     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2629         &VarFieldMap,
2630     llvm::Function *ReduceFn) {
2631   ASTContext &C = CGM.getContext();
2632 
2633   // Buffer: global reduction buffer.
2634   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2635                               C.VoidPtrTy, ImplicitParamDecl::Other);
2636   // Idx: index of the buffer.
2637   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2638                            ImplicitParamDecl::Other);
2639   // ReduceList: thread local Reduce list.
2640   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2641                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2642   FunctionArgList Args;
2643   Args.push_back(&BufferArg);
2644   Args.push_back(&IdxArg);
2645   Args.push_back(&ReduceListArg);
2646 
2647   const CGFunctionInfo &CGFI =
2648       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2649   auto *Fn = llvm::Function::Create(
2650       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2651       "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
2652   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2653   Fn->setDoesNotRecurse();
2654   CodeGenFunction CGF(CGM);
2655   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2656 
2657   CGBuilderTy &Bld = CGF.Builder;
2658 
2659   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2660   QualType StaticTy = C.getRecordType(TeamReductionRec);
2661   llvm::Type *LLVMReductionsBufferTy =
2662       CGM.getTypes().ConvertTypeForMem(StaticTy);
2663   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2664       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2665       LLVMReductionsBufferTy->getPointerTo());
2666 
2667   // 1. Build a list of reduction variables.
2668   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2669   Address ReductionList =
2670       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2671   auto IPriv = Privates.begin();
2672   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2673                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2674                                               /*Volatile=*/false, C.IntTy,
2675                                               Loc)};
2676   unsigned Idx = 0;
2677   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2678     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2679     // Global = Buffer.VD[Idx];
2680     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2681     const FieldDecl *FD = VarFieldMap.lookup(VD);
2682     LValue GlobLVal = CGF.EmitLValueForField(
2683         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2684     Address GlobAddr = GlobLVal.getAddress(CGF);
2685     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2686         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2687     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2688     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2689     if ((*IPriv)->getType()->isVariablyModifiedType()) {
2690       // Store array size.
2691       ++Idx;
2692       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2693       llvm::Value *Size = CGF.Builder.CreateIntCast(
2694           CGF.getVLASize(
2695                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2696               .NumElts,
2697           CGF.SizeTy, /*isSigned=*/false);
2698       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2699                               Elem);
2700     }
2701   }
2702 
2703   // Call reduce_function(GlobalReduceList, ReduceList)
2704   llvm::Value *GlobalReduceList =
2705       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2706   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2707   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2708       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2709   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2710       CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2711   CGF.FinishFunction();
2712   return Fn;
2713 }
2714 
2715 /// This function emits a helper that copies the reduction variables from the
2716 /// provided global buffer back into the team's thread-local reduce list.
2717 ///
2718 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
2719 ///   For all data entries D in reduce_data:
2720 ///     Copy buffer.D[Idx] to local D;
2721 static llvm::Value *emitGlobalToListCopyFunction(
2722     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2723     QualType ReductionArrayTy, SourceLocation Loc,
2724     const RecordDecl *TeamReductionRec,
2725     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2726         &VarFieldMap) {
2727   ASTContext &C = CGM.getContext();
2728 
2729   // Buffer: global reduction buffer.
2730   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2731                               C.VoidPtrTy, ImplicitParamDecl::Other);
2732   // Idx: index of the buffer.
2733   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2734                            ImplicitParamDecl::Other);
2735   // ReduceList: thread local Reduce list.
2736   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2737                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2738   FunctionArgList Args;
2739   Args.push_back(&BufferArg);
2740   Args.push_back(&IdxArg);
2741   Args.push_back(&ReduceListArg);
2742 
2743   const CGFunctionInfo &CGFI =
2744       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2745   auto *Fn = llvm::Function::Create(
2746       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2747       "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
2748   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2749   Fn->setDoesNotRecurse();
2750   CodeGenFunction CGF(CGM);
2751   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2752 
2753   CGBuilderTy &Bld = CGF.Builder;
2754 
2755   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2756   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2757   Address LocalReduceList(
2758       Bld.CreatePointerBitCastOrAddrSpaceCast(
2759           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2760                                C.VoidPtrTy, Loc),
2761           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2762       CGF.getPointerAlign());
2763   QualType StaticTy = C.getRecordType(TeamReductionRec);
2764   llvm::Type *LLVMReductionsBufferTy =
2765       CGM.getTypes().ConvertTypeForMem(StaticTy);
2766   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2767       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2768       LLVMReductionsBufferTy->getPointerTo());
2769 
2770   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2771                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2772                                               /*Volatile=*/false, C.IntTy,
2773                                               Loc)};
2774   unsigned Idx = 0;
2775   for (const Expr *Private : Privates) {
2776     // Reduce element = LocalReduceList[i]
2777     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2778     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2779         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2780     // elemptr = ((CopyType*)(elemptrptr)) + I
2781     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2782         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
2783     Address ElemPtr =
2784         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2785     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2786     // Global = Buffer.VD[Idx];
2787     const FieldDecl *FD = VarFieldMap.lookup(VD);
2788     LValue GlobLVal = CGF.EmitLValueForField(
2789         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2790     Address GlobAddr = GlobLVal.getAddress(CGF);
2791     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2792         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2793     GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
2794     switch (CGF.getEvaluationKind(Private->getType())) {
2795     case TEK_Scalar: {
2796       llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
2797       CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
2798                             LValueBaseInfo(AlignmentSource::Type),
2799                             TBAAAccessInfo());
2800       break;
2801     }
2802     case TEK_Complex: {
2803       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
2804       CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2805                              /*isInit=*/false);
2806       break;
2807     }
2808     case TEK_Aggregate:
2809       CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2810                             GlobLVal, Private->getType(),
2811                             AggValueSlot::DoesNotOverlap);
2812       break;
2813     }
2814     ++Idx;
2815   }
2816 
2817   CGF.FinishFunction();
2818   return Fn;
2819 }
2820 
2821 /// This function emits a helper that reduces the reduction variables held in
2822 /// the provided global buffer into the team's thread-local reduce list.
2823 ///
2824 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2825 ///  void *GlobPtrs[];
2826 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
2827 ///  ...
2828 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
2829 ///  reduce_function(reduce_data, GlobPtrs);
2830 static llvm::Value *emitGlobalToListReduceFunction(
2831     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2832     QualType ReductionArrayTy, SourceLocation Loc,
2833     const RecordDecl *TeamReductionRec,
2834     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2835         &VarFieldMap,
2836     llvm::Function *ReduceFn) {
2837   ASTContext &C = CGM.getContext();
2838 
2839   // Buffer: global reduction buffer.
2840   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2841                               C.VoidPtrTy, ImplicitParamDecl::Other);
2842   // Idx: index of the buffer.
2843   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2844                            ImplicitParamDecl::Other);
2845   // ReduceList: thread local Reduce list.
2846   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2847                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2848   FunctionArgList Args;
2849   Args.push_back(&BufferArg);
2850   Args.push_back(&IdxArg);
2851   Args.push_back(&ReduceListArg);
2852 
2853   const CGFunctionInfo &CGFI =
2854       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2855   auto *Fn = llvm::Function::Create(
2856       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2857       "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2858   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2859   Fn->setDoesNotRecurse();
2860   CodeGenFunction CGF(CGM);
2861   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2862 
2863   CGBuilderTy &Bld = CGF.Builder;
2864 
2865   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2866   QualType StaticTy = C.getRecordType(TeamReductionRec);
2867   llvm::Type *LLVMReductionsBufferTy =
2868       CGM.getTypes().ConvertTypeForMem(StaticTy);
2869   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2870       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2871       LLVMReductionsBufferTy->getPointerTo());
2872 
2873   // 1. Build a list of reduction variables.
2874   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2875   Address ReductionList =
2876       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2877   auto IPriv = Privates.begin();
2878   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2879                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2880                                               /*Volatile=*/false, C.IntTy,
2881                                               Loc)};
2882   unsigned Idx = 0;
2883   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2884     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2885     // Global = Buffer.VD[Idx];
2886     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2887     const FieldDecl *FD = VarFieldMap.lookup(VD);
2888     LValue GlobLVal = CGF.EmitLValueForField(
2889         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2890     Address GlobAddr = GlobLVal.getAddress(CGF);
2891     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2892         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2893     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2894     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2895     if ((*IPriv)->getType()->isVariablyModifiedType()) {
2896       // Store array size.
2897       ++Idx;
2898       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2899       llvm::Value *Size = CGF.Builder.CreateIntCast(
2900           CGF.getVLASize(
2901                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2902               .NumElts,
2903           CGF.SizeTy, /*isSigned=*/false);
2904       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2905                               Elem);
2906     }
2907   }
2908 
2909   // Call reduce_function(ReduceList, GlobalReduceList)
2910   llvm::Value *GlobalReduceList =
2911       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2912   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2913   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2914       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2915   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2916       CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
2917   CGF.FinishFunction();
2918   return Fn;
2919 }
2920 
2921 ///
2922 /// Design of OpenMP reductions on the GPU
2923 ///
2924 /// Consider a typical OpenMP program with one or more reduction
2925 /// clauses:
2926 ///
2927 /// float foo;
2928 /// double bar;
2929 /// #pragma omp target teams distribute parallel for \
2930 ///             reduction(+:foo) reduction(*:bar)
2931 /// for (int i = 0; i < N; i++) {
2932 ///   foo += A[i]; bar *= B[i];
2933 /// }
2934 ///
2935 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
2936 /// all teams.  In our OpenMP implementation on the NVPTX device an
2937 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2938 /// within a team are mapped to CUDA threads within a threadblock.
2939 /// Our goal is to efficiently aggregate values across all OpenMP
2940 /// threads such that:
2941 ///
2942 ///   - the compiler and runtime are logically concise, and
2943 ///   - the reduction is performed efficiently in a hierarchical
2944 ///     manner as follows: within OpenMP threads in the same warp,
2945 ///     across warps in a threadblock, and finally across teams on
2946 ///     the NVPTX device.
2947 ///
2948 /// Introduction to Decoupling
2949 ///
2950 /// We would like to decouple the compiler and the runtime so that the
2951 /// latter is ignorant of the reduction variables (number, data types)
2952 /// and the reduction operators.  This allows a simpler interface
2953 /// and implementation while still attaining good performance.
2954 ///
2955 /// Pseudocode for the aforementioned OpenMP program generated by the
2956 /// compiler is as follows:
2957 ///
2958 /// 1. Create private copies of reduction variables on each OpenMP
2959 ///    thread: 'foo_private', 'bar_private'
2960 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2961 ///    to it and writes the result in 'foo_private' and 'bar_private'
2962 ///    respectively.
2963 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
2964 ///    and store the result on the team master:
2965 ///
2966 ///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2967 ///        reduceData, shuffleReduceFn, interWarpCpyFn)
2968 ///
2969 ///     where:
2970 ///       struct ReduceData {
2971 ///         double *foo;
2972 ///         double *bar;
2973 ///       } reduceData
2974 ///       reduceData.foo = &foo_private
2975 ///       reduceData.bar = &bar_private
2976 ///
2977 ///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2978 ///     auxiliary functions generated by the compiler that operate on
2979 ///     variables of type 'ReduceData'.  They help the runtime perform
2980 ///     algorithmic steps in a data-agnostic manner.
2981 ///
2982 ///     'shuffleReduceFn' is a pointer to a function that reduces data
2983 ///     of type 'ReduceData' across two OpenMP threads (lanes) in the
2984 ///     same warp.  It takes the following arguments as input:
2985 ///
2986 ///     a. variable of type 'ReduceData' on the calling lane,
2987 ///     b. its lane_id,
2988 ///     c. an offset relative to the current lane_id to generate a
2989 ///        remote_lane_id.  The remote lane contains the second
2990 ///        variable of type 'ReduceData' that is to be reduced.
2991 ///     d. an algorithm version parameter determining which reduction
2992 ///        algorithm to use.
2993 ///
2994 ///     'shuffleReduceFn' retrieves data from the remote lane using
2995 ///     efficient GPU shuffle intrinsics and reduces, using the
2996 ///     algorithm specified by the 4th parameter, the two operands
2997 ///     element-wise.  The result is written to the first operand.
2998 ///
2999 ///     Different reduction algorithms are implemented in different
3000 ///     runtime functions, all calling 'shuffleReduceFn' to perform
3001 ///     the essential reduction step.  Therefore, based on the 4th
3002 ///     parameter, this function behaves slightly differently to
3003 ///     cooperate with the runtime to ensure correctness under
3004 ///     different circumstances.
3005 ///
3006 ///     'InterWarpCpyFn' is a pointer to a function that transfers
3007 ///     reduced variables across warps.  It tunnels, through CUDA
3008 ///     shared memory, the thread-private data of type 'ReduceData'
3009 ///     from lane 0 of each warp to a lane in the first warp.
3010 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3011 ///    The last team writes the global reduced value to memory.
3012 ///
3013 ///     ret = __kmpc_nvptx_teams_reduce_nowait_v2(...,
3014 ///             reduceData, shuffleReduceFn, interWarpCpyFn,
3015 ///             listToGlobalCpyFn, listToGlobalRedFn,
3016 ///             globalToListCpyFn, globalToListRedFn)
3017 ///
3018 ///     'listToGlobalCpyFn' and 'listToGlobalRedFn' are helpers that copy
3019 ///     (resp. reduce) a team's reduced data into its slot of a global
3020 ///     buffer in device memory.
3021 ///
3022 ///     'globalToListCpyFn' and 'globalToListRedFn' are helpers that load
3023 ///     data back from that buffer and copy (resp. reduce) it into the
3024 ///     local reduce list.
3025 ///
3026 ///     These compiler-generated functions hide address
3027 /// 5. if ret == 1:
3028 ///     The team master of the last team stores the reduced
3029 ///     result to the globals in memory.
3030 ///     foo += reduceData.foo; bar *= reduceData.bar
3031 ///
3032 ///
3033 /// Warp Reduction Algorithms
3034 ///
3035 /// On the warp level, we have three algorithms implemented in the
3036 /// OpenMP runtime depending on the number of active lanes:
3037 ///
3038 /// Full Warp Reduction
3039 ///
3040 /// The reduce algorithm within a warp where all lanes are active
3041 /// is implemented in the runtime as follows:
3042 ///
3043 /// full_warp_reduce(void *reduce_data,
3044 ///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3045 ///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3046 ///     ShuffleReduceFn(reduce_data, 0, offset, 0);
3047 /// }
3048 ///
3049 /// The algorithm completes in log(2, WARPSIZE) steps.
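///
/// For example, with WARPSIZE == 32 the loop visits offsets 16, 8, 4, 2
/// and 1, i.e. five ShuffleReduceFn steps in total.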
3050 ///
3051 /// 'ShuffleReduceFn' is called here with lane_id set to 0 because lane_id
3052 /// is unused in this mode; this saves instructions by not retrieving it
3053 /// from the corresponding special registers.  The 4th parameter, which
3054 /// represents the version of the algorithm being used, is set to 0 to
3055 /// signify full warp reduction.
3056 ///
3057 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3058 ///
3059 /// #reduce_elem refers to an element in the local lane's data structure
3060 /// #remote_elem is retrieved from a remote lane
3061 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3062 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3063 ///
3064 /// Contiguous Partial Warp Reduction
3065 ///
3066 /// This reduce algorithm is used within a warp where only the first
3067 /// 'n' (n <= WARPSIZE) lanes are active.  It is typically used when the
3068 /// number of OpenMP threads in a parallel region is not a multiple of
3069 /// WARPSIZE.  The algorithm is implemented in the runtime as follows:
3070 ///
3071 /// void
3072 /// contiguous_partial_reduce(void *reduce_data,
3073 ///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
3074 ///                           int size, int lane_id) {
3075 ///   int curr_size;
3076 ///   int offset;
3077 ///   curr_size = size;
3078 ///   offset = curr_size/2;
3079 ///   while (offset > 0) {
3080 ///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3081 ///     curr_size = (curr_size+1)/2;
3082 ///     offset = curr_size/2;
3083 ///   }
3084 /// }
3085 ///
3086 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3087 ///
3088 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3089 /// if (lane_id < offset)
3090 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3091 /// else
3092 ///     reduce_elem = remote_elem
3093 ///
3094 /// This algorithm assumes that the data to be reduced are located in a
3095 /// contiguous subset of lanes starting from the first.  When there is
3096 /// an odd number of active lanes, the data in the last lane is not
3097 /// aggregated with any other lane's data but is instead copied over.
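///
/// For example, with size == 7 the loop visits offsets 3, 2 and 1.  After
/// the first step lanes 0..2 hold reduced pairs and lane 3 holds the value
/// copied from lane 6; the remaining two steps reduce those four partial
/// results to a single value in lane 0.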
3098 ///
3099 /// Dispersed Partial Warp Reduction
3100 ///
3101 /// This algorithm is used within a warp when any discontiguous subset of
3102 /// lanes is active.  It is used to implement the reduction operation
3103 /// across lanes in an OpenMP simd region or in a nested parallel region.
3104 ///
3105 /// void
3106 /// dispersed_partial_reduce(void *reduce_data,
3107 ///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3108 ///   int size, remote_id;
3109 ///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
3110 ///   do {
3111 ///       remote_id = next_active_lane_id_right_after_me();
3112 ///       # the above function returns 0 if no active lane
3113 ///       # is present right after the current lane.
3114 ///       size = number_of_active_lanes_in_this_warp();
3115 ///       logical_lane_id /= 2;
3116 ///       ShuffleReduceFn(reduce_data, logical_lane_id,
3117 ///                       remote_id-1-threadIdx.x, 2);
3118 ///   } while (logical_lane_id % 2 == 0 && size > 1);
3119 /// }
3120 ///
3121 /// There is no assumption made about the initial state of the reduction.
3122 /// Any number of lanes (>=1) could be active at any position.  The reduction
3123 /// result is returned in the first active lane.
3124 ///
3125 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3126 ///
3127 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3128 /// if (lane_id % 2 == 0 && offset > 0)
3129 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3130 /// else
3131 ///     reduce_elem = remote_elem
3132 ///
3133 ///
3134 /// Intra-Team Reduction
3135 ///
3136 /// This function, as implemented in the runtime call
3137 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3138 /// threads in a team.  It first reduces within a warp using the
3139 /// aforementioned algorithms.  We then proceed to gather all such
3140 /// reduced values at the first warp.
3141 ///
3142 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
3143 /// data from each "warp master" (the zeroth lane of each warp, where
3144 /// warp-reduced data is held) to the zeroth warp.  This step reduces (in
3145 /// a mathematical sense) the problem of reduction across warp masters in
3146 /// a block to the problem of warp reduction.
3147 ///
3148 ///
3149 /// Inter-Team Reduction
3150 ///
3151 /// Once a team has reduced its data to a single value, it is stored in
3152 /// a global scratchpad array.  Since each team has a distinct slot, this
3153 /// can be done without locking.
3154 ///
3155 /// The last team to write to the scratchpad array proceeds to reduce the
3156 /// scratchpad array.  One or more workers in the last team use the
3157 /// global-to-list helpers described above to load and reduce values, i.e.,
3158 /// the k'th worker reduces every k'th element.
3159 ///
3160 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3161 /// reduce across workers and compute a globally reduced value.
3162 ///
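/// In the code emitted below, step 5 materializes as a guarded combine
/// (a sketch; 'res' is the runtime call's return value):
///
///   if (res == 1) {
///     foo += reduceData.foo; bar *= reduceData.bar;
///     __kmpc_nvptx_end_reduce_nowait(gtid);
///   }
///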
3163 void CGOpenMPRuntimeGPU::emitReduction(
3164     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3165     ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3166     ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3167   if (!CGF.HaveInsertPoint())
3168     return;
3169 
3170   bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3171 #ifndef NDEBUG
3172   bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3173 #endif
3174 
3175   if (Options.SimpleReduction) {
3176     assert(!TeamsReduction && !ParallelReduction &&
3177            "Invalid reduction selection in emitReduction.");
3178     CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3179                                    ReductionOps, Options);
3180     return;
3181   }
3182 
3183   assert((TeamsReduction || ParallelReduction) &&
3184          "Invalid reduction selection in emitReduction.");
3185 
3186   // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
3187   // RedList, shuffle_reduce_func, interwarp_copy_func);
3188   // or
3189   // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
3190   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3191   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3192 
3193   llvm::Value *Res;
3194   ASTContext &C = CGM.getContext();
3195   // 1. Build a list of reduction variables.
3196   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3197   auto Size = RHSExprs.size();
3198   for (const Expr *E : Privates) {
3199     if (E->getType()->isVariablyModifiedType())
3200       // Reserve place for array size.
3201       ++Size;
3202   }
3203   llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3204   QualType ReductionArrayTy =
3205       C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3206                              /*IndexTypeQuals=*/0);
3207   Address ReductionList =
3208       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3209   auto IPriv = Privates.begin();
3210   unsigned Idx = 0;
3211   for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3212     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3213     CGF.Builder.CreateStore(
3214         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3215             CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3216         Elem);
3217     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3218       // Store array size.
3219       ++Idx;
3220       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3221       llvm::Value *Size = CGF.Builder.CreateIntCast(
3222           CGF.getVLASize(
3223                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3224               .NumElts,
3225           CGF.SizeTy, /*isSigned=*/false);
3226       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3227                               Elem);
3228     }
3229   }
3230 
3231   llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3232       ReductionList.getPointer(), CGF.VoidPtrTy);
3233   llvm::Function *ReductionFn = emitReductionFunction(
3234       Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
3235       LHSExprs, RHSExprs, ReductionOps);
3236   llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3237   llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3238       CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3239   llvm::Value *InterWarpCopyFn =
3240       emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3241 
3242   if (ParallelReduction) {
3243     llvm::Value *Args[] = {RTLoc,
3244                            ThreadId,
3245                            CGF.Builder.getInt32(RHSExprs.size()),
3246                            ReductionArrayTySize,
3247                            RL,
3248                            ShuffleAndReduceFn,
3249                            InterWarpCopyFn};
3250 
3251     Res = CGF.EmitRuntimeCall(
3252         OMPBuilder.getOrCreateRuntimeFunction(
3253             CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3254         Args);
3255   } else {
3256     assert(TeamsReduction && "expected teams reduction.");
3257     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
3258     llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
3259     int Cnt = 0;
3260     for (const Expr *DRE : Privates) {
3261       PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
3262       ++Cnt;
3263     }
3264     const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
3265         CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
3266         C.getLangOpts().OpenMPCUDAReductionBufNum);
3267     TeamsReductions.push_back(TeamReductionRec);
3268     if (!KernelTeamsReductionPtr) {
3269       KernelTeamsReductionPtr = new llvm::GlobalVariable(
3270           CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
3271           llvm::GlobalValue::InternalLinkage, nullptr,
3272           "_openmp_teams_reductions_buffer_$_$ptr");
3273     }
3274     llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
3275         Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
3276         /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
3277     llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
3278         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3279     llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
3280         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3281         ReductionFn);
3282     llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
3283         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3284     llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
3285         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3286         ReductionFn);
3287 
3288     llvm::Value *Args[] = {
3289         RTLoc,
3290         ThreadId,
3291         GlobalBufferPtr,
3292         CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
3293         RL,
3294         ShuffleAndReduceFn,
3295         InterWarpCopyFn,
3296         GlobalToBufferCpyFn,
3297         GlobalToBufferRedFn,
3298         BufferToGlobalCpyFn,
3299         BufferToGlobalRedFn};
3300 
3301     Res = CGF.EmitRuntimeCall(
3302         OMPBuilder.getOrCreateRuntimeFunction(
3303             CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
3304         Args);
3305   }
3306 
3307   // 5. Build if (res == 1)
3308   llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
3309   llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
3310   llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
3311       Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
3312   CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
3313 
3314   // 6. Build then branch: where we have reduced values in the master
3315   //    thread in each team.
3316   //    __kmpc_end_reduce{_nowait}(<gtid>);
3317   //    break;
3318   CGF.EmitBlock(ThenBB);
3319 
3320   // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
3321   auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
3322                     this](CodeGenFunction &CGF, PrePostActionTy &Action) {
3323     auto IPriv = Privates.begin();
3324     auto ILHS = LHSExprs.begin();
3325     auto IRHS = RHSExprs.begin();
3326     for (const Expr *E : ReductionOps) {
3327       emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
3328                                   cast<DeclRefExpr>(*IRHS));
3329       ++IPriv;
3330       ++ILHS;
3331       ++IRHS;
3332     }
3333   };
3334   llvm::Value *EndArgs[] = {ThreadId};
3335   RegionCodeGenTy RCG(CodeGen);
3336   NVPTXActionTy Action(
3337       nullptr, llvm::None,
3338       OMPBuilder.getOrCreateRuntimeFunction(
3339           CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
3340       EndArgs);
3341   RCG.setAction(Action);
3342   RCG(CGF);
3343   // There is no need to emit line number for unconditional branch.
3344   (void)ApplyDebugLocation::CreateEmpty(CGF);
3345   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3346 }
3347 
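/// Translate a reference-typed native parameter into the form used on the
/// device: a restrict-qualified pointer in the NVPTX local address space
/// (a summary of the body below; non-reference parameters are returned
/// unchanged).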
3348 const VarDecl *
3349 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
3350                                        const VarDecl *NativeParam) const {
3351   if (!NativeParam->getType()->isReferenceType())
3352     return NativeParam;
3353   QualType ArgType = NativeParam->getType();
3354   QualifierCollector QC;
3355   const Type *NonQualTy = QC.strip(ArgType);
3356   QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3357   if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
3358     if (Attr->getCaptureKind() == OMPC_map) {
3359       PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
3360                                                         LangAS::opencl_global);
3361     }
3362   }
3363   ArgType = CGM.getContext().getPointerType(PointeeTy);
3364   QC.addRestrict();
3365   enum { NVPTX_local_addr = 5 };
3366   QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
3367   ArgType = QC.apply(CGM.getContext(), ArgType);
3368   if (isa<ImplicitParamDecl>(NativeParam))
3369     return ImplicitParamDecl::Create(
3370         CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
3371         NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
3372   return ParmVarDecl::Create(
3373       CGM.getContext(),
3374       const_cast<DeclContext *>(NativeParam->getDeclContext()),
3375       NativeParam->getBeginLoc(), NativeParam->getLocation(),
3376       NativeParam->getIdentifier(), ArgType,
3377       /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
3378 }
3379 
3380 Address
3381 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
3382                                           const VarDecl *NativeParam,
3383                                           const VarDecl *TargetParam) const {
3384   assert(NativeParam != TargetParam &&
3385          NativeParam->getType()->isReferenceType() &&
3386          "Native arg must not be the same as target arg.");
3387   Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
3388   QualType NativeParamType = NativeParam->getType();
3389   QualifierCollector QC;
3390   const Type *NonQualTy = QC.strip(NativeParamType);
3391   QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3392   unsigned NativePointeeAddrSpace =
3393       CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3394   QualType TargetTy = TargetParam->getType();
3395   llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3396       LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3397   // First cast to generic.
3398   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3399       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
3400                       /*AddrSpace=*/0));
3401   // Cast from generic to native address space.
3402   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3403       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
3404                       NativePointeeAddrSpace));
3405   Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3406   CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3407                         NativeParamType);
3408   return NativeParamAddr;
3409 }
3410 
3411 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3412     CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3413     ArrayRef<llvm::Value *> Args) const {
3414   SmallVector<llvm::Value *, 4> TargetArgs;
3415   TargetArgs.reserve(Args.size());
3416   auto *FnType = OutlinedFn.getFunctionType();
3417   for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3418     if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3419       TargetArgs.append(std::next(Args.begin(), I), Args.end());
3420       break;
3421     }
3422     llvm::Type *TargetType = FnType->getParamType(I);
3423     llvm::Value *NativeArg = Args[I];
3424     if (!TargetType->isPointerTy()) {
3425       TargetArgs.emplace_back(NativeArg);
3426       continue;
3427     }
3428     llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3429         NativeArg,
3430         NativeArg->getType()->getPointerElementType()->getPointerTo());
3431     TargetArgs.emplace_back(
3432         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3433   }
3434   CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3435 }
3436 
3437 /// Emit function which wraps the outline parallel region
3438 /// and controls the arguments which are passed to this function.
3439 /// The wrapper ensures that the outlined function is called
3440 /// with the correct arguments when data is shared.
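///
/// A sketch of the wrapper's shape (names are illustrative; loop-bound and
/// captured arguments are appended as needed):
///
///   void outlined_fn_wrapper(uint16_t parallel_level, uint32_t tid) {
///     uint32_t zero = 0;
///     void **shared_args;
///     __kmpc_get_shared_variables(&shared_args);
///     outlined_fn(&tid, &zero, shared_args[0], ..., shared_args[n]);
///   }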
3441 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3442     llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3443   ASTContext &Ctx = CGM.getContext();
3444   const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3445 
3446   // Create a function that takes as argument the source thread.
3447   FunctionArgList WrapperArgs;
3448   QualType Int16QTy =
3449       Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3450   QualType Int32QTy =
3451       Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3452   ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3453                                      /*Id=*/nullptr, Int16QTy,
3454                                      ImplicitParamDecl::Other);
3455   ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3456                                /*Id=*/nullptr, Int32QTy,
3457                                ImplicitParamDecl::Other);
3458   WrapperArgs.emplace_back(&ParallelLevelArg);
3459   WrapperArgs.emplace_back(&WrapperArg);
3460 
3461   const CGFunctionInfo &CGFI =
3462       CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3463 
3464   auto *Fn = llvm::Function::Create(
3465       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3466       Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3467 
3468   // Ensure we do not inline the function. This is trivially true for the ones
3469 // passed to __kmpc_fork_call but the ones called in serialized regions
3470 // could be inlined. This is not perfect, but it is closer to the invariant
3471   // we want, namely, every data environment starts with a new function.
3472   // TODO: We should pass the if condition to the runtime function and do the
3473   //       handling there. Much cleaner code.
3474   Fn->addFnAttr(llvm::Attribute::NoInline);
3475 
3476   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3477   Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3478   Fn->setDoesNotRecurse();
3479 
3480   CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3481   CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3482                     D.getBeginLoc(), D.getBeginLoc());
3483 
3484   const auto *RD = CS.getCapturedRecordDecl();
3485   auto CurField = RD->field_begin();
3486 
3487   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3488                                                       /*Name=*/".zero.addr");
3489   CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
3490   // Get the array of arguments.
3491   SmallVector<llvm::Value *, 8> Args;
3492 
3493   Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3494   Args.emplace_back(ZeroAddr.getPointer());
3495 
3496   CGBuilderTy &Bld = CGF.Builder;
3497   auto CI = CS.capture_begin();
3498 
3499   // Use global memory for data sharing.
3500   // Handle passing of global args to workers.
3501   Address GlobalArgs =
3502       CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3503   llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3504   llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3505   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3506                           CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3507                       DataSharingArgs);
3508 
3509   // Retrieve the shared variables from the list of references returned
3510   // by the runtime. Pass the variables to the outlined function.
3511   Address SharedArgListAddress = Address::invalid();
3512   if (CS.capture_size() > 0 ||
3513       isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3514     SharedArgListAddress = CGF.EmitLoadOfPointer(
3515         GlobalArgs, CGF.getContext()
3516                         .getPointerType(CGF.getContext().getPointerType(
3517                             CGF.getContext().VoidPtrTy))
3518                         .castAs<PointerType>());
3519   }
3520   unsigned Idx = 0;
3521   if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3522     Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3523     Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3524         Src, CGF.SizeTy->getPointerTo());
3525     llvm::Value *LB = CGF.EmitLoadOfScalar(
3526         TypedAddress,
3527         /*Volatile=*/false,
3528         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3529         cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
3530     Args.emplace_back(LB);
3531     ++Idx;
3532     Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3533     TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3534         Src, CGF.SizeTy->getPointerTo());
3535     llvm::Value *UB = CGF.EmitLoadOfScalar(
3536         TypedAddress,
3537         /*Volatile=*/false,
3538         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3539         cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
3540     Args.emplace_back(UB);
3541     ++Idx;
3542   }
3543   if (CS.capture_size() > 0) {
3544     ASTContext &CGFContext = CGF.getContext();
3545     for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
3546       QualType ElemTy = CurField->getType();
3547       Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
3548       Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3549           Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
3550       llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
3551                                               /*Volatile=*/false,
3552                                               CGFContext.getPointerType(ElemTy),
3553                                               CI->getLocation());
3554       if (CI->capturesVariableByCopy() &&
3555           !CI->getCapturedVar()->getType()->isAnyPointerType()) {
3556         Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
3557                               CI->getLocation());
3558       }
3559       Args.emplace_back(Arg);
3560     }
3561   }
3562 
3563   emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
3564   CGF.FinishFunction();
3565   return Fn;
3566 }
3567 
3568 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
3569                                               const Decl *D) {
3570   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3571     return;
3572 
3573   assert(D && "Expected function or captured|block decl.");
3574   assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
3575          "Function is registered already.");
3576   assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
3577          "Team is set but not processed.");
3578   const Stmt *Body = nullptr;
3579   bool NeedToDelayGlobalization = false;
3580   if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3581     Body = FD->getBody();
3582   } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
3583     Body = BD->getBody();
3584   } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
3585     Body = CD->getBody();
3586     NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
3587     if (NeedToDelayGlobalization &&
3588         getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
3589       return;
3590   }
3591   if (!Body)
3592     return;
3593   CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
3594   VarChecker.Visit(Body);
3595   const RecordDecl *GlobalizedVarsRecord =
3596       VarChecker.getGlobalizedRecord(IsInTTDRegion);
3597   TeamAndReductions.first = nullptr;
3598   TeamAndReductions.second.clear();
3599   ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3600       VarChecker.getEscapedVariableLengthDecls();
3601   if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3602     return;
3603   auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3604   I->getSecond().MappedParams =
3605       std::make_unique<CodeGenFunction::OMPMapVars>();
3606   I->getSecond().EscapedParameters.insert(
3607       VarChecker.getEscapedParameters().begin(),
3608       VarChecker.getEscapedParameters().end());
3609   I->getSecond().EscapedVariableLengthDecls.append(
3610       EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3611   DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3612   for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3613     assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3614     Data.insert(std::make_pair(VD, MappedVarData()));
3615   }
3616   if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3617     CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3618     VarChecker.Visit(Body);
3619     I->getSecond().SecondaryLocalVarData.emplace();
3620     DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3621     for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3622       assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3623       Data.insert(std::make_pair(VD, MappedVarData()));
3624     }
3625   }
3626   if (!NeedToDelayGlobalization) {
3627     emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3628     struct GlobalizationScope final : EHScopeStack::Cleanup {
3629       GlobalizationScope() = default;
3630 
3631       void Emit(CodeGenFunction &CGF, Flags flags) override {
3632         static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3633             .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3634       }
3635     };
3636     CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3637   }
3638 }
3639 
3640 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3641                                                         const VarDecl *VD) {
3642   if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3643     const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3644     auto AS = LangAS::Default;
3645     switch (A->getAllocatorType()) {
3646       // Use the default allocator here as by default local vars are
3647       // threadlocal.
3648     case OMPAllocateDeclAttr::OMPNullMemAlloc:
3649     case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3650     case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3651     case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3652     case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3653       // Follow the user decision - use default allocation.
3654       return Address::invalid();
3655     case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3656       // TODO: implement support for user-defined allocators.
3657       return Address::invalid();
3658     case OMPAllocateDeclAttr::OMPConstMemAlloc:
3659       AS = LangAS::cuda_constant;
3660       break;
3661     case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3662       AS = LangAS::cuda_shared;
3663       break;
3664     case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3665     case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3666       break;
3667     }
3668     llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
3669     auto *GV = new llvm::GlobalVariable(
3670         CGM.getModule(), VarTy, /*isConstant=*/false,
3671         llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
3672         VD->getName(),
3673         /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3674         CGM.getContext().getTargetAddressSpace(AS));
3675     CharUnits Align = CGM.getContext().getDeclAlign(VD);
3676     GV->setAlignment(Align.getAsAlign());
3677     return Address(
3678         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3679             GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
3680                     VD->getType().getAddressSpace()))),
3681         Align);
3682   }
3683 
3684   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3685     return Address::invalid();
3686 
3687   VD = VD->getCanonicalDecl();
3688   auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
3689   if (I == FunctionGlobalizedDecls.end())
3690     return Address::invalid();
3691   auto VDI = I->getSecond().LocalVarData.find(VD);
3692   if (VDI != I->getSecond().LocalVarData.end())
3693     return VDI->second.PrivateAddr;
3694   if (VD->hasAttrs()) {
3695     for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
3696          E(VD->attr_end());
3697          IT != E; ++IT) {
3698       auto VDI = I->getSecond().LocalVarData.find(
3699           cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
3700               ->getCanonicalDecl());
3701       if (VDI != I->getSecond().LocalVarData.end())
3702         return VDI->second.PrivateAddr;
3703     }
3704   }
3705 
3706   return Address::invalid();
3707 }
3708 
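/// Drop the globalization bookkeeping recorded for \p CGF's function once its
/// body has been emitted, then run the base-class finalization.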
3709 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
3710   FunctionGlobalizedDecls.erase(CGF.CurFn);
3711   CGOpenMPRuntime::functionFinished(CGF);
3712 }
3713 
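/// Pick the default dist_schedule for GPU targets. In SPMD mode, use a static
/// schedule with a chunk equal to the number of hardware threads in the
/// block, so each team takes one iteration per thread and round; otherwise
/// defer to the host-side default.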
3714 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
3715     CodeGenFunction &CGF, const OMPLoopDirective &S,
3716     OpenMPDistScheduleClauseKind &ScheduleKind,
3717     llvm::Value *&Chunk) const {
3718   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3719   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
3720     ScheduleKind = OMPC_DIST_SCHEDULE_static;
3721     Chunk = CGF.EmitScalarConversion(
3722         RT.getGPUNumThreads(CGF),
3723         CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3724         S.getIterationVariable()->getType(), S.getBeginLoc());
3725     return;
3726   }
3727   CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
3728       CGF, S, ScheduleKind, Chunk);
3729 }
3730 
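/// Pick the default schedule for GPU targets: static with a chunk size of 1,
/// so adjacent iterations map to adjacent threads, which tends to produce
/// coalesced memory accesses.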
3731 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
3732     CodeGenFunction &CGF, const OMPLoopDirective &S,
3733     OpenMPScheduleClauseKind &ScheduleKind,
3734     const Expr *&ChunkExpr) const {
3735   ScheduleKind = OMPC_SCHEDULE_static;
3736   // Chunk size is 1 in this case.
3737   llvm::APInt ChunkSize(32, 1);
3738   ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
3739       CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3740       SourceLocation());
3741 }
3742 
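/// Fix up lambdas captured into a target region: store the device-side
/// addresses of by-reference captures (and of the enclosing 'this', if
/// captured) into the corresponding lambda capture fields.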
3743 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
3744     CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
3745   assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
3746          "Expected target-based directive.");
3747   const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
3748   for (const CapturedStmt::Capture &C : CS->captures()) {
3749     // Adjust variables captured by reference in lambdas for target-based
3750     // directives.
3751     if (!C.capturesVariable())
3752       continue;
3753     const VarDecl *VD = C.getCapturedVar();
3754     const auto *RD = VD->getType()
3755                          .getCanonicalType()
3756                          .getNonReferenceType()
3757                          ->getAsCXXRecordDecl();
3758     if (!RD || !RD->isLambda())
3759       continue;
3760     Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3761     LValue VDLVal;
3762     if (VD->getType().getCanonicalType()->isReferenceType())
3763       VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
3764     else
3765       VDLVal = CGF.MakeAddrLValue(
3766           VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
3767     llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3768     FieldDecl *ThisCapture = nullptr;
3769     RD->getCaptureFields(Captures, ThisCapture);
3770     if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
3771       LValue ThisLVal =
3772           CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
3773       llvm::Value *CXXThis = CGF.LoadCXXThis();
3774       CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
3775     }
3776     for (const LambdaCapture &LC : RD->captures()) {
3777       if (LC.getCaptureKind() != LCK_ByRef)
3778         continue;
3779       const VarDecl *VD = LC.getCapturedVar();
3780       if (!CS->capturesVariable(VD))
3781         continue;
3782       auto It = Captures.find(VD);
3783       assert(It != Captures.end() && "Found lambda capture without field.");
3784       LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
3785       Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3786       if (VD->getType().getCanonicalType()->isReferenceType())
3787         VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
3788                                                VD->getType().getCanonicalType())
3789                      .getAddress(CGF);
3790       CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
3791     }
3792   }
3793 }
3794 
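/// If \p VD carries an 'omp allocate' attribute with a predefined allocator,
/// report through \p AS the language address space the global variable should
/// be emitted in and return true.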
3795 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
3796                                                             LangAS &AS) {
3797   if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
3798     return false;
3799   const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3800   switch (A->getAllocatorType()) {
3801   case OMPAllocateDeclAttr::OMPNullMemAlloc:
3802   case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3803   // Not supported, fall back to the default memory space.
3804   case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3805   case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3806   case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3807   case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3808   case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3809     AS = LangAS::Default;
3810     return true;
3811   case OMPAllocateDeclAttr::OMPConstMemAlloc:
3812     AS = LangAS::cuda_constant;
3813     return true;
3814   case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3815     AS = LangAS::cuda_shared;
3816     return true;
3817   case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3818     llvm_unreachable("Expected predefined allocator for variables with "
3819                      "static storage.");
3820   }
3821   return false;
3822 }
3823 
3824 // Get the current CudaArch, ignoring any unknown values.
3825 static CudaArch getCudaArch(CodeGenModule &CGM) {
3826   if (!CGM.getTarget().hasFeature("ptx"))
3827     return CudaArch::UNKNOWN;
3828   for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
3829     if (Feature.getValue()) {
3830       CudaArch Arch = StringToCudaArch(Feature.getKey());
3831       if (Arch != CudaArch::UNKNOWN)
3832         return Arch;
3833     }
3834   }
3835   return CudaArch::UNKNOWN;
3836 }
3837 
3838 /// Check to see if the target architecture supports unified addressing,
3839 /// a prerequisite for the OpenMP requires clause "unified_shared_memory".
3840 void CGOpenMPRuntimeGPU::processRequiresDirective(
3841     const OMPRequiresDecl *D) {
3842   for (const OMPClause *Clause : D->clauselists()) {
3843     if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
3844       CudaArch Arch = getCudaArch(CGM);
3845       switch (Arch) {
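      // Pre-Pascal NVPTX architectures (sm_20 through sm_53) cannot provide
      // unified addressing, so diagnose them.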
3846       case CudaArch::SM_20:
3847       case CudaArch::SM_21:
3848       case CudaArch::SM_30:
3849       case CudaArch::SM_32:
3850       case CudaArch::SM_35:
3851       case CudaArch::SM_37:
3852       case CudaArch::SM_50:
3853       case CudaArch::SM_52:
3854       case CudaArch::SM_53: {
3855         SmallString<256> Buffer;
3856         llvm::raw_svector_ostream Out(Buffer);
3857         Out << "Target architecture " << CudaArchToString(Arch)
3858             << " does not support unified addressing";
3859         CGM.Error(Clause->getBeginLoc(), Out.str());
3860         return;
3861       }
3862       case CudaArch::SM_60:
3863       case CudaArch::SM_61:
3864       case CudaArch::SM_62:
3865       case CudaArch::SM_70:
3866       case CudaArch::SM_72:
3867       case CudaArch::SM_75:
3868       case CudaArch::SM_80:
3869       case CudaArch::SM_86:
3870       case CudaArch::GFX600:
3871       case CudaArch::GFX601:
3872       case CudaArch::GFX602:
3873       case CudaArch::GFX700:
3874       case CudaArch::GFX701:
3875       case CudaArch::GFX702:
3876       case CudaArch::GFX703:
3877       case CudaArch::GFX704:
3878       case CudaArch::GFX705:
3879       case CudaArch::GFX801:
3880       case CudaArch::GFX802:
3881       case CudaArch::GFX803:
3882       case CudaArch::GFX805:
3883       case CudaArch::GFX810:
3884       case CudaArch::GFX900:
3885       case CudaArch::GFX902:
3886       case CudaArch::GFX904:
3887       case CudaArch::GFX906:
3888       case CudaArch::GFX908:
3889       case CudaArch::GFX909:
3890       case CudaArch::GFX90a:
3891       case CudaArch::GFX90c:
3892       case CudaArch::GFX1010:
3893       case CudaArch::GFX1011:
3894       case CudaArch::GFX1012:
3895       case CudaArch::GFX1013:
3896       case CudaArch::GFX1030:
3897       case CudaArch::GFX1031:
3898       case CudaArch::GFX1032:
3899       case CudaArch::GFX1033:
3900       case CudaArch::GFX1034:
3901       case CudaArch::GFX1035:
3902       case CudaArch::UNUSED:
3903       case CudaArch::UNKNOWN:
3904         break;
3905       case CudaArch::LAST:
3906         llvm_unreachable("Unexpected Cuda arch.");
3907       }
3908     }
3909   }
3910   CGOpenMPRuntime::processRequiresDirective(D);
3911 }
3912 
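/// Module-level finalization: if any teams reductions were emitted, build a
/// union of all their record types, back it with a single internal global
/// buffer, and point KernelTeamsReductionPtr at that buffer.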
3913 void CGOpenMPRuntimeGPU::clear() {
3915   if (!TeamsReductions.empty()) {
3916     ASTContext &C = CGM.getContext();
3917     RecordDecl *StaticRD = C.buildImplicitRecord(
3918         "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
3919     StaticRD->startDefinition();
3920     for (const RecordDecl *TeamReductionRec : TeamsReductions) {
3921       QualType RecTy = C.getRecordType(TeamReductionRec);
3922       auto *Field = FieldDecl::Create(
3923           C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
3924           C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
3925           /*BW=*/nullptr, /*Mutable=*/false,
3926           /*InitStyle=*/ICIS_NoInit);
3927       Field->setAccess(AS_public);
3928       StaticRD->addDecl(Field);
3929     }
3930     StaticRD->completeDefinition();
3931     QualType StaticTy = C.getRecordType(StaticRD);
3932     llvm::Type *LLVMReductionsBufferTy =
3933         CGM.getTypes().ConvertTypeForMem(StaticTy);
3934     // FIXME: nvlink does not handle weak linkage correctly (objects with
3935     // different sizes are reported as erroneous).
3936     // Restore CommonLinkage as soon as nvlink is fixed.
3937     auto *GV = new llvm::GlobalVariable(
3938         CGM.getModule(), LLVMReductionsBufferTy,
3939         /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
3940         llvm::Constant::getNullValue(LLVMReductionsBufferTy),
3941         "_openmp_teams_reductions_buffer_$_");
3942     KernelTeamsReductionPtr->setInitializer(
3943         llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
3944                                                              CGM.VoidPtrTy));
3945   }
3946   CGOpenMPRuntime::clear();
3947 }
3948 
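/// Emit a call to __kmpc_get_hardware_num_threads_in_block, declaring the
/// runtime function in the module first if it is not present yet.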
3949 llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) {
3950   CGBuilderTy &Bld = CGF.Builder;
3951   llvm::Module *M = &CGF.CGM.getModule();
3952   const char *FnName = "__kmpc_get_hardware_num_threads_in_block";
3953   llvm::Function *F = M->getFunction(FnName);
3954   if (!F) {
3955     F = llvm::Function::Create(
3956         llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false),
3957         llvm::GlobalVariable::ExternalLinkage, FnName, &CGF.CGM.getModule());
3958   }
3959   return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
3960 }
3961 
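/// Emit a call returning the calling thread's id within its block/workgroup.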
3962 llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) {
3963   ArrayRef<llvm::Value *> Args{};
3964   return CGF.EmitRuntimeCall(
3965       OMPBuilder.getOrCreateRuntimeFunction(
3966           CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block),
3967       Args);
3968 }
3969 
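/// Emit a call returning the warp (NVPTX) or wavefront (AMDGCN) size.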
3970 llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) {
3971   ArrayRef<llvm::Value *> Args{};
3972   return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3973                                  CGM.getModule(), OMPRTL___kmpc_get_warp_size),
3974                              Args);
3975 }
3976