//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a generalized class for OpenMP runtime code generation
// specialized by GPU targets NVPTX and AMDGCN.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/MathExtras.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeGPU::EM_Unknown;
  CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

/// GPU Configuration: This information can be derived from CUDA registers;
/// however, providing compile-time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
  /// specific Grid Values like GV_Warp_Size, GV_Slot_Size

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}

static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
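  // Emit one field per globalized variable. Declarations that escape only
  // into the teams region keep their original (non-reference) type and
  // declared alignment; all others are widened to an array of BufSize
  // elements and over-aligned to at least GlobalMemoryAlignment.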
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr,
                                    ArrayType::Normal, 0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct;
          // such private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
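      // (Such directives report a single OMPD_unknown capture region, which
      // is exactly what the early exit below checks for.)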
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Returns the set of escaped local variables that are actually parameters
  /// passed by value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  unsigned LaneIDBits =
      llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
  unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
  auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
  return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
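
// For the common 32-lane warp these helpers reduce to simple bit twiddling:
// LaneIDBits is 5, so the warp id is tid >> 5 and the lane id is tid & 0x1f.
// Thread 70, for example, is lane 6 of warp 2.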

CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
                                          : CGOpenMPRuntimeGPU::Generic;
}

/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
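
// In short: the combined 'target parallel*' and 'target ... simd' forms are
// always SPMD-eligible, a bare 'target teams distribute' never is, and plain
// 'target' / 'target teams' regions qualify only when a parallel directive is
// found directly nested inside.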

/// Check if the directive is loop-based and either has no schedule clause or
/// uses static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}

/// Check for inner (nested) lightweight runtime construct, if any
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}

/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                           StringRef ParentName,
                                           llvm::Function *&OutlinedFn,
                                           llvm::Constant *&OutlinedFnID,
                                           bool IsOffloadEntry,
                                           const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
    }
  } Action(EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
                                        EntryFunctionState &EST, bool IsSPMD) {
  CGBuilderTy &Bld = CGF.Builder;
  Bld.restoreIP(
      OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
  IsInTargetMasterThreadRegion = IsSPMD;
  if (!IsSPMD)
    emitGenericVarsProlog(CGF, EST.Loc);
}

void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
                                          EntryFunctionState &EST,
                                          bool IsSPMD) {
  if (!IsSPMD)
    emitGenericVarsEpilog(CGF);

  CGBuilderTy &Bld = CGF.Builder;
  OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
}

void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
                                        StringRef ParentName,
                                        llvm::Function *&OutlinedFn,
                                        llvm::Constant *&OutlinedFnID,
                                        bool IsOffloadEntry,
                                        const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeGPU &RT;
    CGOpenMPRuntimeGPU::EntryFunctionState &EST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
                         CGOpenMPRuntimeGPU::EntryFunctionState &EST)
        : RT(RT), EST(EST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
    }
  } Action(*this, EST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic', or 'spmd' depending
// on the target directive. This variable is picked up by the offload library
// to setup the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master, otherwise,
// all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode = new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
                                              : OMP_TGT_EXEC_MODE_GENERIC),
      Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
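
// A kernel outlined as, e.g., __omp_offloading_<id>_foo_l12 thus gets a
// companion byte named __omp_offloading_<id>_foo_l12_exec_mode; the offload
// library reads it before launch to decide whether to reserve a master warp
// (generic) or let every warp do parallel work (SPMD).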

void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
                                            llvm::Constant *Addr,
                                            uint64_t Size, int32_t,
                                            llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  llvm::Function *Fn = dyn_cast<llvm::Function>(Addr);
  if (!Fn)
    return;

  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Fn), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));

  // Add a function attribute for the kernel.
  Fn->addFnAttr(llvm::Attribute::get(Ctx, "kernel"));
}

void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special mode Undefined: the combination of Non-SPMD mode + SimpleRuntime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}

CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP can only handle device code.");

  llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
  if (CGM.getLangOpts().OpenMPTargetNewRuntime &&
      !CGM.getLangOpts().OMPHostIRFile.empty()) {
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
                                "__omp_rtl_debug_kind");
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
                                "__omp_rtl_assume_teams_oversubscription");
    OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
                                "__omp_rtl_assume_threads_oversubscription");
  }
}

void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
                                            ProcBindKind ProcBind,
                                            SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}
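
// The next two clause handlers are intentionally no-ops on the device: the
// num_threads value is forwarded straight to __kmpc_parallel_51 when the
// parallel region itself is emitted (see emitParallelCall below), and
// num_teams/thread_limit are handled by the host-side launch logic.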

void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
                                              llvm::Value *NumThreads,
                                              SourceLocation Loc) {
  // Nothing to do.
}

void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
                                            const Expr *NumTeams,
                                            const Expr *ThreadLimit,
                                            SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}

/// Get list of reduction variables from the teams ... directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
  // Globalize team reduction variables unconditionally in all modes.
  if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields, WarpSize);
    }
  } else if (!LastPrivatesReductions.empty()) {
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;

  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().MappedParams =
            std::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);

  return OutlinedFun;
}

void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
                                               SourceLocation Loc,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  CGBuilderTy &Bld = CGF.Builder;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;

  for (auto &Rec : I->getSecond().LocalVarData) {
    const auto *VD = cast<VarDecl>(Rec.first);
    bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
    QualType VarTy = VD->getType();

    // Get the local allocation of a firstprivate variable before sharing
    llvm::Value *ParValue;
    if (EscapedParam) {
      LValue ParLVal =
          CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
      ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
    }

    // Allocate space for the variable to be globalized
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
    llvm::CallBase *VoidPtr =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
                            AllocArgs, VD->getName());
    // FIXME: We should use the variable's actual alignment as an argument.
    VoidPtr->addRetAttr(llvm::Attribute::get(
        CGM.getLLVMContext(), llvm::Attribute::Alignment,
        CGM.getContext().getTargetInfo().getNewAlign() / 8));

    // Cast the void pointer and get the address of the globalized variable.
    llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
    llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
    LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
    Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
    Rec.second.GlobalizedVal = VoidPtr;

    // Assign the local allocation to the newly globalized location.
    if (EscapedParam) {
      CGF.EmitStoreOfScalar(ParValue, VarAddr);
      I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
    }
    if (auto *DI = CGF.getDebugInfo())
      VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
  }
  for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Use actual memory size of the VLA object including the padding
    // for alignment purposes.
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());

    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);

    // Allocate space for this VLA object to be globalized.
    llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
    llvm::CallBase *VoidPtr =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_alloc_shared),
                            AllocArgs, VD->getName());
    VoidPtr->addRetAttr(
        llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
                             CGM.getContext().getTargetInfo().getNewAlign()));

    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
        std::pair<llvm::Value *, llvm::Value *>(
            {VoidPtr, CGF.getTypeSize(VD->getType())}));
    LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress(CGF));
  }
  I->getSecond().MappedParams->apply(CGF);
}

void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                               bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
      getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
    return;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    // Deallocate the memory for each globalized VLA object
    for (auto AddrSizePair :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
                          {AddrSizePair.first, AddrSizePair.second});
    }
    // Deallocate the memory for each globalized value
    for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
      const auto *VD = cast<VarDecl>(Rec.first);
      I->getSecond().MappedParams->restore(CGF);

      llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
                                 CGF.getTypeSize(VD->getType())};
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_free_shared),
                          FreeArgs);
    }
  }
}

void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
                                       const OMPExecutableDirective &D,
                                       SourceLocation Loc,
                                       llvm::Function *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          llvm::Function *OutlinedFn,
                                          ArrayRef<llvm::Value *> CapturedVars,
                                          const Expr *IfCond,
                                          llvm::Value *NumThreads) {
  if (!CGF.HaveInsertPoint())
    return;

  auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
                        NumThreads](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *NumThreadsVal = NumThreads;
    llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
    llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
    if (WFn)
      ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
    llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);

    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    // TODO: Is that needed?
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);

    Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
        llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
        "captured_vars_addrs");
    // There's something to share.
    if (!CapturedVars.empty()) {
      // Prepare for parallel region. Indicate the outlined function.
      ASTContext &Ctx = CGF.getContext();
      unsigned Idx = 0;
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
        llvm::Value *PtrV;
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }

    llvm::Value *IfCondVal = nullptr;
    if (IfCond)
      IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
                                    /* isSigned */ false);
    else
      IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);

    if (!NumThreadsVal)
      NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
    else
      NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty);

    assert(IfCondVal && "Expected a value");
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
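    // Arguments to __kmpc_parallel_51, in order: source location, global
    // thread id, if-condition, requested num_threads (-1 when unspecified),
    // proc_bind (-1 when unspecified), the outlined function, the
    // data-sharing wrapper if one was created (null otherwise), the array of
    // captured variable addresses, and its length.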
    llvm::Value *Args[] = {
        RTLoc,
        getThreadID(CGF, Loc),
        IfCondVal,
        NumThreadsVal,
        llvm::ConstantInt::get(CGF.Int32Ty, -1),
        FnPtr,
        ID,
        Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
                                   CGF.VoidPtrPtrTy),
        llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_parallel_51),
                        Args);
  };

  RegionCodeGenTy RCG(ParallelGen);
  RCG(CGF);
}

void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
  // This function does not use parameters, so we can emit just default values.
  llvm::Value *Args[] = {
      llvm::ConstantPointerNull::get(
          cast<llvm::PointerType>(getIdentTyPointerTy())),
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
                      Args);
}

void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind Kind, bool,
                                         bool) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};

  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_barrier),
                      Args);
}
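
// The device implementation of a critical region below serializes the team:
// conceptually it emits
//
//   for (counter = 0; counter < <threads-in-team>; ++counter) {
//     if (<my thread id> == counter)
//       <critical body>;
//     __kmpc_syncwarp(<active mask>);
//   }
//
// so the threads enter the protected body one at a time, in thread-id order.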
1643 llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 1644 CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask)); 1645 // Fetch team-local id of the thread. 1646 llvm::Value *ThreadID = RT.getGPUThreadID(CGF); 1647 1648 // Get the width of the team. 1649 llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF); 1650 1651 // Initialize the counter variable for the loop. 1652 QualType Int32Ty = 1653 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0); 1654 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter"); 1655 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty); 1656 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal, 1657 /*isInit=*/true); 1658 1659 // Block checks if loop counter exceeds upper bound. 1660 CGF.EmitBlock(LoopBB); 1661 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc); 1662 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth); 1663 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB); 1664 1665 // Block tests which single thread should execute region, and which threads 1666 // should go straight to synchronisation point. 1667 CGF.EmitBlock(TestBB); 1668 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc); 1669 llvm::Value *CmpThreadToCounter = 1670 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal); 1671 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB); 1672 1673 // Block emits the body of the critical region. 1674 CGF.EmitBlock(BodyBB); 1675 1676 // Output the critical statement. 1677 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc, 1678 Hint); 1679 1680 // After the body surrounded by the critical region, the single executing 1681 // thread will jump to the synchronisation point. 1682 // Block waits for all threads in current team to finish then increments the 1683 // counter variable and returns to the loop. 1684 CGF.EmitBlock(SyncBB); 1685 // Reconverge active threads in the warp. 1686 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 1687 CGM.getModule(), OMPRTL___kmpc_syncwarp), 1688 Mask); 1689 1690 llvm::Value *IncCounterVal = 1691 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1)); 1692 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal); 1693 CGF.EmitBranch(LoopBB); 1694 1695 // Block that is reached when all threads in the team complete the region. 1696 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 1697 } 1698 1699 /// Cast value to the specified type. 
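/// Same-sized values are bitcast, integer-to-integer conversions use a
/// sign-aware integer cast, and any other conversion goes through a memory
/// temporary: the value is stored with the source type and reloaded with the
/// destination type.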
1700 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1701 QualType ValTy, QualType CastTy,
1702 SourceLocation Loc) {
1703 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1704 "Cast type must be sized.");
1705 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1706 "Val type must be sized.");
1707 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1708 if (ValTy == CastTy)
1709 return Val;
1710 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1711 CGF.getContext().getTypeSizeInChars(CastTy))
1712 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1713 if (CastTy->isIntegerType() && ValTy->isIntegerType())
1714 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1715 CastTy->hasSignedIntegerRepresentation());
1716 Address CastItem = CGF.CreateMemTemp(CastTy);
1717 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1718 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
1719 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1720 LValueBaseInfo(AlignmentSource::Type),
1721 TBAAAccessInfo());
1722 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1723 LValueBaseInfo(AlignmentSource::Type),
1724 TBAAAccessInfo());
1725 }
1726
1727 /// This function creates calls to one of two shuffle functions to copy
1728 /// variables between lanes in a warp.
1729 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1730 llvm::Value *Elem,
1731 QualType ElemType,
1732 llvm::Value *Offset,
1733 SourceLocation Loc) {
1734 CodeGenModule &CGM = CGF.CGM;
1735 CGBuilderTy &Bld = CGF.Builder;
1736 CGOpenMPRuntimeGPU &RT =
1737 *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1738 llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1739
1740 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1741 assert(Size.getQuantity() <= 8 &&
1742 "Unsupported bitwidth in shuffle instruction.");
1743
1744 RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1745 ? OMPRTL___kmpc_shuffle_int32
1746 : OMPRTL___kmpc_shuffle_int64;
1747
1748 // Cast all types to 32- or 64-bit values before calling shuffle routines.
1749 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1750 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1751 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1752 llvm::Value *WarpSize =
1753 Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1754
1755 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1756 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1757 {ElemCast, Offset, WarpSize});
1758
1759 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1760 }
1761
1762 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1763 Address DestAddr, QualType ElemType,
1764 llvm::Value *Offset, SourceLocation Loc) {
1765 CGBuilderTy &Bld = CGF.Builder;
1766
1767 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1768 // Create the loop over the big sized data.
1769 // ptr = (void*)Elem;
1770 // ptrEnd = (void*) Elem + 1;
1771 // Step = 8;
1772 // while (ptr + Step < ptrEnd)
1773 // shuffle((int64_t)*ptr);
1774 // Step = 4;
1775 // while (ptr + Step < ptrEnd)
1776 // shuffle((int32_t)*ptr);
1777 // ...
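// The loop below walks the element in decreasing power-of-two chunks (8, 4,
// 2, 1 bytes), shuffling each chunk that still fits in the remaining size;
// when a chunk size divides the data more than once, an explicit loop over
// the sub-elements is emitted.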
1778 Address ElemPtr = DestAddr; 1779 Address Ptr = SrcAddr; 1780 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast( 1781 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy); 1782 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) { 1783 if (Size < CharUnits::fromQuantity(IntSize)) 1784 continue; 1785 QualType IntType = CGF.getContext().getIntTypeForBitwidth( 1786 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)), 1787 /*Signed=*/1); 1788 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType); 1789 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo()); 1790 ElemPtr = 1791 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo()); 1792 if (Size.getQuantity() / IntSize > 1) { 1793 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond"); 1794 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then"); 1795 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit"); 1796 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock(); 1797 CGF.EmitBlock(PreCondBB); 1798 llvm::PHINode *PhiSrc = 1799 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2); 1800 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB); 1801 llvm::PHINode *PhiDest = 1802 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2); 1803 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB); 1804 Ptr = Address(PhiSrc, Ptr.getAlignment()); 1805 ElemPtr = Address(PhiDest, ElemPtr.getAlignment()); 1806 llvm::Value *PtrDiff = Bld.CreatePtrDiff( 1807 CGF.Int8Ty, PtrEnd.getPointer(), 1808 Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(), 1809 CGF.VoidPtrTy)); 1810 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)), 1811 ThenBB, ExitBB); 1812 CGF.EmitBlock(ThenBB); 1813 llvm::Value *Res = createRuntimeShuffleFunction( 1814 CGF, 1815 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, 1816 LValueBaseInfo(AlignmentSource::Type), 1817 TBAAAccessInfo()), 1818 IntType, Offset, Loc); 1819 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, 1820 LValueBaseInfo(AlignmentSource::Type), 1821 TBAAAccessInfo()); 1822 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1); 1823 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1); 1824 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB); 1825 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB); 1826 CGF.EmitBranch(PreCondBB); 1827 CGF.EmitBlock(ExitBB); 1828 } else { 1829 llvm::Value *Res = createRuntimeShuffleFunction( 1830 CGF, 1831 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, 1832 LValueBaseInfo(AlignmentSource::Type), 1833 TBAAAccessInfo()), 1834 IntType, Offset, Loc); 1835 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, 1836 LValueBaseInfo(AlignmentSource::Type), 1837 TBAAAccessInfo()); 1838 Ptr = Bld.CreateConstGEP(Ptr, 1); 1839 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1); 1840 } 1841 Size = Size % IntSize; 1842 } 1843 } 1844 1845 namespace { 1846 enum CopyAction : unsigned { 1847 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in 1848 // the warp using shuffle instructions. 1849 RemoteLaneToThread, 1850 // ThreadCopy: Make a copy of a Reduce list on the thread's stack. 1851 ThreadCopy, 1852 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad. 1853 ThreadToScratchpad, 1854 // ScratchpadToThread: Copy from a scratchpad array in global memory 1855 // containing team-reduced data to a thread's stack. 
1856 ScratchpadToThread, 1857 }; 1858 } // namespace 1859 1860 struct CopyOptionsTy { 1861 llvm::Value *RemoteLaneOffset; 1862 llvm::Value *ScratchpadIndex; 1863 llvm::Value *ScratchpadWidth; 1864 }; 1865 1866 /// Emit instructions to copy a Reduce list, which contains partially 1867 /// aggregated values, in the specified direction. 1868 static void emitReductionListCopy( 1869 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy, 1870 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase, 1871 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) { 1872 1873 CodeGenModule &CGM = CGF.CGM; 1874 ASTContext &C = CGM.getContext(); 1875 CGBuilderTy &Bld = CGF.Builder; 1876 1877 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset; 1878 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex; 1879 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth; 1880 1881 // Iterates, element-by-element, through the source Reduce list and 1882 // make a copy. 1883 unsigned Idx = 0; 1884 unsigned Size = Privates.size(); 1885 for (const Expr *Private : Privates) { 1886 Address SrcElementAddr = Address::invalid(); 1887 Address DestElementAddr = Address::invalid(); 1888 Address DestElementPtrAddr = Address::invalid(); 1889 // Should we shuffle in an element from a remote lane? 1890 bool ShuffleInElement = false; 1891 // Set to true to update the pointer in the dest Reduce list to a 1892 // newly created element. 1893 bool UpdateDestListPtr = false; 1894 // Increment the src or dest pointer to the scratchpad, for each 1895 // new element. 1896 bool IncrScratchpadSrc = false; 1897 bool IncrScratchpadDest = false; 1898 1899 switch (Action) { 1900 case RemoteLaneToThread: { 1901 // Step 1.1: Get the address for the src element in the Reduce list. 1902 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); 1903 SrcElementAddr = CGF.EmitLoadOfPointer( 1904 SrcElementPtrAddr, 1905 C.getPointerType(Private->getType())->castAs<PointerType>()); 1906 1907 // Step 1.2: Create a temporary to store the element in the destination 1908 // Reduce list. 1909 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 1910 DestElementAddr = 1911 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element"); 1912 ShuffleInElement = true; 1913 UpdateDestListPtr = true; 1914 break; 1915 } 1916 case ThreadCopy: { 1917 // Step 1.1: Get the address for the src element in the Reduce list. 1918 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); 1919 SrcElementAddr = CGF.EmitLoadOfPointer( 1920 SrcElementPtrAddr, 1921 C.getPointerType(Private->getType())->castAs<PointerType>()); 1922 1923 // Step 1.2: Get the address for dest element. The destination 1924 // element has already been created on the thread's stack. 1925 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 1926 DestElementAddr = CGF.EmitLoadOfPointer( 1927 DestElementPtrAddr, 1928 C.getPointerType(Private->getType())->castAs<PointerType>()); 1929 break; 1930 } 1931 case ThreadToScratchpad: { 1932 // Step 1.1: Get the address for the src element in the Reduce list. 1933 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); 1934 SrcElementAddr = CGF.EmitLoadOfPointer( 1935 SrcElementPtrAddr, 1936 C.getPointerType(Private->getType())->castAs<PointerType>()); 1937 1938 // Step 1.2: Get the address for dest element: 1939 // address = base + index * ElementSizeInChars. 
1940 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 1941 llvm::Value *CurrentOffset = 1942 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex); 1943 llvm::Value *ScratchPadElemAbsolutePtrVal = 1944 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset); 1945 ScratchPadElemAbsolutePtrVal = 1946 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy); 1947 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, 1948 C.getTypeAlignInChars(Private->getType())); 1949 IncrScratchpadDest = true; 1950 break; 1951 } 1952 case ScratchpadToThread: { 1953 // Step 1.1: Get the address for the src element in the scratchpad. 1954 // address = base + index * ElementSizeInChars. 1955 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 1956 llvm::Value *CurrentOffset = 1957 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex); 1958 llvm::Value *ScratchPadElemAbsolutePtrVal = 1959 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset); 1960 ScratchPadElemAbsolutePtrVal = 1961 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy); 1962 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, 1963 C.getTypeAlignInChars(Private->getType())); 1964 IncrScratchpadSrc = true; 1965 1966 // Step 1.2: Create a temporary to store the element in the destination 1967 // Reduce list. 1968 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 1969 DestElementAddr = 1970 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element"); 1971 UpdateDestListPtr = true; 1972 break; 1973 } 1974 } 1975 1976 // Regardless of src and dest of copy, we emit the load of src 1977 // element as this is required in all directions 1978 SrcElementAddr = Bld.CreateElementBitCast( 1979 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType())); 1980 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr, 1981 SrcElementAddr.getElementType()); 1982 1983 // Now that all active lanes have read the element in the 1984 // Reduce list, shuffle over the value from the remote lane. 1985 if (ShuffleInElement) { 1986 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(), 1987 RemoteLaneOffset, Private->getExprLoc()); 1988 } else { 1989 switch (CGF.getEvaluationKind(Private->getType())) { 1990 case TEK_Scalar: { 1991 llvm::Value *Elem = CGF.EmitLoadOfScalar( 1992 SrcElementAddr, /*Volatile=*/false, Private->getType(), 1993 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type), 1994 TBAAAccessInfo()); 1995 // Store the source element value to the dest element address. 1996 CGF.EmitStoreOfScalar( 1997 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(), 1998 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 1999 break; 2000 } 2001 case TEK_Complex: { 2002 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex( 2003 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), 2004 Private->getExprLoc()); 2005 CGF.EmitStoreOfComplex( 2006 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()), 2007 /*isInit=*/false); 2008 break; 2009 } 2010 case TEK_Aggregate: 2011 CGF.EmitAggregateCopy( 2012 CGF.MakeAddrLValue(DestElementAddr, Private->getType()), 2013 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), 2014 Private->getType(), AggValueSlot::DoesNotOverlap); 2015 break; 2016 } 2017 } 2018 2019 // Step 3.1: Modify reference in dest Reduce list as needed. 2020 // Modifying the reference in Reduce list to point to the newly 2021 // created element. 
The element is live in the current function 2022 // scope and that of functions it invokes (i.e., reduce_function). 2023 // RemoteReduceData[i] = (void*)&RemoteElem 2024 if (UpdateDestListPtr) { 2025 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast( 2026 DestElementAddr.getPointer(), CGF.VoidPtrTy), 2027 DestElementPtrAddr, /*Volatile=*/false, 2028 C.VoidPtrTy); 2029 } 2030 2031 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting 2032 // address of the next element in scratchpad memory, unless we're currently 2033 // processing the last one. Memory alignment is also taken care of here. 2034 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) { 2035 llvm::Value *ScratchpadBasePtr = 2036 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer(); 2037 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 2038 ScratchpadBasePtr = Bld.CreateNUWAdd( 2039 ScratchpadBasePtr, 2040 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars)); 2041 2042 // Take care of global memory alignment for performance 2043 ScratchpadBasePtr = Bld.CreateNUWSub( 2044 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 2045 ScratchpadBasePtr = Bld.CreateUDiv( 2046 ScratchpadBasePtr, 2047 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 2048 ScratchpadBasePtr = Bld.CreateNUWAdd( 2049 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 2050 ScratchpadBasePtr = Bld.CreateNUWMul( 2051 ScratchpadBasePtr, 2052 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 2053 2054 if (IncrScratchpadDest) 2055 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 2056 else /* IncrScratchpadSrc = true */ 2057 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 2058 } 2059 2060 ++Idx; 2061 } 2062 } 2063 2064 /// This function emits a helper that gathers Reduce lists from the first 2065 /// lane of every active warp to lanes in the first warp. 2066 /// 2067 /// void inter_warp_copy_func(void* reduce_data, num_warps) 2068 /// shared smem[warp_size]; 2069 /// For all data entries D in reduce_data: 2070 /// sync 2071 /// If (I am the first lane in each warp) 2072 /// Copy my local D to smem[warp_id] 2073 /// sync 2074 /// if (I am the first warp) 2075 /// Copy smem[thread_id] to my local D 2076 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, 2077 ArrayRef<const Expr *> Privates, 2078 QualType ReductionArrayTy, 2079 SourceLocation Loc) { 2080 ASTContext &C = CGM.getContext(); 2081 llvm::Module &M = CGM.getModule(); 2082 2083 // ReduceList: thread local Reduce list. 2084 // At the stage of the computation when this function is called, partially 2085 // aggregated values reside in the first lane of every active warp. 2086 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2087 C.VoidPtrTy, ImplicitParamDecl::Other); 2088 // NumWarps: number of warps active in the parallel region. This could 2089 // be smaller than 32 (max warps in a CTA) for partial block reduction. 
2090 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2091 C.getIntTypeForBitwidth(32, /* Signed */ true), 2092 ImplicitParamDecl::Other); 2093 FunctionArgList Args; 2094 Args.push_back(&ReduceListArg); 2095 Args.push_back(&NumWarpsArg); 2096 2097 const CGFunctionInfo &CGFI = 2098 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2099 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), 2100 llvm::GlobalValue::InternalLinkage, 2101 "_omp_reduction_inter_warp_copy_func", &M); 2102 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2103 Fn->setDoesNotRecurse(); 2104 CodeGenFunction CGF(CGM); 2105 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2106 2107 CGBuilderTy &Bld = CGF.Builder; 2108 2109 // This array is used as a medium to transfer, one reduce element at a time, 2110 // the data from the first lane of every warp to lanes in the first warp 2111 // in order to perform the final step of a reduction in a parallel region 2112 // (reduction across warps). The array is placed in NVPTX __shared__ memory 2113 // for reduced latency, as well as to have a distinct copy for concurrently 2114 // executing target regions. The array is declared with common linkage so 2115 // as to be shared across compilation units. 2116 StringRef TransferMediumName = 2117 "__openmp_nvptx_data_transfer_temporary_storage"; 2118 llvm::GlobalVariable *TransferMedium = 2119 M.getGlobalVariable(TransferMediumName); 2120 unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size; 2121 if (!TransferMedium) { 2122 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize); 2123 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared); 2124 TransferMedium = new llvm::GlobalVariable( 2125 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage, 2126 llvm::UndefValue::get(Ty), TransferMediumName, 2127 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, 2128 SharedAddressSpace); 2129 CGM.addCompilerUsedGlobal(TransferMedium); 2130 } 2131 2132 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); 2133 // Get the CUDA thread id of the current OpenMP thread on the GPU. 2134 llvm::Value *ThreadID = RT.getGPUThreadID(CGF); 2135 // nvptx_lane_id = nvptx_id % warpsize 2136 llvm::Value *LaneID = getNVPTXLaneID(CGF); 2137 // nvptx_warp_id = nvptx_id / warpsize 2138 llvm::Value *WarpID = getNVPTXWarpID(CGF); 2139 2140 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2141 Address LocalReduceList( 2142 Bld.CreatePointerBitCastOrAddrSpaceCast( 2143 CGF.EmitLoadOfScalar( 2144 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc, 2145 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()), 2146 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2147 CGF.getPointerAlign()); 2148 2149 unsigned Idx = 0; 2150 for (const Expr *Private : Privates) { 2151 // 2152 // Warp master copies reduce element to transfer medium in __shared__ 2153 // memory. 
2154 // 2155 unsigned RealTySize = 2156 C.getTypeSizeInChars(Private->getType()) 2157 .alignTo(C.getTypeAlignInChars(Private->getType())) 2158 .getQuantity(); 2159 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) { 2160 unsigned NumIters = RealTySize / TySize; 2161 if (NumIters == 0) 2162 continue; 2163 QualType CType = C.getIntTypeForBitwidth( 2164 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1); 2165 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType); 2166 CharUnits Align = CharUnits::fromQuantity(TySize); 2167 llvm::Value *Cnt = nullptr; 2168 Address CntAddr = Address::invalid(); 2169 llvm::BasicBlock *PrecondBB = nullptr; 2170 llvm::BasicBlock *ExitBB = nullptr; 2171 if (NumIters > 1) { 2172 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr"); 2173 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr, 2174 /*Volatile=*/false, C.IntTy); 2175 PrecondBB = CGF.createBasicBlock("precond"); 2176 ExitBB = CGF.createBasicBlock("exit"); 2177 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body"); 2178 // There is no need to emit line number for unconditional branch. 2179 (void)ApplyDebugLocation::CreateEmpty(CGF); 2180 CGF.EmitBlock(PrecondBB); 2181 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc); 2182 llvm::Value *Cmp = 2183 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters)); 2184 Bld.CreateCondBr(Cmp, BodyBB, ExitBB); 2185 CGF.EmitBlock(BodyBB); 2186 } 2187 // kmpc_barrier. 2188 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 2189 /*EmitChecks=*/false, 2190 /*ForceSimpleCall=*/true); 2191 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); 2192 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); 2193 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); 2194 2195 // if (lane_id == 0) 2196 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master"); 2197 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB); 2198 CGF.EmitBlock(ThenBB); 2199 2200 // Reduce element = LocalReduceList[i] 2201 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2202 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 2203 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 2204 // elemptr = ((CopyType*)(elemptrptr)) + I 2205 Address ElemPtr = Address(ElemPtrPtr, Align); 2206 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType); 2207 if (NumIters > 1) 2208 ElemPtr = Bld.CreateGEP(ElemPtr, Cnt); 2209 2210 // Get pointer to location in transfer medium. 2211 // MediumPtr = &medium[warp_id] 2212 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP( 2213 TransferMedium->getValueType(), TransferMedium, 2214 {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID}); 2215 Address MediumPtr(MediumPtrVal, Align); 2216 // Casting to actual data type. 2217 // MediumPtr = (CopyType*)MediumPtrAddr; 2218 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType); 2219 2220 // elem = *elemptr 2221 //*MediumPtr = elem 2222 llvm::Value *Elem = CGF.EmitLoadOfScalar( 2223 ElemPtr, /*Volatile=*/false, CType, Loc, 2224 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 2225 // Store the source element value to the dest element address. 2226 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType, 2227 LValueBaseInfo(AlignmentSource::Type), 2228 TBAAAccessInfo()); 2229 2230 Bld.CreateBr(MergeBB); 2231 2232 CGF.EmitBlock(ElseBB); 2233 Bld.CreateBr(MergeBB); 2234 2235 CGF.EmitBlock(MergeBB); 2236 2237 // kmpc_barrier. 
2238 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 2239 /*EmitChecks=*/false, 2240 /*ForceSimpleCall=*/true); 2241 2242 // 2243 // Warp 0 copies reduce element from transfer medium. 2244 // 2245 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then"); 2246 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else"); 2247 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont"); 2248 2249 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); 2250 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( 2251 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); 2252 2253 // Up to 32 threads in warp 0 are active. 2254 llvm::Value *IsActiveThread = 2255 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread"); 2256 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB); 2257 2258 CGF.EmitBlock(W0ThenBB); 2259 2260 // SrcMediumPtr = &medium[tid] 2261 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP( 2262 TransferMedium->getValueType(), TransferMedium, 2263 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID}); 2264 Address SrcMediumPtr(SrcMediumPtrVal, Align); 2265 // SrcMediumVal = *SrcMediumPtr; 2266 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType); 2267 2268 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I 2269 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2270 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar( 2271 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc); 2272 Address TargetElemPtr = Address(TargetElemPtrVal, Align); 2273 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType); 2274 if (NumIters > 1) 2275 TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt); 2276 2277 // *TargetElemPtr = SrcMediumVal; 2278 llvm::Value *SrcMediumValue = 2279 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc); 2280 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false, 2281 CType); 2282 Bld.CreateBr(W0MergeBB); 2283 2284 CGF.EmitBlock(W0ElseBB); 2285 Bld.CreateBr(W0MergeBB); 2286 2287 CGF.EmitBlock(W0MergeBB); 2288 2289 if (NumIters > 1) { 2290 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1)); 2291 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy); 2292 CGF.EmitBranch(PrecondBB); 2293 (void)ApplyDebugLocation::CreateEmpty(CGF); 2294 CGF.EmitBlock(ExitBB); 2295 } 2296 RealTySize %= TySize; 2297 } 2298 ++Idx; 2299 } 2300 2301 CGF.FinishFunction(); 2302 return Fn; 2303 } 2304 2305 /// Emit a helper that reduces data across two OpenMP threads (lanes) 2306 /// in the same warp. It uses shuffle instructions to copy over data from 2307 /// a remote lane's stack. The reduction algorithm performed is specified 2308 /// by the fourth parameter. 2309 /// 2310 /// Algorithm Versions. 2311 /// Full Warp Reduce (argument value 0): 2312 /// This algorithm assumes that all 32 lanes are active and gathers 2313 /// data from these 32 lanes, producing a single resultant value. 2314 /// Contiguous Partial Warp Reduce (argument value 1): 2315 /// This algorithm assumes that only a *contiguous* subset of lanes 2316 /// are active. This happens for the last warp in a parallel region 2317 /// when the user specified num_threads is not an integer multiple of 2318 /// 32. This contiguous subset always starts with the zeroth lane. 2319 /// Partial Warp Reduce (argument value 2): 2320 /// This algorithm gathers data from any number of lanes at any position. 2321 /// All reduced values are stored in the lowest possible lane. 
The set 2322 /// of problems every algorithm addresses is a super set of those 2323 /// addressable by algorithms with a lower version number. Overhead 2324 /// increases as algorithm version increases. 2325 /// 2326 /// Terminology 2327 /// Reduce element: 2328 /// Reduce element refers to the individual data field with primitive 2329 /// data types to be combined and reduced across threads. 2330 /// Reduce list: 2331 /// Reduce list refers to a collection of local, thread-private 2332 /// reduce elements. 2333 /// Remote Reduce list: 2334 /// Remote Reduce list refers to a collection of remote (relative to 2335 /// the current thread) reduce elements. 2336 /// 2337 /// We distinguish between three states of threads that are important to 2338 /// the implementation of this function. 2339 /// Alive threads: 2340 /// Threads in a warp executing the SIMT instruction, as distinguished from 2341 /// threads that are inactive due to divergent control flow. 2342 /// Active threads: 2343 /// The minimal set of threads that has to be alive upon entry to this 2344 /// function. The computation is correct iff active threads are alive. 2345 /// Some threads are alive but they are not active because they do not 2346 /// contribute to the computation in any useful manner. Turning them off 2347 /// may introduce control flow overheads without any tangible benefits. 2348 /// Effective threads: 2349 /// In order to comply with the argument requirements of the shuffle 2350 /// function, we must keep all lanes holding data alive. But at most 2351 /// half of them perform value aggregation; we refer to this half of 2352 /// threads as effective. The other half is simply handing off their 2353 /// data. 2354 /// 2355 /// Procedure 2356 /// Value shuffle: 2357 /// In this step active threads transfer data from higher lane positions 2358 /// in the warp to lower lane positions, creating Remote Reduce list. 2359 /// Value aggregation: 2360 /// In this step, effective threads combine their thread local Reduce list 2361 /// with Remote Reduce list and store the result in the thread local 2362 /// Reduce list. 2363 /// Value copy: 2364 /// In this step, we deal with the assumption made by algorithm 2 2365 /// (i.e. contiguity assumption). When we have an odd number of lanes 2366 /// active, say 2k+1, only k threads will be effective and therefore k 2367 /// new values will be produced. However, the Reduce list owned by the 2368 /// (2k+1)th thread is ignored in the value aggregation. Therefore 2369 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so 2370 /// that the contiguity assumption still holds. 2371 static llvm::Function *emitShuffleAndReduceFunction( 2372 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2373 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) { 2374 ASTContext &C = CGM.getContext(); 2375 2376 // Thread local Reduce list used to host the values of data to be reduced. 2377 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2378 C.VoidPtrTy, ImplicitParamDecl::Other); 2379 // Current lane id; could be logical. 2380 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy, 2381 ImplicitParamDecl::Other); 2382 // Offset of the remote source lane relative to the current lane. 2383 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2384 C.ShortTy, ImplicitParamDecl::Other); 2385 // Algorithm version. This is expected to be known at compile time. 
2386 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2387 C.ShortTy, ImplicitParamDecl::Other); 2388 FunctionArgList Args; 2389 Args.push_back(&ReduceListArg); 2390 Args.push_back(&LaneIDArg); 2391 Args.push_back(&RemoteLaneOffsetArg); 2392 Args.push_back(&AlgoVerArg); 2393 2394 const CGFunctionInfo &CGFI = 2395 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2396 auto *Fn = llvm::Function::Create( 2397 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2398 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule()); 2399 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2400 Fn->setDoesNotRecurse(); 2401 2402 CodeGenFunction CGF(CGM); 2403 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2404 2405 CGBuilderTy &Bld = CGF.Builder; 2406 2407 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2408 Address LocalReduceList( 2409 Bld.CreatePointerBitCastOrAddrSpaceCast( 2410 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 2411 C.VoidPtrTy, SourceLocation()), 2412 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2413 CGF.getPointerAlign()); 2414 2415 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg); 2416 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar( 2417 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); 2418 2419 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg); 2420 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar( 2421 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); 2422 2423 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg); 2424 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar( 2425 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); 2426 2427 // Create a local thread-private variable to host the Reduce list 2428 // from a remote lane. 2429 Address RemoteReduceList = 2430 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list"); 2431 2432 // This loop iterates through the list of reduce elements and copies, 2433 // element by element, from a remote lane in the warp to RemoteReduceList, 2434 // hosted on the thread's stack. 2435 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates, 2436 LocalReduceList, RemoteReduceList, 2437 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal, 2438 /*ScratchpadIndex=*/nullptr, 2439 /*ScratchpadWidth=*/nullptr}); 2440 2441 // The actions to be performed on the Remote Reduce list is dependent 2442 // on the algorithm version. 2443 // 2444 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 && 2445 // LaneId % 2 == 0 && Offset > 0): 2446 // do the reduction value aggregation 2447 // 2448 // The thread local variable Reduce list is mutated in place to host the 2449 // reduced data, which is the aggregated value produced from local and 2450 // remote lanes. 2451 // 2452 // Note that AlgoVer is expected to be a constant integer known at compile 2453 // time. 2454 // When AlgoVer==0, the first conjunction evaluates to true, making 2455 // the entire predicate true during compile time. 2456 // When AlgoVer==1, the second conjunction has only the second part to be 2457 // evaluated during runtime. Other conjunctions evaluates to false 2458 // during compile time. 2459 // When AlgoVer==2, the third conjunction has only the second part to be 2460 // evaluated during runtime. Other conjunctions evaluates to false 2461 // during compile time. 
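// Build the three conditions described above and OR them together. Because
// AlgoVer is a compile-time constant, the conjunctions that are statically
// false fold away and only the runtime part of the relevant condition
// remains.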
2462 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal); 2463 2464 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1)); 2465 llvm::Value *CondAlgo1 = Bld.CreateAnd( 2466 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal)); 2467 2468 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2)); 2469 llvm::Value *CondAlgo2 = Bld.CreateAnd( 2470 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1)))); 2471 CondAlgo2 = Bld.CreateAnd( 2472 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0))); 2473 2474 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1); 2475 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2); 2476 2477 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); 2478 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); 2479 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); 2480 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB); 2481 2482 CGF.EmitBlock(ThenBB); 2483 // reduce_function(LocalReduceList, RemoteReduceList) 2484 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2485 LocalReduceList.getPointer(), CGF.VoidPtrTy); 2486 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2487 RemoteReduceList.getPointer(), CGF.VoidPtrTy); 2488 CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 2489 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr}); 2490 Bld.CreateBr(MergeBB); 2491 2492 CGF.EmitBlock(ElseBB); 2493 Bld.CreateBr(MergeBB); 2494 2495 CGF.EmitBlock(MergeBB); 2496 2497 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local 2498 // Reduce list. 2499 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1)); 2500 llvm::Value *CondCopy = Bld.CreateAnd( 2501 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal)); 2502 2503 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then"); 2504 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else"); 2505 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont"); 2506 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB); 2507 2508 CGF.EmitBlock(CpyThenBB); 2509 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates, 2510 RemoteReduceList, LocalReduceList); 2511 Bld.CreateBr(CpyMergeBB); 2512 2513 CGF.EmitBlock(CpyElseBB); 2514 Bld.CreateBr(CpyMergeBB); 2515 2516 CGF.EmitBlock(CpyMergeBB); 2517 2518 CGF.FinishFunction(); 2519 return Fn; 2520 } 2521 2522 /// This function emits a helper that copies all the reduction variables from 2523 /// the team into the provided global buffer for the reduction variables. 2524 /// 2525 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data) 2526 /// For all data entries D in reduce_data: 2527 /// Copy local D to buffer.D[Idx] 2528 static llvm::Value *emitListToGlobalCopyFunction( 2529 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2530 QualType ReductionArrayTy, SourceLocation Loc, 2531 const RecordDecl *TeamReductionRec, 2532 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 2533 &VarFieldMap) { 2534 ASTContext &C = CGM.getContext(); 2535 2536 // Buffer: global reduction buffer. 2537 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2538 C.VoidPtrTy, ImplicitParamDecl::Other); 2539 // Idx: index of the buffer. 2540 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 2541 ImplicitParamDecl::Other); 2542 // ReduceList: thread local Reduce list. 
2543 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2544 C.VoidPtrTy, ImplicitParamDecl::Other); 2545 FunctionArgList Args; 2546 Args.push_back(&BufferArg); 2547 Args.push_back(&IdxArg); 2548 Args.push_back(&ReduceListArg); 2549 2550 const CGFunctionInfo &CGFI = 2551 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2552 auto *Fn = llvm::Function::Create( 2553 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2554 "_omp_reduction_list_to_global_copy_func", &CGM.getModule()); 2555 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2556 Fn->setDoesNotRecurse(); 2557 CodeGenFunction CGF(CGM); 2558 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2559 2560 CGBuilderTy &Bld = CGF.Builder; 2561 2562 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2563 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2564 Address LocalReduceList( 2565 Bld.CreatePointerBitCastOrAddrSpaceCast( 2566 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 2567 C.VoidPtrTy, Loc), 2568 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2569 CGF.getPointerAlign()); 2570 QualType StaticTy = C.getRecordType(TeamReductionRec); 2571 llvm::Type *LLVMReductionsBufferTy = 2572 CGM.getTypes().ConvertTypeForMem(StaticTy); 2573 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2574 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2575 LLVMReductionsBufferTy->getPointerTo()); 2576 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2577 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2578 /*Volatile=*/false, C.IntTy, 2579 Loc)}; 2580 unsigned Idx = 0; 2581 for (const Expr *Private : Privates) { 2582 // Reduce element = LocalReduceList[i] 2583 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2584 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 2585 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 2586 // elemptr = ((CopyType*)(elemptrptr)) + I 2587 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2588 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 2589 Address ElemPtr = 2590 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 2591 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 2592 // Global = Buffer.VD[Idx]; 2593 const FieldDecl *FD = VarFieldMap.lookup(VD); 2594 LValue GlobLVal = CGF.EmitLValueForField( 2595 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2596 Address GlobAddr = GlobLVal.getAddress(CGF); 2597 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2598 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2599 GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment())); 2600 switch (CGF.getEvaluationKind(Private->getType())) { 2601 case TEK_Scalar: { 2602 llvm::Value *V = CGF.EmitLoadOfScalar( 2603 ElemPtr, /*Volatile=*/false, Private->getType(), Loc, 2604 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 2605 CGF.EmitStoreOfScalar(V, GlobLVal); 2606 break; 2607 } 2608 case TEK_Complex: { 2609 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex( 2610 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc); 2611 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false); 2612 break; 2613 } 2614 case TEK_Aggregate: 2615 CGF.EmitAggregateCopy(GlobLVal, 2616 CGF.MakeAddrLValue(ElemPtr, Private->getType()), 2617 Private->getType(), AggValueSlot::DoesNotOverlap); 2618 break; 2619 } 2620 ++Idx; 2621 } 2622 
2623 CGF.FinishFunction(); 2624 return Fn; 2625 } 2626 2627 /// This function emits a helper that reduces all the reduction variables from 2628 /// the team into the provided global buffer for the reduction variables. 2629 /// 2630 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data) 2631 /// void *GlobPtrs[]; 2632 /// GlobPtrs[0] = (void*)&buffer.D0[Idx]; 2633 /// ... 2634 /// GlobPtrs[N] = (void*)&buffer.DN[Idx]; 2635 /// reduce_function(GlobPtrs, reduce_data); 2636 static llvm::Value *emitListToGlobalReduceFunction( 2637 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 2638 QualType ReductionArrayTy, SourceLocation Loc, 2639 const RecordDecl *TeamReductionRec, 2640 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 2641 &VarFieldMap, 2642 llvm::Function *ReduceFn) { 2643 ASTContext &C = CGM.getContext(); 2644 2645 // Buffer: global reduction buffer. 2646 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2647 C.VoidPtrTy, ImplicitParamDecl::Other); 2648 // Idx: index of the buffer. 2649 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 2650 ImplicitParamDecl::Other); 2651 // ReduceList: thread local Reduce list. 2652 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2653 C.VoidPtrTy, ImplicitParamDecl::Other); 2654 FunctionArgList Args; 2655 Args.push_back(&BufferArg); 2656 Args.push_back(&IdxArg); 2657 Args.push_back(&ReduceListArg); 2658 2659 const CGFunctionInfo &CGFI = 2660 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2661 auto *Fn = llvm::Function::Create( 2662 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2663 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule()); 2664 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2665 Fn->setDoesNotRecurse(); 2666 CodeGenFunction CGF(CGM); 2667 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2668 2669 CGBuilderTy &Bld = CGF.Builder; 2670 2671 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2672 QualType StaticTy = C.getRecordType(TeamReductionRec); 2673 llvm::Type *LLVMReductionsBufferTy = 2674 CGM.getTypes().ConvertTypeForMem(StaticTy); 2675 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2676 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2677 LLVMReductionsBufferTy->getPointerTo()); 2678 2679 // 1. Build a list of reduction variables. 
2680 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2681 Address ReductionList =
2682 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2683 auto IPriv = Privates.begin();
2684 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2685 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2686 /*Volatile=*/false, C.IntTy,
2687 Loc)};
2688 unsigned Idx = 0;
2689 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2690 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2691 // Global = Buffer.VD[Idx];
2692 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2693 const FieldDecl *FD = VarFieldMap.lookup(VD);
2694 LValue GlobLVal = CGF.EmitLValueForField(
2695 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2696 Address GlobAddr = GlobLVal.getAddress(CGF);
2697 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2698 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2699 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2700 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2701 if ((*IPriv)->getType()->isVariablyModifiedType()) {
2702 // Store array size.
2703 ++Idx;
2704 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2705 llvm::Value *Size = CGF.Builder.CreateIntCast(
2706 CGF.getVLASize(
2707 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2708 .NumElts,
2709 CGF.SizeTy, /*isSigned=*/false);
2710 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2711 Elem);
2712 }
2713 }
2714
2715 // Call reduce_function(GlobalReduceList, ReduceList)
2716 llvm::Value *GlobalReduceList =
2717 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2718 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2719 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2720 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2721 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2722 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2723 CGF.FinishFunction();
2724 return Fn;
2725 }
2726
2727 /// This function emits a helper that copies all the reduction variables from
2728 /// the provided global buffer back into the team's thread-local Reduce list.
2729 ///
2730 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
2731 /// For all data entries D in reduce_data:
2732 /// Copy buffer.D[Idx] to local D;
2733 static llvm::Value *emitGlobalToListCopyFunction(
2734 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2735 QualType ReductionArrayTy, SourceLocation Loc,
2736 const RecordDecl *TeamReductionRec,
2737 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2738 &VarFieldMap) {
2739 ASTContext &C = CGM.getContext();
2740
2741 // Buffer: global reduction buffer.
2742 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2743 C.VoidPtrTy, ImplicitParamDecl::Other);
2744 // Idx: index of the buffer.
2745 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2746 ImplicitParamDecl::Other);
2747 // ReduceList: thread local Reduce list.
2748 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 2749 C.VoidPtrTy, ImplicitParamDecl::Other); 2750 FunctionArgList Args; 2751 Args.push_back(&BufferArg); 2752 Args.push_back(&IdxArg); 2753 Args.push_back(&ReduceListArg); 2754 2755 const CGFunctionInfo &CGFI = 2756 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2757 auto *Fn = llvm::Function::Create( 2758 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 2759 "_omp_reduction_global_to_list_copy_func", &CGM.getModule()); 2760 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 2761 Fn->setDoesNotRecurse(); 2762 CodeGenFunction CGF(CGM); 2763 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2764 2765 CGBuilderTy &Bld = CGF.Builder; 2766 2767 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2768 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 2769 Address LocalReduceList( 2770 Bld.CreatePointerBitCastOrAddrSpaceCast( 2771 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 2772 C.VoidPtrTy, Loc), 2773 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 2774 CGF.getPointerAlign()); 2775 QualType StaticTy = C.getRecordType(TeamReductionRec); 2776 llvm::Type *LLVMReductionsBufferTy = 2777 CGM.getTypes().ConvertTypeForMem(StaticTy); 2778 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2779 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 2780 LLVMReductionsBufferTy->getPointerTo()); 2781 2782 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2783 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2784 /*Volatile=*/false, C.IntTy, 2785 Loc)}; 2786 unsigned Idx = 0; 2787 for (const Expr *Private : Privates) { 2788 // Reduce element = LocalReduceList[i] 2789 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 2790 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 2791 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 2792 // elemptr = ((CopyType*)(elemptrptr)) + I 2793 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 2794 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 2795 Address ElemPtr = 2796 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 2797 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 2798 // Global = Buffer.VD[Idx]; 2799 const FieldDecl *FD = VarFieldMap.lookup(VD); 2800 LValue GlobLVal = CGF.EmitLValueForField( 2801 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2802 Address GlobAddr = GlobLVal.getAddress(CGF); 2803 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2804 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2805 GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment())); 2806 switch (CGF.getEvaluationKind(Private->getType())) { 2807 case TEK_Scalar: { 2808 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc); 2809 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(), 2810 LValueBaseInfo(AlignmentSource::Type), 2811 TBAAAccessInfo()); 2812 break; 2813 } 2814 case TEK_Complex: { 2815 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc); 2816 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()), 2817 /*isInit=*/false); 2818 break; 2819 } 2820 case TEK_Aggregate: 2821 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()), 2822 GlobLVal, Private->getType(), 2823 AggValueSlot::DoesNotOverlap); 2824 break; 2825 } 2826 ++Idx; 2827 } 2828 
2829 CGF.FinishFunction();
2830 return Fn;
2831 }
2832
2833 /// This function emits a helper that reduces all the reduction variables in
2834 /// the provided global buffer into the team's thread-local reduce data.
2835 ///
2836 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2837 /// void *GlobPtrs[];
2838 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2839 /// ...
2840 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2841 /// reduce_function(reduce_data, GlobPtrs);
2842 static llvm::Value *emitGlobalToListReduceFunction(
2843 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2844 QualType ReductionArrayTy, SourceLocation Loc,
2845 const RecordDecl *TeamReductionRec,
2846 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2847 &VarFieldMap,
2848 llvm::Function *ReduceFn) {
2849 ASTContext &C = CGM.getContext();
2850
2851 // Buffer: global reduction buffer.
2852 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2853 C.VoidPtrTy, ImplicitParamDecl::Other);
2854 // Idx: index of the buffer.
2855 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2856 ImplicitParamDecl::Other);
2857 // ReduceList: thread local Reduce list.
2858 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2859 C.VoidPtrTy, ImplicitParamDecl::Other);
2860 FunctionArgList Args;
2861 Args.push_back(&BufferArg);
2862 Args.push_back(&IdxArg);
2863 Args.push_back(&ReduceListArg);
2864
2865 const CGFunctionInfo &CGFI =
2866 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2867 auto *Fn = llvm::Function::Create(
2868 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2869 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2870 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2871 Fn->setDoesNotRecurse();
2872 CodeGenFunction CGF(CGM);
2873 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2874
2875 CGBuilderTy &Bld = CGF.Builder;
2876
2877 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2878 QualType StaticTy = C.getRecordType(TeamReductionRec);
2879 llvm::Type *LLVMReductionsBufferTy =
2880 CGM.getTypes().ConvertTypeForMem(StaticTy);
2881 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2882 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2883 LLVMReductionsBufferTy->getPointerTo());
2884
2885 // 1. Build a list of reduction variables.
2886 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 2887 Address ReductionList = 2888 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); 2889 auto IPriv = Privates.begin(); 2890 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 2891 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 2892 /*Volatile=*/false, C.IntTy, 2893 Loc)}; 2894 unsigned Idx = 0; 2895 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) { 2896 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 2897 // Global = Buffer.VD[Idx]; 2898 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl(); 2899 const FieldDecl *FD = VarFieldMap.lookup(VD); 2900 LValue GlobLVal = CGF.EmitLValueForField( 2901 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 2902 Address GlobAddr = GlobLVal.getAddress(CGF); 2903 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP( 2904 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs); 2905 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr); 2906 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy); 2907 if ((*IPriv)->getType()->isVariablyModifiedType()) { 2908 // Store array size. 2909 ++Idx; 2910 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 2911 llvm::Value *Size = CGF.Builder.CreateIntCast( 2912 CGF.getVLASize( 2913 CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) 2914 .NumElts, 2915 CGF.SizeTy, /*isSigned=*/false); 2916 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), 2917 Elem); 2918 } 2919 } 2920 2921 // Call reduce_function(ReduceList, GlobalReduceList) 2922 llvm::Value *GlobalReduceList = 2923 CGF.EmitCastToVoidPtr(ReductionList.getPointer()); 2924 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 2925 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar( 2926 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc); 2927 CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 2928 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList}); 2929 CGF.FinishFunction(); 2930 return Fn; 2931 } 2932 2933 /// 2934 /// Design of OpenMP reductions on the GPU 2935 /// 2936 /// Consider a typical OpenMP program with one or more reduction 2937 /// clauses: 2938 /// 2939 /// float foo; 2940 /// double bar; 2941 /// #pragma omp target teams distribute parallel for \ 2942 /// reduction(+:foo) reduction(*:bar) 2943 /// for (int i = 0; i < N; i++) { 2944 /// foo += A[i]; bar *= B[i]; 2945 /// } 2946 /// 2947 /// where 'foo' and 'bar' are reduced across all OpenMP threads in 2948 /// all teams. In our OpenMP implementation on the NVPTX device an 2949 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads 2950 /// within a team are mapped to CUDA threads within a threadblock. 2951 /// Our goal is to efficiently aggregate values across all OpenMP 2952 /// threads such that: 2953 /// 2954 /// - the compiler and runtime are logically concise, and 2955 /// - the reduction is performed efficiently in a hierarchical 2956 /// manner as follows: within OpenMP threads in the same warp, 2957 /// across warps in a threadblock, and finally across teams on 2958 /// the NVPTX device. 2959 /// 2960 /// Introduction to Decoupling 2961 /// 2962 /// We would like to decouple the compiler and the runtime so that the 2963 /// latter is ignorant of the reduction variables (number, data types) 2964 /// and the reduction operators. This allows a simpler interface 2965 /// and implementation while still attaining good performance. 
2966 ///
2967 /// Pseudocode for the aforementioned OpenMP program generated by the
2968 /// compiler is as follows:
2969 ///
2970 /// 1. Create private copies of reduction variables on each OpenMP
2971 /// thread: 'foo_private', 'bar_private'
2972 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2973 /// to it and writes the result in 'foo_private' and 'bar_private'
2974 /// respectively.
2975 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
2976 /// and store the result on the team master:
2977 ///
2978 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2979 /// reduceData, shuffleReduceFn, interWarpCpyFn)
2980 ///
2981 /// where:
2982 /// struct ReduceData {
2983 /// double *foo;
2984 /// double *bar;
2985 /// } reduceData
2986 /// reduceData.foo = &foo_private
2987 /// reduceData.bar = &bar_private
2988 ///
2989 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2990 /// auxiliary functions generated by the compiler that operate on
2991 /// variables of type 'ReduceData'. They help the runtime perform
2992 /// algorithmic steps in a data-agnostic manner.
2993 ///
2994 /// 'shuffleReduceFn' is a pointer to a function that reduces data
2995 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
2996 /// same warp. It takes the following arguments as input:
2997 ///
2998 /// a. variable of type 'ReduceData' on the calling lane,
2999 /// b. its lane_id,
3000 /// c. an offset relative to the current lane_id to generate a
3001 /// remote_lane_id. The remote lane contains the second
3002 /// variable of type 'ReduceData' that is to be reduced.
3003 /// d. an algorithm version parameter determining which reduction
3004 /// algorithm to use.
3005 ///
3006 /// 'shuffleReduceFn' retrieves data from the remote lane using
3007 /// efficient GPU shuffle intrinsics and reduces, using the
3008 /// algorithm specified by the 4th parameter, the two operands
3009 /// element-wise. The result is written to the first operand.
3010 ///
3011 /// Different reduction algorithms are implemented in different
3012 /// runtime functions, all calling 'shuffleReduceFn' to perform
3013 /// the essential reduction step. Therefore, based on the 4th
3014 /// parameter, this function behaves slightly differently to
3015 /// cooperate with the runtime to ensure correctness under
3016 /// different circumstances.
3017 ///
3018 /// 'interWarpCpyFn' is a pointer to a function that transfers
3019 /// reduced variables across warps. It tunnels, through CUDA
3020 /// shared memory, the thread-private data of type 'ReduceData'
3021 /// from lane 0 of each warp to a lane in the first warp.
3022 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3023 /// The last team writes the global reduced value to memory.
3024 ///
3025 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
3026 /// reduceData, shuffleReduceFn, interWarpCpyFn,
3027 /// scratchpadCopyFn, loadAndReduceFn)
3028 ///
3029 /// 'scratchpadCopyFn' is a helper that stores reduced
3030 /// data from the team master to a scratchpad array in
3031 /// global memory.
3032 ///
3033 /// 'loadAndReduceFn' is a helper that loads data from
3034 /// the scratchpad array and reduces it with the input
3035 /// operand.
3036 ///
3037 /// These compiler-generated functions hide address
3038 /// calculation and alignment information from the runtime.
3039 /// 5. if ret == 1:
3040 /// The team master of the last team stores the reduced
3041 /// result to the globals in memory.
3042 /// foo += reduceData.foo; bar *= reduceData.bar
3043 ///
3044 ///
3045 /// Warp Reduction Algorithms
3046 ///
3047 /// On the warp level, we have three algorithms implemented in the
3048 /// OpenMP runtime depending on the number of active lanes:
3049 ///
3050 /// Full Warp Reduction
3051 ///
3052 /// The reduce algorithm within a warp where all lanes are active
3053 /// is implemented in the runtime as follows:
3054 ///
3055 /// full_warp_reduce(void *reduce_data,
3056 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3057 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3058 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
3059 /// }
3060 ///
3061 /// The algorithm completes in log(2, WARPSIZE) steps.
3062 ///
3063 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
3064 /// not used; this saves instructions by not retrieving lane_id
3065 /// from the corresponding special registers. The 4th parameter, which
3066 /// represents the version of the algorithm being used, is set to 0 to
3067 /// signify full warp reduction.
3068 ///
3069 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3070 ///
3071 /// #reduce_elem refers to an element in the local lane's data structure
3072 /// #remote_elem is retrieved from a remote lane
3073 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3074 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3075 ///
3076 /// Contiguous Partial Warp Reduction
3077 ///
3078 /// This reduce algorithm is used within a warp where only the first
3079 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
3080 /// number of OpenMP threads in a parallel region is not a multiple of
3081 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
3082 ///
3083 /// void
3084 /// contiguous_partial_reduce(void *reduce_data,
3085 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
3086 /// int size, int lane_id) {
3087 /// int curr_size;
3088 /// int offset;
3089 /// curr_size = size;
3090 /// offset = curr_size/2;
3091 /// while (offset>0) {
3092 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3093 /// curr_size = (curr_size+1)/2;
3094 /// offset = curr_size/2;
3095 /// }
3096 /// }
3097 ///
3098 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3099 ///
3100 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3101 /// if (lane_id < offset)
3102 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3103 /// else
3104 /// reduce_elem = remote_elem
3105 ///
3106 /// This algorithm assumes that the data to be reduced are located in a
3107 /// contiguous subset of lanes starting from the first. When there is
3108 /// an odd number of active lanes, the data in the last lane is not
3109 /// aggregated with any other lane's data but is instead copied over.
3110 ///
3111 /// Dispersed Partial Warp Reduction
3112 ///
3113 /// This algorithm is used within a warp when any discontiguous subset of
3114 /// lanes are active. It is used to implement the reduction operation
3115 /// across lanes in an OpenMP simd region or in a nested parallel region.
3116 ///
3117 /// void
3118 /// dispersed_partial_reduce(void *reduce_data,
3119 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3120 /// int size, remote_id;
3121 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
3122 /// do {
3123 /// remote_id = next_active_lane_id_right_after_me();
3124 /// # the above function returns 0 if no active lane
3125 /// # is present right after the current lane.
3126 /// size = number_of_active_lanes_in_this_warp();
3127 /// logical_lane_id /= 2;
3128 /// ShuffleReduceFn(reduce_data, logical_lane_id,
3129 /// remote_id-1-threadIdx.x, 2);
3130 /// } while (logical_lane_id % 2 == 0 && size > 1);
3131 /// }
3132 ///
3133 /// There is no assumption made about the initial state of the reduction.
3134 /// Any number of lanes (>=1) could be active at any position. The reduction
3135 /// result is returned in the first active lane.
3136 ///
3137 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3138 ///
3139 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3140 /// if (lane_id % 2 == 0 && offset > 0)
3141 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3142 /// else
3143 /// reduce_elem = remote_elem
3144 ///
3145 ///
3146 /// Intra-Team Reduction
3147 ///
3148 /// This step, as implemented in the runtime call
3149 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3150 /// threads in a team. It first reduces within a warp using the
3151 /// aforementioned algorithms. We then proceed to gather all such
3152 /// reduced values at the first warp.
3153 ///
3154 /// The runtime makes use of the function 'interWarpCpyFn', which copies
3155 /// data from each of the "warp masters" (zeroth lane of each warp, where
3156 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
3157 /// a mathematical sense) the problem of reduction across warp masters in
3158 /// a block to the problem of warp reduction.
3159 ///
3160 ///
3161 /// Inter-Team Reduction
3162 ///
3163 /// Once a team has reduced its data to a single value, it is stored in
3164 /// a global scratchpad array. Since each team has a distinct slot, this
3165 /// can be done without locking.
3166 ///
3167 /// The last team to write to the scratchpad array proceeds to reduce the
3168 /// scratchpad array. One or more workers in the last team use the helper
3169 /// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
3170 /// the k'th worker reduces every k'th element.
3171 ///
3172 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3173 /// reduce across workers and compute a globally reduced value.
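///
/// In the implementation below, the inter-team step is carried out through
/// the four buffer helpers emitted above (the list_to_global and
/// global_to_list copy/reduce functions), which are passed to
/// '__kmpc_nvptx_teams_reduce_nowait_v2' together with a fixed-size global
/// buffer. A rough sketch of how the runtime is expected to drive them, in
/// pseudocode (illustrative only; the authoritative logic lives in the
/// device runtime library):
///
///   idx = team_id % num_buffer_slots;
///   // Each team master publishes its team-reduced values into a slot of
///   // the global buffer, copying on first use and reducing afterwards.
///   if (slot 'idx' has not been written yet)
///     list_to_global_copy(global_buffer, idx, reduce_data);
///   else
///     list_to_global_reduce(global_buffer, idx, reduce_data);
///
///   // The last team to arrive folds every used slot back into its own
///   // reduce_data; its master then commits the final result.
///   if (last team to finish the step above) {
///     global_to_list_copy(global_buffer, 0, reduce_data);
///     for (i = 1; i < used_slots; ++i)
///       global_to_list_reduce(global_buffer, i, reduce_data);
///     return 1;  // caller stores reduce_data into the original variables
///   }
///   return 0;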
3174 /// 3175 void CGOpenMPRuntimeGPU::emitReduction( 3176 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, 3177 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, 3178 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) { 3179 if (!CGF.HaveInsertPoint()) 3180 return; 3181 3182 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind); 3183 #ifndef NDEBUG 3184 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind); 3185 #endif 3186 3187 if (Options.SimpleReduction) { 3188 assert(!TeamsReduction && !ParallelReduction && 3189 "Invalid reduction selection in emitReduction."); 3190 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs, 3191 ReductionOps, Options); 3192 return; 3193 } 3194 3195 assert((TeamsReduction || ParallelReduction) && 3196 "Invalid reduction selection in emitReduction."); 3197 3198 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList), 3199 // RedList, shuffle_reduce_func, interwarp_copy_func); 3200 // or 3201 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>); 3202 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc); 3203 llvm::Value *ThreadId = getThreadID(CGF, Loc); 3204 3205 llvm::Value *Res; 3206 ASTContext &C = CGM.getContext(); 3207 // 1. Build a list of reduction variables. 3208 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 3209 auto Size = RHSExprs.size(); 3210 for (const Expr *E : Privates) { 3211 if (E->getType()->isVariablyModifiedType()) 3212 // Reserve place for array size. 3213 ++Size; 3214 } 3215 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size); 3216 QualType ReductionArrayTy = 3217 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal, 3218 /*IndexTypeQuals=*/0); 3219 Address ReductionList = 3220 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); 3221 auto IPriv = Privates.begin(); 3222 unsigned Idx = 0; 3223 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) { 3224 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 3225 CGF.Builder.CreateStore( 3226 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3227 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy), 3228 Elem); 3229 if ((*IPriv)->getType()->isVariablyModifiedType()) { 3230 // Store array size. 
3231 ++Idx; 3232 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 3233 llvm::Value *Size = CGF.Builder.CreateIntCast( 3234 CGF.getVLASize( 3235 CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) 3236 .NumElts, 3237 CGF.SizeTy, /*isSigned=*/false); 3238 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), 3239 Elem); 3240 } 3241 } 3242 3243 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3244 ReductionList.getPointer(), CGF.VoidPtrTy); 3245 llvm::Function *ReductionFn = emitReductionFunction( 3246 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates, 3247 LHSExprs, RHSExprs, ReductionOps); 3248 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy); 3249 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction( 3250 CGM, Privates, ReductionArrayTy, ReductionFn, Loc); 3251 llvm::Value *InterWarpCopyFn = 3252 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc); 3253 3254 if (ParallelReduction) { 3255 llvm::Value *Args[] = {RTLoc, 3256 ThreadId, 3257 CGF.Builder.getInt32(RHSExprs.size()), 3258 ReductionArrayTySize, 3259 RL, 3260 ShuffleAndReduceFn, 3261 InterWarpCopyFn}; 3262 3263 Res = CGF.EmitRuntimeCall( 3264 OMPBuilder.getOrCreateRuntimeFunction( 3265 CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2), 3266 Args); 3267 } else { 3268 assert(TeamsReduction && "expected teams reduction."); 3269 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap; 3270 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size()); 3271 int Cnt = 0; 3272 for (const Expr *DRE : Privates) { 3273 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl(); 3274 ++Cnt; 3275 } 3276 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars( 3277 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap, 3278 C.getLangOpts().OpenMPCUDAReductionBufNum); 3279 TeamsReductions.push_back(TeamReductionRec); 3280 if (!KernelTeamsReductionPtr) { 3281 KernelTeamsReductionPtr = new llvm::GlobalVariable( 3282 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true, 3283 llvm::GlobalValue::InternalLinkage, nullptr, 3284 "_openmp_teams_reductions_buffer_$_$ptr"); 3285 } 3286 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar( 3287 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()), 3288 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc); 3289 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction( 3290 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap); 3291 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction( 3292 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap, 3293 ReductionFn); 3294 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction( 3295 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap); 3296 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction( 3297 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap, 3298 ReductionFn); 3299 3300 llvm::Value *Args[] = { 3301 RTLoc, 3302 ThreadId, 3303 GlobalBufferPtr, 3304 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum), 3305 RL, 3306 ShuffleAndReduceFn, 3307 InterWarpCopyFn, 3308 GlobalToBufferCpyFn, 3309 GlobalToBufferRedFn, 3310 BufferToGlobalCpyFn, 3311 BufferToGlobalRedFn}; 3312 3313 Res = CGF.EmitRuntimeCall( 3314 OMPBuilder.getOrCreateRuntimeFunction( 3315 CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2), 3316 Args); 3317 } 3318 3319 // 5. 
Build if (res == 1) 3320 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done"); 3321 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then"); 3322 llvm::Value *Cond = CGF.Builder.CreateICmpEQ( 3323 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1)); 3324 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB); 3325 3326 // 6. Build then branch: where we have reduced values in the master 3327 // thread in each team. 3328 // __kmpc_end_reduce{_nowait}(<gtid>); 3329 // break; 3330 CGF.EmitBlock(ThenBB); 3331 3332 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>); 3333 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps, 3334 this](CodeGenFunction &CGF, PrePostActionTy &Action) { 3335 auto IPriv = Privates.begin(); 3336 auto ILHS = LHSExprs.begin(); 3337 auto IRHS = RHSExprs.begin(); 3338 for (const Expr *E : ReductionOps) { 3339 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS), 3340 cast<DeclRefExpr>(*IRHS)); 3341 ++IPriv; 3342 ++ILHS; 3343 ++IRHS; 3344 } 3345 }; 3346 llvm::Value *EndArgs[] = {ThreadId}; 3347 RegionCodeGenTy RCG(CodeGen); 3348 NVPTXActionTy Action( 3349 nullptr, llvm::None, 3350 OMPBuilder.getOrCreateRuntimeFunction( 3351 CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait), 3352 EndArgs); 3353 RCG.setAction(Action); 3354 RCG(CGF); 3355 // There is no need to emit line number for unconditional branch. 3356 (void)ApplyDebugLocation::CreateEmpty(CGF); 3357 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3358 } 3359 3360 const VarDecl * 3361 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD, 3362 const VarDecl *NativeParam) const { 3363 if (!NativeParam->getType()->isReferenceType()) 3364 return NativeParam; 3365 QualType ArgType = NativeParam->getType(); 3366 QualifierCollector QC; 3367 const Type *NonQualTy = QC.strip(ArgType); 3368 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType(); 3369 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) { 3370 if (Attr->getCaptureKind() == OMPC_map) { 3371 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy, 3372 LangAS::opencl_global); 3373 } 3374 } 3375 ArgType = CGM.getContext().getPointerType(PointeeTy); 3376 QC.addRestrict(); 3377 enum { NVPTX_local_addr = 5 }; 3378 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr)); 3379 ArgType = QC.apply(CGM.getContext(), ArgType); 3380 if (isa<ImplicitParamDecl>(NativeParam)) 3381 return ImplicitParamDecl::Create( 3382 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(), 3383 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other); 3384 return ParmVarDecl::Create( 3385 CGM.getContext(), 3386 const_cast<DeclContext *>(NativeParam->getDeclContext()), 3387 NativeParam->getBeginLoc(), NativeParam->getLocation(), 3388 NativeParam->getIdentifier(), ArgType, 3389 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr); 3390 } 3391 3392 Address 3393 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF, 3394 const VarDecl *NativeParam, 3395 const VarDecl *TargetParam) const { 3396 assert(NativeParam != TargetParam && 3397 NativeParam->getType()->isReferenceType() && 3398 "Native arg must not be the same as target arg."); 3399 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam); 3400 QualType NativeParamType = NativeParam->getType(); 3401 QualifierCollector QC; 3402 const Type *NonQualTy = QC.strip(NativeParamType); 3403 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType(); 3404 unsigned NativePointeeAddrSpace = 3405 
CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3406 QualType TargetTy = TargetParam->getType();
3407 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3408 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3409 // First cast to generic.
3410 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3411 TargetAddr, llvm::PointerType::getWithSamePointeeType(
3412 cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
3413 // Cast from generic to native address space.
3414 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3415 TargetAddr, llvm::PointerType::getWithSamePointeeType(
3416 cast<llvm::PointerType>(TargetAddr->getType()),
3417 NativePointeeAddrSpace));
3418 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3419 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3420 NativeParamType);
3421 return NativeParamAddr;
3422 }
3423
3424 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3425 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3426 ArrayRef<llvm::Value *> Args) const {
3427 SmallVector<llvm::Value *, 4> TargetArgs;
3428 TargetArgs.reserve(Args.size());
3429 auto *FnType = OutlinedFn.getFunctionType();
3430 for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3431 if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3432 TargetArgs.append(std::next(Args.begin(), I), Args.end());
3433 break;
3434 }
3435 llvm::Type *TargetType = FnType->getParamType(I);
3436 llvm::Value *NativeArg = Args[I];
3437 if (!TargetType->isPointerTy()) {
3438 TargetArgs.emplace_back(NativeArg);
3439 continue;
3440 }
3441 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3442 NativeArg, llvm::PointerType::getWithSamePointeeType(
3443 cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
3444 TargetArgs.emplace_back(
3445 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3446 }
3447 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3448 }
3449
3450 /// Emit a function which wraps the outlined parallel region
3451 /// and controls the arguments which are passed to this function.
3452 /// The wrapper ensures that the outlined function is called
3453 /// with the correct arguments when data is shared.
3454 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3455 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3456 ASTContext &Ctx = CGM.getContext();
3457 const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3458
3459 // Create a function that takes as argument the source thread.
3460 FunctionArgList WrapperArgs;
3461 QualType Int16QTy =
3462 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3463 QualType Int32QTy =
3464 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3465 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3466 /*Id=*/nullptr, Int16QTy,
3467 ImplicitParamDecl::Other);
3468 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3469 /*Id=*/nullptr, Int32QTy,
3470 ImplicitParamDecl::Other);
3471 WrapperArgs.emplace_back(&ParallelLevelArg);
3472 WrapperArgs.emplace_back(&WrapperArg);
3473
3474 const CGFunctionInfo &CGFI =
3475 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3476
3477 auto *Fn = llvm::Function::Create(
3478 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3479 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3480
3481 // Ensure we do not inline the function. This is trivially true for the ones
3482 // passed to __kmpc_fork_call but the ones called in serialized regions
3483 // could be inlined. This is not perfect, but it is closer to the invariant
3484 // we want, namely, every data environment starts with a new function.
3485 // TODO: We should pass the if condition to the runtime function and do the
3486 // handling there. Much cleaner code.
3487 Fn->addFnAttr(llvm::Attribute::NoInline);
3488
3489 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3490 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3491 Fn->setDoesNotRecurse();
3492
3493 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3494 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3495 D.getBeginLoc(), D.getBeginLoc());
3496
3497 const auto *RD = CS.getCapturedRecordDecl();
3498 auto CurField = RD->field_begin();
3499
3500 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3501 /*Name=*/".zero.addr");
3502 CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
3503 // Get the array of arguments.
3504 SmallVector<llvm::Value *, 8> Args;
3505
3506 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3507 Args.emplace_back(ZeroAddr.getPointer());
3508
3509 CGBuilderTy &Bld = CGF.Builder;
3510 auto CI = CS.capture_begin();
3511
3512 // Use global memory for data sharing.
3513 // Handle passing of global args to workers.
3514 Address GlobalArgs =
3515 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3516 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3517 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3518 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3519 CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3520 DataSharingArgs);
3521
3522 // Retrieve the shared variables from the list of references returned
3523 // by the runtime. Pass the variables to the outlined function.
3524 Address SharedArgListAddress = Address::invalid(); 3525 if (CS.capture_size() > 0 || 3526 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) { 3527 SharedArgListAddress = CGF.EmitLoadOfPointer( 3528 GlobalArgs, CGF.getContext() 3529 .getPointerType(CGF.getContext().getPointerType( 3530 CGF.getContext().VoidPtrTy)) 3531 .castAs<PointerType>()); 3532 } 3533 unsigned Idx = 0; 3534 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) { 3535 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx); 3536 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( 3537 Src, CGF.SizeTy->getPointerTo()); 3538 llvm::Value *LB = CGF.EmitLoadOfScalar( 3539 TypedAddress, 3540 /*Volatile=*/false, 3541 CGF.getContext().getPointerType(CGF.getContext().getSizeType()), 3542 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc()); 3543 Args.emplace_back(LB); 3544 ++Idx; 3545 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx); 3546 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( 3547 Src, CGF.SizeTy->getPointerTo()); 3548 llvm::Value *UB = CGF.EmitLoadOfScalar( 3549 TypedAddress, 3550 /*Volatile=*/false, 3551 CGF.getContext().getPointerType(CGF.getContext().getSizeType()), 3552 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc()); 3553 Args.emplace_back(UB); 3554 ++Idx; 3555 } 3556 if (CS.capture_size() > 0) { 3557 ASTContext &CGFContext = CGF.getContext(); 3558 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) { 3559 QualType ElemTy = CurField->getType(); 3560 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx); 3561 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( 3562 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy))); 3563 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress, 3564 /*Volatile=*/false, 3565 CGFContext.getPointerType(ElemTy), 3566 CI->getLocation()); 3567 if (CI->capturesVariableByCopy() && 3568 !CI->getCapturedVar()->getType()->isAnyPointerType()) { 3569 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(), 3570 CI->getLocation()); 3571 } 3572 Args.emplace_back(Arg); 3573 } 3574 } 3575 3576 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args); 3577 CGF.FinishFunction(); 3578 return Fn; 3579 } 3580 3581 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF, 3582 const Decl *D) { 3583 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic) 3584 return; 3585 3586 assert(D && "Expected function or captured|block decl."); 3587 assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 && 3588 "Function is registered already."); 3589 assert((!TeamAndReductions.first || TeamAndReductions.first == D) && 3590 "Team is set but not processed."); 3591 const Stmt *Body = nullptr; 3592 bool NeedToDelayGlobalization = false; 3593 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 3594 Body = FD->getBody(); 3595 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) { 3596 Body = BD->getBody(); 3597 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) { 3598 Body = CD->getBody(); 3599 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP; 3600 if (NeedToDelayGlobalization && 3601 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) 3602 return; 3603 } 3604 if (!Body) 3605 return; 3606 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second); 3607 VarChecker.Visit(Body); 3608 const RecordDecl *GlobalizedVarsRecord = 3609 VarChecker.getGlobalizedRecord(IsInTTDRegion); 3610 TeamAndReductions.first 
= nullptr;
3611 TeamAndReductions.second.clear();
3612 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3613 VarChecker.getEscapedVariableLengthDecls();
3614 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3615 return;
3616 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3617 I->getSecond().MappedParams =
3618 std::make_unique<CodeGenFunction::OMPMapVars>();
3619 I->getSecond().EscapedParameters.insert(
3620 VarChecker.getEscapedParameters().begin(),
3621 VarChecker.getEscapedParameters().end());
3622 I->getSecond().EscapedVariableLengthDecls.append(
3623 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3624 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3625 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3626 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3627 Data.insert(std::make_pair(VD, MappedVarData()));
3628 }
3629 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3630 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3631 VarChecker.Visit(Body);
3632 I->getSecond().SecondaryLocalVarData.emplace();
3633 DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3634 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3635 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3636 Data.insert(std::make_pair(VD, MappedVarData()));
3637 }
3638 }
3639 if (!NeedToDelayGlobalization) {
3640 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3641 struct GlobalizationScope final : EHScopeStack::Cleanup {
3642 GlobalizationScope() = default;
3643
3644 void Emit(CodeGenFunction &CGF, Flags flags) override {
3645 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3646 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3647 }
3648 };
3649 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3650 }
3651 }
3652
3653 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3654 const VarDecl *VD) {
3655 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3656 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3657 auto AS = LangAS::Default;
3658 switch (A->getAllocatorType()) {
3659 // Use the default allocator here as by default local vars are
3660 // threadlocal.
3661 case OMPAllocateDeclAttr::OMPNullMemAlloc:
3662 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3663 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3664 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3665 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3666 // Follow the user decision - use default allocation.
3667 return Address::invalid();
3668 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3669 // TODO: implement support for user-defined allocators.
3670 return Address::invalid(); 3671 case OMPAllocateDeclAttr::OMPConstMemAlloc: 3672 AS = LangAS::cuda_constant; 3673 break; 3674 case OMPAllocateDeclAttr::OMPPTeamMemAlloc: 3675 AS = LangAS::cuda_shared; 3676 break; 3677 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc: 3678 case OMPAllocateDeclAttr::OMPCGroupMemAlloc: 3679 break; 3680 } 3681 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType()); 3682 auto *GV = new llvm::GlobalVariable( 3683 CGM.getModule(), VarTy, /*isConstant=*/false, 3684 llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy), 3685 VD->getName(), 3686 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, 3687 CGM.getContext().getTargetAddressSpace(AS)); 3688 CharUnits Align = CGM.getContext().getDeclAlign(VD); 3689 GV->setAlignment(Align.getAsAlign()); 3690 return Address( 3691 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 3692 GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace( 3693 VD->getType().getAddressSpace()))), 3694 Align); 3695 } 3696 3697 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic) 3698 return Address::invalid(); 3699 3700 VD = VD->getCanonicalDecl(); 3701 auto I = FunctionGlobalizedDecls.find(CGF.CurFn); 3702 if (I == FunctionGlobalizedDecls.end()) 3703 return Address::invalid(); 3704 auto VDI = I->getSecond().LocalVarData.find(VD); 3705 if (VDI != I->getSecond().LocalVarData.end()) 3706 return VDI->second.PrivateAddr; 3707 if (VD->hasAttrs()) { 3708 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()), 3709 E(VD->attr_end()); 3710 IT != E; ++IT) { 3711 auto VDI = I->getSecond().LocalVarData.find( 3712 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl()) 3713 ->getCanonicalDecl()); 3714 if (VDI != I->getSecond().LocalVarData.end()) 3715 return VDI->second.PrivateAddr; 3716 } 3717 } 3718 3719 return Address::invalid(); 3720 } 3721 3722 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) { 3723 FunctionGlobalizedDecls.erase(CGF.CurFn); 3724 CGOpenMPRuntime::functionFinished(CGF); 3725 } 3726 3727 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk( 3728 CodeGenFunction &CGF, const OMPLoopDirective &S, 3729 OpenMPDistScheduleClauseKind &ScheduleKind, 3730 llvm::Value *&Chunk) const { 3731 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); 3732 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) { 3733 ScheduleKind = OMPC_DIST_SCHEDULE_static; 3734 Chunk = CGF.EmitScalarConversion( 3735 RT.getGPUNumThreads(CGF), 3736 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 3737 S.getIterationVariable()->getType(), S.getBeginLoc()); 3738 return; 3739 } 3740 CGOpenMPRuntime::getDefaultDistScheduleAndChunk( 3741 CGF, S, ScheduleKind, Chunk); 3742 } 3743 3744 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk( 3745 CodeGenFunction &CGF, const OMPLoopDirective &S, 3746 OpenMPScheduleClauseKind &ScheduleKind, 3747 const Expr *&ChunkExpr) const { 3748 ScheduleKind = OMPC_SCHEDULE_static; 3749 // Chunk size is 1 in this case. 
3750 llvm::APInt ChunkSize(32, 1); 3751 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize, 3752 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 3753 SourceLocation()); 3754 } 3755 3756 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas( 3757 CodeGenFunction &CGF, const OMPExecutableDirective &D) const { 3758 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) && 3759 " Expected target-based directive."); 3760 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target); 3761 for (const CapturedStmt::Capture &C : CS->captures()) { 3762 // Capture variables captured by reference in lambdas for target-based 3763 // directives. 3764 if (!C.capturesVariable()) 3765 continue; 3766 const VarDecl *VD = C.getCapturedVar(); 3767 const auto *RD = VD->getType() 3768 .getCanonicalType() 3769 .getNonReferenceType() 3770 ->getAsCXXRecordDecl(); 3771 if (!RD || !RD->isLambda()) 3772 continue; 3773 Address VDAddr = CGF.GetAddrOfLocalVar(VD); 3774 LValue VDLVal; 3775 if (VD->getType().getCanonicalType()->isReferenceType()) 3776 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType()); 3777 else 3778 VDLVal = CGF.MakeAddrLValue( 3779 VDAddr, VD->getType().getCanonicalType().getNonReferenceType()); 3780 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures; 3781 FieldDecl *ThisCapture = nullptr; 3782 RD->getCaptureFields(Captures, ThisCapture); 3783 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) { 3784 LValue ThisLVal = 3785 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture); 3786 llvm::Value *CXXThis = CGF.LoadCXXThis(); 3787 CGF.EmitStoreOfScalar(CXXThis, ThisLVal); 3788 } 3789 for (const LambdaCapture &LC : RD->captures()) { 3790 if (LC.getCaptureKind() != LCK_ByRef) 3791 continue; 3792 const VarDecl *VD = LC.getCapturedVar(); 3793 if (!CS->capturesVariable(VD)) 3794 continue; 3795 auto It = Captures.find(VD); 3796 assert(It != Captures.end() && "Found lambda capture without field."); 3797 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second); 3798 Address VDAddr = CGF.GetAddrOfLocalVar(VD); 3799 if (VD->getType().getCanonicalType()->isReferenceType()) 3800 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr, 3801 VD->getType().getCanonicalType()) 3802 .getAddress(CGF); 3803 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal); 3804 } 3805 } 3806 } 3807 3808 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD, 3809 LangAS &AS) { 3810 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>()) 3811 return false; 3812 const auto *A = VD->getAttr<OMPAllocateDeclAttr>(); 3813 switch(A->getAllocatorType()) { 3814 case OMPAllocateDeclAttr::OMPNullMemAlloc: 3815 case OMPAllocateDeclAttr::OMPDefaultMemAlloc: 3816 // Not supported, fallback to the default mem space. 
3817 case OMPAllocateDeclAttr::OMPThreadMemAlloc: 3818 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc: 3819 case OMPAllocateDeclAttr::OMPCGroupMemAlloc: 3820 case OMPAllocateDeclAttr::OMPHighBWMemAlloc: 3821 case OMPAllocateDeclAttr::OMPLowLatMemAlloc: 3822 AS = LangAS::Default; 3823 return true; 3824 case OMPAllocateDeclAttr::OMPConstMemAlloc: 3825 AS = LangAS::cuda_constant; 3826 return true; 3827 case OMPAllocateDeclAttr::OMPPTeamMemAlloc: 3828 AS = LangAS::cuda_shared; 3829 return true; 3830 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc: 3831 llvm_unreachable("Expected predefined allocator for the variables with the " 3832 "static storage."); 3833 } 3834 return false; 3835 } 3836 3837 // Get current CudaArch and ignore any unknown values 3838 static CudaArch getCudaArch(CodeGenModule &CGM) { 3839 if (!CGM.getTarget().hasFeature("ptx")) 3840 return CudaArch::UNKNOWN; 3841 for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) { 3842 if (Feature.getValue()) { 3843 CudaArch Arch = StringToCudaArch(Feature.getKey()); 3844 if (Arch != CudaArch::UNKNOWN) 3845 return Arch; 3846 } 3847 } 3848 return CudaArch::UNKNOWN; 3849 } 3850 3851 /// Check to see if target architecture supports unified addressing which is 3852 /// a restriction for OpenMP requires clause "unified_shared_memory". 3853 void CGOpenMPRuntimeGPU::processRequiresDirective( 3854 const OMPRequiresDecl *D) { 3855 for (const OMPClause *Clause : D->clauselists()) { 3856 if (Clause->getClauseKind() == OMPC_unified_shared_memory) { 3857 CudaArch Arch = getCudaArch(CGM); 3858 switch (Arch) { 3859 case CudaArch::SM_20: 3860 case CudaArch::SM_21: 3861 case CudaArch::SM_30: 3862 case CudaArch::SM_32: 3863 case CudaArch::SM_35: 3864 case CudaArch::SM_37: 3865 case CudaArch::SM_50: 3866 case CudaArch::SM_52: 3867 case CudaArch::SM_53: { 3868 SmallString<256> Buffer; 3869 llvm::raw_svector_ostream Out(Buffer); 3870 Out << "Target architecture " << CudaArchToString(Arch) 3871 << " does not support unified addressing"; 3872 CGM.Error(Clause->getBeginLoc(), Out.str()); 3873 return; 3874 } 3875 case CudaArch::SM_60: 3876 case CudaArch::SM_61: 3877 case CudaArch::SM_62: 3878 case CudaArch::SM_70: 3879 case CudaArch::SM_72: 3880 case CudaArch::SM_75: 3881 case CudaArch::SM_80: 3882 case CudaArch::SM_86: 3883 case CudaArch::GFX600: 3884 case CudaArch::GFX601: 3885 case CudaArch::GFX602: 3886 case CudaArch::GFX700: 3887 case CudaArch::GFX701: 3888 case CudaArch::GFX702: 3889 case CudaArch::GFX703: 3890 case CudaArch::GFX704: 3891 case CudaArch::GFX705: 3892 case CudaArch::GFX801: 3893 case CudaArch::GFX802: 3894 case CudaArch::GFX803: 3895 case CudaArch::GFX805: 3896 case CudaArch::GFX810: 3897 case CudaArch::GFX900: 3898 case CudaArch::GFX902: 3899 case CudaArch::GFX904: 3900 case CudaArch::GFX906: 3901 case CudaArch::GFX908: 3902 case CudaArch::GFX909: 3903 case CudaArch::GFX90a: 3904 case CudaArch::GFX90c: 3905 case CudaArch::GFX1010: 3906 case CudaArch::GFX1011: 3907 case CudaArch::GFX1012: 3908 case CudaArch::GFX1013: 3909 case CudaArch::GFX1030: 3910 case CudaArch::GFX1031: 3911 case CudaArch::GFX1032: 3912 case CudaArch::GFX1033: 3913 case CudaArch::GFX1034: 3914 case CudaArch::GFX1035: 3915 case CudaArch::Generic: 3916 case CudaArch::UNUSED: 3917 case CudaArch::UNKNOWN: 3918 break; 3919 case CudaArch::LAST: 3920 llvm_unreachable("Unexpected Cuda arch."); 3921 } 3922 } 3923 } 3924 CGOpenMPRuntime::processRequiresDirective(D); 3925 } 3926 3927 void CGOpenMPRuntimeGPU::clear() { 3928 3929 if (!TeamsReductions.empty()) { 3930 
ASTContext &C = CGM.getContext(); 3931 RecordDecl *StaticRD = C.buildImplicitRecord( 3932 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union); 3933 StaticRD->startDefinition(); 3934 for (const RecordDecl *TeamReductionRec : TeamsReductions) { 3935 QualType RecTy = C.getRecordType(TeamReductionRec); 3936 auto *Field = FieldDecl::Create( 3937 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy, 3938 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()), 3939 /*BW=*/nullptr, /*Mutable=*/false, 3940 /*InitStyle=*/ICIS_NoInit); 3941 Field->setAccess(AS_public); 3942 StaticRD->addDecl(Field); 3943 } 3944 StaticRD->completeDefinition(); 3945 QualType StaticTy = C.getRecordType(StaticRD); 3946 llvm::Type *LLVMReductionsBufferTy = 3947 CGM.getTypes().ConvertTypeForMem(StaticTy); 3948 // FIXME: nvlink does not handle weak linkage correctly (object with the 3949 // different size are reported as erroneous). 3950 // Restore CommonLinkage as soon as nvlink is fixed. 3951 auto *GV = new llvm::GlobalVariable( 3952 CGM.getModule(), LLVMReductionsBufferTy, 3953 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage, 3954 llvm::Constant::getNullValue(LLVMReductionsBufferTy), 3955 "_openmp_teams_reductions_buffer_$_"); 3956 KernelTeamsReductionPtr->setInitializer( 3957 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, 3958 CGM.VoidPtrTy)); 3959 } 3960 CGOpenMPRuntime::clear(); 3961 } 3962 3963 llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) { 3964 CGBuilderTy &Bld = CGF.Builder; 3965 llvm::Module *M = &CGF.CGM.getModule(); 3966 const char *LocSize = "__kmpc_get_hardware_num_threads_in_block"; 3967 llvm::Function *F = M->getFunction(LocSize); 3968 if (!F) { 3969 F = llvm::Function::Create( 3970 llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false), 3971 llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule()); 3972 } 3973 return Bld.CreateCall(F, llvm::None, "nvptx_num_threads"); 3974 } 3975 3976 llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) { 3977 ArrayRef<llvm::Value *> Args{}; 3978 return CGF.EmitRuntimeCall( 3979 OMPBuilder.getOrCreateRuntimeFunction( 3980 CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block), 3981 Args); 3982 } 3983 3984 llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) { 3985 ArrayRef<llvm::Value *> Args{}; 3986 return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( 3987 CGM.getModule(), OMPRTL___kmpc_get_warp_size), 3988 Args); 3989 } 3990
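
// Note on the helper-emission pattern used by getGPUNumThreads() above: the
// runtime entry point is declared lazily on first use and then called
// directly. A minimal standalone sketch of the same pattern is shown here
// (illustrative only; 'emitHelperCall' is a hypothetical name, not part of
// clang; getGPUThreadID()/getGPUWarpSize() achieve the same effect via
// OMPBuilder.getOrCreateRuntimeFunction()):
//
//   llvm::Value *emitHelperCall(llvm::Module &M, llvm::IRBuilder<> &B,
//                               llvm::StringRef Name) {
//     llvm::Function *F = M.getFunction(Name);
//     if (!F) {
//       // Declare `i32 Name()` with external linkage on first use.
//       auto *FTy = llvm::FunctionType::get(B.getInt32Ty(),
//                                           /*isVarArg=*/false);
//       F = llvm::Function::Create(FTy, llvm::GlobalValue::ExternalLinkage,
//                                  Name, &M);
//     }
//     return B.CreateCall(F);
//   }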