1 //===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// 10 /// This file implements the OpenMPIRBuilder class, which is used as a 11 /// convenient way to create LLVM instructions for OpenMP directives. 12 /// 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/StringRef.h" 18 #include "llvm/ADT/Triple.h" 19 #include "llvm/Analysis/AssumptionCache.h" 20 #include "llvm/Analysis/CodeMetrics.h" 21 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 22 #include "llvm/Analysis/ScalarEvolution.h" 23 #include "llvm/Analysis/TargetLibraryInfo.h" 24 #include "llvm/IR/CFG.h" 25 #include "llvm/IR/Constants.h" 26 #include "llvm/IR/DebugInfo.h" 27 #include "llvm/IR/GlobalVariable.h" 28 #include "llvm/IR/IRBuilder.h" 29 #include "llvm/IR/MDBuilder.h" 30 #include "llvm/IR/PassManager.h" 31 #include "llvm/IR/Value.h" 32 #include "llvm/MC/TargetRegistry.h" 33 #include "llvm/Support/CommandLine.h" 34 #include "llvm/Support/Error.h" 35 #include "llvm/Target/TargetMachine.h" 36 #include "llvm/Target/TargetOptions.h" 37 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 38 #include "llvm/Transforms/Utils/CodeExtractor.h" 39 #include "llvm/Transforms/Utils/LoopPeel.h" 40 #include "llvm/Transforms/Utils/ModuleUtils.h" 41 #include "llvm/Transforms/Utils/UnrollLoop.h" 42 43 #include <cstdint> 44 #include <sstream> 45 46 #define DEBUG_TYPE "openmp-ir-builder" 47 48 using namespace llvm; 49 using namespace omp; 50 51 static cl::opt<bool> 52 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden, 53 cl::desc("Use optimistic attributes describing " 54 "'as-if' properties of runtime calls."), 55 cl::init(false)); 56 57 static cl::opt<double> UnrollThresholdFactor( 58 "openmp-ir-builder-unroll-threshold-factor", cl::Hidden, 59 cl::desc("Factor for the unroll threshold to account for code " 60 "simplifications still taking place"), 61 cl::init(1.5)); 62 63 #ifndef NDEBUG 64 /// Return whether IP1 and IP2 are ambiguous, i.e. that inserting instructions 65 /// at position IP1 may change the meaning of IP2 or vice-versa. This is because 66 /// an InsertPoint stores the instruction before something is inserted. For 67 /// instance, if both point to the same instruction, two IRBuilders alternating 68 /// creating instruction will cause the instructions to be interleaved. 69 static bool isConflictIP(IRBuilder<>::InsertPoint IP1, 70 IRBuilder<>::InsertPoint IP2) { 71 if (!IP1.isSet() || !IP2.isSet()) 72 return false; 73 return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint(); 74 } 75 #endif 76 77 void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) { 78 LLVMContext &Ctx = Fn.getContext(); 79 80 // Get the function's current attributes. 81 auto Attrs = Fn.getAttributes(); 82 auto FnAttrs = Attrs.getFnAttrs(); 83 auto RetAttrs = Attrs.getRetAttrs(); 84 SmallVector<AttributeSet, 4> ArgAttrs; 85 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo) 86 ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo)); 87 88 #define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet; 89 #include "llvm/Frontend/OpenMP/OMPKinds.def" 90 91 // Add attributes to the function declaration. 92 switch (FnID) { 93 #define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \ 94 case Enum: \ 95 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \ 96 RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \ 97 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \ 98 ArgAttrs[ArgNo] = \ 99 ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \ 100 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \ 101 break; 102 #include "llvm/Frontend/OpenMP/OMPKinds.def" 103 default: 104 // Attributes are optional. 105 break; 106 } 107 } 108 109 FunctionCallee 110 OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) { 111 FunctionType *FnTy = nullptr; 112 Function *Fn = nullptr; 113 114 // Try to find the declation in the module first. 115 switch (FnID) { 116 #define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \ 117 case Enum: \ 118 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \ 119 IsVarArg); \ 120 Fn = M.getFunction(Str); \ 121 break; 122 #include "llvm/Frontend/OpenMP/OMPKinds.def" 123 } 124 125 if (!Fn) { 126 // Create a new declaration if we need one. 127 switch (FnID) { 128 #define OMP_RTL(Enum, Str, ...) \ 129 case Enum: \ 130 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \ 131 break; 132 #include "llvm/Frontend/OpenMP/OMPKinds.def" 133 } 134 135 // Add information if the runtime function takes a callback function 136 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) { 137 if (!Fn->hasMetadata(LLVMContext::MD_callback)) { 138 LLVMContext &Ctx = Fn->getContext(); 139 MDBuilder MDB(Ctx); 140 // Annotate the callback behavior of the runtime function: 141 // - The callback callee is argument number 2 (microtask). 142 // - The first two arguments of the callback callee are unknown (-1). 143 // - All variadic arguments to the runtime function are passed to the 144 // callback callee. 145 Fn->addMetadata( 146 LLVMContext::MD_callback, 147 *MDNode::get(Ctx, {MDB.createCallbackEncoding( 148 2, {-1, -1}, /* VarArgsArePassed */ true)})); 149 } 150 } 151 152 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName() 153 << " with type " << *Fn->getFunctionType() << "\n"); 154 addAttributes(FnID, *Fn); 155 156 } else { 157 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName() 158 << " with type " << *Fn->getFunctionType() << "\n"); 159 } 160 161 assert(Fn && "Failed to create OpenMP runtime function"); 162 163 // Cast the function to the expected type if necessary 164 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo()); 165 return {FnTy, C}; 166 } 167 168 Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) { 169 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID); 170 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee()); 171 assert(Fn && "Failed to create OpenMP runtime function pointer"); 172 return Fn; 173 } 174 175 void OpenMPIRBuilder::initialize() { initializeTypes(M); } 176 177 void OpenMPIRBuilder::finalize(Function *Fn) { 178 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; 179 SmallVector<BasicBlock *, 32> Blocks; 180 SmallVector<OutlineInfo, 16> DeferredOutlines; 181 for (OutlineInfo &OI : OutlineInfos) { 182 // Skip functions that have not finalized yet; may happen with nested 183 // function generation. 184 if (Fn && OI.getFunction() != Fn) { 185 DeferredOutlines.push_back(OI); 186 continue; 187 } 188 189 ParallelRegionBlockSet.clear(); 190 Blocks.clear(); 191 OI.collectBlocks(ParallelRegionBlockSet, Blocks); 192 193 Function *OuterFn = OI.getFunction(); 194 CodeExtractorAnalysisCache CEAC(*OuterFn); 195 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, 196 /* AggregateArgs */ true, 197 /* BlockFrequencyInfo */ nullptr, 198 /* BranchProbabilityInfo */ nullptr, 199 /* AssumptionCache */ nullptr, 200 /* AllowVarArgs */ true, 201 /* AllowAlloca */ true, 202 /* Suffix */ ".omp_par"); 203 204 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n"); 205 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName() 206 << " Exit: " << OI.ExitBB->getName() << "\n"); 207 assert(Extractor.isEligible() && 208 "Expected OpenMP outlining to be possible!"); 209 210 for (auto *V : OI.ExcludeArgsFromAggregate) 211 Extractor.excludeArgFromAggregate(V); 212 213 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC); 214 215 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n"); 216 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n"); 217 assert(OutlinedFn->getReturnType()->isVoidTy() && 218 "OpenMP outlined functions should not return a value!"); 219 220 // For compability with the clang CG we move the outlined function after the 221 // one with the parallel region. 222 OutlinedFn->removeFromParent(); 223 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn); 224 225 // Remove the artificial entry introduced by the extractor right away, we 226 // made our own entry block after all. 227 { 228 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock(); 229 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB); 230 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry); 231 // Move instructions from the to-be-deleted ArtificialEntry to the entry 232 // basic block of the parallel region. CodeExtractor generates 233 // instructions to unwrap the aggregate argument and may sink 234 // allocas/bitcasts for values that are solely used in the outlined region 235 // and do not escape. 236 assert(!ArtificialEntry.empty() && 237 "Expected instructions to add in the outlined region entry"); 238 for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(), 239 End = ArtificialEntry.rend(); 240 It != End;) { 241 Instruction &I = *It; 242 It++; 243 244 if (I.isTerminator()) 245 continue; 246 247 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt()); 248 } 249 250 OI.EntryBB->moveBefore(&ArtificialEntry); 251 ArtificialEntry.eraseFromParent(); 252 } 253 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB); 254 assert(OutlinedFn && OutlinedFn->getNumUses() == 1); 255 256 // Run a user callback, e.g. to add attributes. 257 if (OI.PostOutlineCB) 258 OI.PostOutlineCB(*OutlinedFn); 259 } 260 261 // Remove work items that have been completed. 262 OutlineInfos = std::move(DeferredOutlines); 263 } 264 265 OpenMPIRBuilder::~OpenMPIRBuilder() { 266 assert(OutlineInfos.empty() && "There must be no outstanding outlinings"); 267 } 268 269 GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) { 270 IntegerType *I32Ty = Type::getInt32Ty(M.getContext()); 271 auto *GV = 272 new GlobalVariable(M, I32Ty, 273 /* isConstant = */ true, GlobalValue::WeakODRLinkage, 274 ConstantInt::get(I32Ty, Value), Name); 275 GV->setVisibility(GlobalValue::HiddenVisibility); 276 277 return GV; 278 } 279 280 Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr, 281 uint32_t SrcLocStrSize, 282 IdentFlag LocFlags, 283 unsigned Reserve2Flags) { 284 // Enable "C-mode". 285 LocFlags |= OMP_IDENT_FLAG_KMPC; 286 287 Constant *&Ident = 288 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}]; 289 if (!Ident) { 290 Constant *I32Null = ConstantInt::getNullValue(Int32); 291 Constant *IdentData[] = {I32Null, 292 ConstantInt::get(Int32, uint32_t(LocFlags)), 293 ConstantInt::get(Int32, Reserve2Flags), 294 ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr}; 295 Constant *Initializer = 296 ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData); 297 298 // Look for existing encoding of the location + flags, not needed but 299 // minimizes the difference to the existing solution while we transition. 300 for (GlobalVariable &GV : M.getGlobalList()) 301 if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer()) 302 if (GV.getInitializer() == Initializer) 303 Ident = &GV; 304 305 if (!Ident) { 306 auto *GV = new GlobalVariable( 307 M, OpenMPIRBuilder::Ident, 308 /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "", 309 nullptr, GlobalValue::NotThreadLocal, 310 M.getDataLayout().getDefaultGlobalsAddressSpace()); 311 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); 312 GV->setAlignment(Align(8)); 313 Ident = GV; 314 } 315 } 316 317 return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr); 318 } 319 320 Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr, 321 uint32_t &SrcLocStrSize) { 322 SrcLocStrSize = LocStr.size(); 323 Constant *&SrcLocStr = SrcLocStrMap[LocStr]; 324 if (!SrcLocStr) { 325 Constant *Initializer = 326 ConstantDataArray::getString(M.getContext(), LocStr); 327 328 // Look for existing encoding of the location, not needed but minimizes the 329 // difference to the existing solution while we transition. 330 for (GlobalVariable &GV : M.getGlobalList()) 331 if (GV.isConstant() && GV.hasInitializer() && 332 GV.getInitializer() == Initializer) 333 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr); 334 335 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "", 336 /* AddressSpace */ 0, &M); 337 } 338 return SrcLocStr; 339 } 340 341 Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName, 342 StringRef FileName, 343 unsigned Line, unsigned Column, 344 uint32_t &SrcLocStrSize) { 345 SmallString<128> Buffer; 346 Buffer.push_back(';'); 347 Buffer.append(FileName); 348 Buffer.push_back(';'); 349 Buffer.append(FunctionName); 350 Buffer.push_back(';'); 351 Buffer.append(std::to_string(Line)); 352 Buffer.push_back(';'); 353 Buffer.append(std::to_string(Column)); 354 Buffer.push_back(';'); 355 Buffer.push_back(';'); 356 return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize); 357 } 358 359 Constant * 360 OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) { 361 StringRef UnknownLoc = ";unknown;unknown;0;0;;"; 362 return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize); 363 } 364 365 Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, 366 uint32_t &SrcLocStrSize, 367 Function *F) { 368 DILocation *DIL = DL.get(); 369 if (!DIL) 370 return getOrCreateDefaultSrcLocStr(SrcLocStrSize); 371 StringRef FileName = M.getName(); 372 if (DIFile *DIF = DIL->getFile()) 373 if (Optional<StringRef> Source = DIF->getSource()) 374 FileName = *Source; 375 StringRef Function = DIL->getScope()->getSubprogram()->getName(); 376 if (Function.empty() && F) 377 Function = F->getName(); 378 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(), 379 DIL->getColumn(), SrcLocStrSize); 380 } 381 382 Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc, 383 uint32_t &SrcLocStrSize) { 384 return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize, 385 Loc.IP.getBlock()->getParent()); 386 } 387 388 Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) { 389 return Builder.CreateCall( 390 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident, 391 "omp_global_thread_num"); 392 } 393 394 OpenMPIRBuilder::InsertPointTy 395 OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK, 396 bool ForceSimpleCall, bool CheckCancelFlag) { 397 if (!updateToLocation(Loc)) 398 return Loc.IP; 399 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag); 400 } 401 402 OpenMPIRBuilder::InsertPointTy 403 OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind, 404 bool ForceSimpleCall, bool CheckCancelFlag) { 405 // Build call __kmpc_cancel_barrier(loc, thread_id) or 406 // __kmpc_barrier(loc, thread_id); 407 408 IdentFlag BarrierLocFlags; 409 switch (Kind) { 410 case OMPD_for: 411 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR; 412 break; 413 case OMPD_sections: 414 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS; 415 break; 416 case OMPD_single: 417 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE; 418 break; 419 case OMPD_barrier: 420 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL; 421 break; 422 default: 423 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL; 424 break; 425 } 426 427 uint32_t SrcLocStrSize; 428 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 429 Value *Args[] = { 430 getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags), 431 getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))}; 432 433 // If we are in a cancellable parallel region, barriers are cancellation 434 // points. 435 // TODO: Check why we would force simple calls or to ignore the cancel flag. 436 bool UseCancelBarrier = 437 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel); 438 439 Value *Result = 440 Builder.CreateCall(getOrCreateRuntimeFunctionPtr( 441 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier 442 : OMPRTL___kmpc_barrier), 443 Args); 444 445 if (UseCancelBarrier && CheckCancelFlag) 446 emitCancelationCheckImpl(Result, OMPD_parallel); 447 448 return Builder.saveIP(); 449 } 450 451 OpenMPIRBuilder::InsertPointTy 452 OpenMPIRBuilder::createCancel(const LocationDescription &Loc, 453 Value *IfCondition, 454 omp::Directive CanceledDirective) { 455 if (!updateToLocation(Loc)) 456 return Loc.IP; 457 458 // LLVM utilities like blocks with terminators. 459 auto *UI = Builder.CreateUnreachable(); 460 461 Instruction *ThenTI = UI, *ElseTI = nullptr; 462 if (IfCondition) 463 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); 464 Builder.SetInsertPoint(ThenTI); 465 466 Value *CancelKind = nullptr; 467 switch (CanceledDirective) { 468 #define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \ 469 case DirectiveEnum: \ 470 CancelKind = Builder.getInt32(Value); \ 471 break; 472 #include "llvm/Frontend/OpenMP/OMPKinds.def" 473 default: 474 llvm_unreachable("Unknown cancel kind!"); 475 } 476 477 uint32_t SrcLocStrSize; 478 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 479 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 480 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind}; 481 Value *Result = Builder.CreateCall( 482 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args); 483 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) { 484 if (CanceledDirective == OMPD_parallel) { 485 IRBuilder<>::InsertPointGuard IPG(Builder); 486 Builder.restoreIP(IP); 487 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), 488 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, 489 /* CheckCancelFlag */ false); 490 } 491 }; 492 493 // The actual cancel logic is shared with others, e.g., cancel_barriers. 494 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB); 495 496 // Update the insertion point and remove the terminator we introduced. 497 Builder.SetInsertPoint(UI->getParent()); 498 UI->eraseFromParent(); 499 500 return Builder.saveIP(); 501 } 502 503 void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag, 504 omp::Directive CanceledDirective, 505 FinalizeCallbackTy ExitCB) { 506 assert(isLastFinalizationInfoCancellable(CanceledDirective) && 507 "Unexpected cancellation!"); 508 509 // For a cancel barrier we create two new blocks. 510 BasicBlock *BB = Builder.GetInsertBlock(); 511 BasicBlock *NonCancellationBlock; 512 if (Builder.GetInsertPoint() == BB->end()) { 513 // TODO: This branch will not be needed once we moved to the 514 // OpenMPIRBuilder codegen completely. 515 NonCancellationBlock = BasicBlock::Create( 516 BB->getContext(), BB->getName() + ".cont", BB->getParent()); 517 } else { 518 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint()); 519 BB->getTerminator()->eraseFromParent(); 520 Builder.SetInsertPoint(BB); 521 } 522 BasicBlock *CancellationBlock = BasicBlock::Create( 523 BB->getContext(), BB->getName() + ".cncl", BB->getParent()); 524 525 // Jump to them based on the return value. 526 Value *Cmp = Builder.CreateIsNull(CancelFlag); 527 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock, 528 /* TODO weight */ nullptr, nullptr); 529 530 // From the cancellation block we finalize all variables and go to the 531 // post finalization block that is known to the FiniCB callback. 532 Builder.SetInsertPoint(CancellationBlock); 533 if (ExitCB) 534 ExitCB(Builder.saveIP()); 535 auto &FI = FinalizationStack.back(); 536 FI.FiniCB(Builder.saveIP()); 537 538 // The continuation block is where code generation continues. 539 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin()); 540 } 541 542 IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel( 543 const LocationDescription &Loc, InsertPointTy OuterAllocaIP, 544 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, 545 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, 546 omp::ProcBindKind ProcBind, bool IsCancellable) { 547 assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous"); 548 549 if (!updateToLocation(Loc)) 550 return Loc.IP; 551 552 uint32_t SrcLocStrSize; 553 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 554 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 555 Value *ThreadID = getOrCreateThreadID(Ident); 556 557 if (NumThreads) { 558 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads) 559 Value *Args[] = { 560 Ident, ThreadID, 561 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)}; 562 Builder.CreateCall( 563 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args); 564 } 565 566 if (ProcBind != OMP_PROC_BIND_default) { 567 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind) 568 Value *Args[] = { 569 Ident, ThreadID, 570 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)}; 571 Builder.CreateCall( 572 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args); 573 } 574 575 BasicBlock *InsertBB = Builder.GetInsertBlock(); 576 Function *OuterFn = InsertBB->getParent(); 577 578 // Save the outer alloca block because the insertion iterator may get 579 // invalidated and we still need this later. 580 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock(); 581 582 // Vector to remember instructions we used only during the modeling but which 583 // we want to delete at the end. 584 SmallVector<Instruction *, 4> ToBeDeleted; 585 586 // Change the location to the outer alloca insertion point to create and 587 // initialize the allocas we pass into the parallel region. 588 Builder.restoreIP(OuterAllocaIP); 589 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr"); 590 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr"); 591 592 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the 593 // program, otherwise we only need them for modeling purposes to get the 594 // associated arguments in the outlined function. In the former case, 595 // initialize the allocas properly, in the latter case, delete them later. 596 if (IfCondition) { 597 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr); 598 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr); 599 } else { 600 ToBeDeleted.push_back(TIDAddr); 601 ToBeDeleted.push_back(ZeroAddr); 602 } 603 604 // Create an artificial insertion point that will also ensure the blocks we 605 // are about to split are not degenerated. 606 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB); 607 608 Instruction *ThenTI = UI, *ElseTI = nullptr; 609 if (IfCondition) 610 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); 611 612 BasicBlock *ThenBB = ThenTI->getParent(); 613 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry"); 614 BasicBlock *PRegBodyBB = 615 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region"); 616 BasicBlock *PRegPreFiniBB = 617 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize"); 618 BasicBlock *PRegExitBB = 619 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit"); 620 621 auto FiniCBWrapper = [&](InsertPointTy IP) { 622 // Hide "open-ended" blocks from the given FiniCB by setting the right jump 623 // target to the region exit block. 624 if (IP.getBlock()->end() == IP.getPoint()) { 625 IRBuilder<>::InsertPointGuard IPG(Builder); 626 Builder.restoreIP(IP); 627 Instruction *I = Builder.CreateBr(PRegExitBB); 628 IP = InsertPointTy(I->getParent(), I->getIterator()); 629 } 630 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && 631 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && 632 "Unexpected insertion point for finalization call!"); 633 return FiniCB(IP); 634 }; 635 636 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable}); 637 638 // Generate the privatization allocas in the block that will become the entry 639 // of the outlined function. 640 Builder.SetInsertPoint(PRegEntryBB->getTerminator()); 641 InsertPointTy InnerAllocaIP = Builder.saveIP(); 642 643 AllocaInst *PrivTIDAddr = 644 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local"); 645 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid"); 646 647 // Add some fake uses for OpenMP provided arguments. 648 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use")); 649 Instruction *ZeroAddrUse = 650 Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use"); 651 ToBeDeleted.push_back(ZeroAddrUse); 652 653 // ThenBB 654 // | 655 // V 656 // PRegionEntryBB <- Privatization allocas are placed here. 657 // | 658 // V 659 // PRegionBodyBB <- BodeGen is invoked here. 660 // | 661 // V 662 // PRegPreFiniBB <- The block we will start finalization from. 663 // | 664 // V 665 // PRegionExitBB <- A common exit to simplify block collection. 666 // 667 668 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n"); 669 670 // Let the caller create the body. 671 assert(BodyGenCB && "Expected body generation callback!"); 672 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin()); 673 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB); 674 675 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n"); 676 677 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call); 678 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) { 679 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) { 680 llvm::LLVMContext &Ctx = F->getContext(); 681 MDBuilder MDB(Ctx); 682 // Annotate the callback behavior of the __kmpc_fork_call: 683 // - The callback callee is argument number 2 (microtask). 684 // - The first two arguments of the callback callee are unknown (-1). 685 // - All variadic arguments to the __kmpc_fork_call are passed to the 686 // callback callee. 687 F->addMetadata( 688 llvm::LLVMContext::MD_callback, 689 *llvm::MDNode::get( 690 Ctx, {MDB.createCallbackEncoding(2, {-1, -1}, 691 /* VarArgsArePassed */ true)})); 692 } 693 } 694 695 OutlineInfo OI; 696 OI.PostOutlineCB = [=](Function &OutlinedFn) { 697 // Add some known attributes. 698 OutlinedFn.addParamAttr(0, Attribute::NoAlias); 699 OutlinedFn.addParamAttr(1, Attribute::NoAlias); 700 OutlinedFn.addFnAttr(Attribute::NoUnwind); 701 OutlinedFn.addFnAttr(Attribute::NoRecurse); 702 703 assert(OutlinedFn.arg_size() >= 2 && 704 "Expected at least tid and bounded tid as arguments"); 705 unsigned NumCapturedVars = 706 OutlinedFn.arg_size() - /* tid & bounded tid */ 2; 707 708 CallInst *CI = cast<CallInst>(OutlinedFn.user_back()); 709 CI->getParent()->setName("omp_parallel"); 710 Builder.SetInsertPoint(CI); 711 712 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn); 713 Value *ForkCallArgs[] = { 714 Ident, Builder.getInt32(NumCapturedVars), 715 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)}; 716 717 SmallVector<Value *, 16> RealArgs; 718 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs)); 719 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end()); 720 721 Builder.CreateCall(RTLFn, RealArgs); 722 723 LLVM_DEBUG(dbgs() << "With fork_call placed: " 724 << *Builder.GetInsertBlock()->getParent() << "\n"); 725 726 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end()); 727 728 // Initialize the local TID stack location with the argument value. 729 Builder.SetInsertPoint(PrivTID); 730 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin(); 731 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr); 732 733 // If no "if" clause was present we do not need the call created during 734 // outlining, otherwise we reuse it in the serialized parallel region. 735 if (!ElseTI) { 736 CI->eraseFromParent(); 737 } else { 738 739 // If an "if" clause was present we are now generating the serialized 740 // version into the "else" branch. 741 Builder.SetInsertPoint(ElseTI); 742 743 // Build calls __kmpc_serialized_parallel(&Ident, GTid); 744 Value *SerializedParallelCallArgs[] = {Ident, ThreadID}; 745 Builder.CreateCall( 746 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel), 747 SerializedParallelCallArgs); 748 749 // OutlinedFn(>id, &zero, CapturedStruct); 750 CI->removeFromParent(); 751 Builder.Insert(CI); 752 753 // __kmpc_end_serialized_parallel(&Ident, GTid); 754 Value *EndArgs[] = {Ident, ThreadID}; 755 Builder.CreateCall( 756 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel), 757 EndArgs); 758 759 LLVM_DEBUG(dbgs() << "With serialized parallel region: " 760 << *Builder.GetInsertBlock()->getParent() << "\n"); 761 } 762 763 for (Instruction *I : ToBeDeleted) 764 I->eraseFromParent(); 765 }; 766 767 // Adjust the finalization stack, verify the adjustment, and call the 768 // finalize function a last time to finalize values between the pre-fini 769 // block and the exit block if we left the parallel "the normal way". 770 auto FiniInfo = FinalizationStack.pop_back_val(); 771 (void)FiniInfo; 772 assert(FiniInfo.DK == OMPD_parallel && 773 "Unexpected finalization stack state!"); 774 775 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator(); 776 777 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator()); 778 FiniCB(PreFiniIP); 779 780 OI.EntryBB = PRegEntryBB; 781 OI.ExitBB = PRegExitBB; 782 783 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; 784 SmallVector<BasicBlock *, 32> Blocks; 785 OI.collectBlocks(ParallelRegionBlockSet, Blocks); 786 787 // Ensure a single exit node for the outlined region by creating one. 788 // We might have multiple incoming edges to the exit now due to finalizations, 789 // e.g., cancel calls that cause the control flow to leave the region. 790 BasicBlock *PRegOutlinedExitBB = PRegExitBB; 791 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt()); 792 PRegOutlinedExitBB->setName("omp.par.outlined.exit"); 793 Blocks.push_back(PRegOutlinedExitBB); 794 795 CodeExtractorAnalysisCache CEAC(*OuterFn); 796 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, 797 /* AggregateArgs */ false, 798 /* BlockFrequencyInfo */ nullptr, 799 /* BranchProbabilityInfo */ nullptr, 800 /* AssumptionCache */ nullptr, 801 /* AllowVarArgs */ true, 802 /* AllowAlloca */ true, 803 /* Suffix */ ".omp_par"); 804 805 // Find inputs to, outputs from the code region. 806 BasicBlock *CommonExit = nullptr; 807 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands; 808 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit); 809 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands); 810 811 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n"); 812 813 FunctionCallee TIDRTLFn = 814 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num); 815 816 auto PrivHelper = [&](Value &V) { 817 if (&V == TIDAddr || &V == ZeroAddr) { 818 OI.ExcludeArgsFromAggregate.push_back(&V); 819 return; 820 } 821 822 SetVector<Use *> Uses; 823 for (Use &U : V.uses()) 824 if (auto *UserI = dyn_cast<Instruction>(U.getUser())) 825 if (ParallelRegionBlockSet.count(UserI->getParent())) 826 Uses.insert(&U); 827 828 // __kmpc_fork_call expects extra arguments as pointers. If the input 829 // already has a pointer type, everything is fine. Otherwise, store the 830 // value onto stack and load it back inside the to-be-outlined region. This 831 // will ensure only the pointer will be passed to the function. 832 // FIXME: if there are more than 15 trailing arguments, they must be 833 // additionally packed in a struct. 834 Value *Inner = &V; 835 if (!V.getType()->isPointerTy()) { 836 IRBuilder<>::InsertPointGuard Guard(Builder); 837 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n"); 838 839 Builder.restoreIP(OuterAllocaIP); 840 Value *Ptr = 841 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded"); 842 843 // Store to stack at end of the block that currently branches to the entry 844 // block of the to-be-outlined region. 845 Builder.SetInsertPoint(InsertBB, 846 InsertBB->getTerminator()->getIterator()); 847 Builder.CreateStore(&V, Ptr); 848 849 // Load back next to allocations in the to-be-outlined region. 850 Builder.restoreIP(InnerAllocaIP); 851 Inner = Builder.CreateLoad(V.getType(), Ptr); 852 } 853 854 Value *ReplacementValue = nullptr; 855 CallInst *CI = dyn_cast<CallInst>(&V); 856 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) { 857 ReplacementValue = PrivTID; 858 } else { 859 Builder.restoreIP( 860 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue)); 861 assert(ReplacementValue && 862 "Expected copy/create callback to set replacement value!"); 863 if (ReplacementValue == &V) 864 return; 865 } 866 867 for (Use *UPtr : Uses) 868 UPtr->set(ReplacementValue); 869 }; 870 871 // Reset the inner alloca insertion as it will be used for loading the values 872 // wrapped into pointers before passing them into the to-be-outlined region. 873 // Configure it to insert immediately after the fake use of zero address so 874 // that they are available in the generated body and so that the 875 // OpenMP-related values (thread ID and zero address pointers) remain leading 876 // in the argument list. 877 InnerAllocaIP = IRBuilder<>::InsertPoint( 878 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator()); 879 880 // Reset the outer alloca insertion point to the entry of the relevant block 881 // in case it was invalidated. 882 OuterAllocaIP = IRBuilder<>::InsertPoint( 883 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt()); 884 885 for (Value *Input : Inputs) { 886 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n"); 887 PrivHelper(*Input); 888 } 889 LLVM_DEBUG({ 890 for (Value *Output : Outputs) 891 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n"); 892 }); 893 assert(Outputs.empty() && 894 "OpenMP outlining should not produce live-out values!"); 895 896 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n"); 897 LLVM_DEBUG({ 898 for (auto *BB : Blocks) 899 dbgs() << " PBR: " << BB->getName() << "\n"; 900 }); 901 902 // Register the outlined info. 903 addOutlineInfo(std::move(OI)); 904 905 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end()); 906 UI->eraseFromParent(); 907 908 return AfterIP; 909 } 910 911 void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) { 912 // Build call void __kmpc_flush(ident_t *loc) 913 uint32_t SrcLocStrSize; 914 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 915 Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)}; 916 917 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args); 918 } 919 920 void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) { 921 if (!updateToLocation(Loc)) 922 return; 923 emitFlush(Loc); 924 } 925 926 void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) { 927 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 928 // global_tid); 929 uint32_t SrcLocStrSize; 930 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 931 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 932 Value *Args[] = {Ident, getOrCreateThreadID(Ident)}; 933 934 // Ignore return result until untied tasks are supported. 935 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait), 936 Args); 937 } 938 939 void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) { 940 if (!updateToLocation(Loc)) 941 return; 942 emitTaskwaitImpl(Loc); 943 } 944 945 void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) { 946 // Build call __kmpc_omp_taskyield(loc, thread_id, 0); 947 uint32_t SrcLocStrSize; 948 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 949 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 950 Constant *I32Null = ConstantInt::getNullValue(Int32); 951 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null}; 952 953 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield), 954 Args); 955 } 956 957 void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) { 958 if (!updateToLocation(Loc)) 959 return; 960 emitTaskyieldImpl(Loc); 961 } 962 963 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections( 964 const LocationDescription &Loc, InsertPointTy AllocaIP, 965 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, 966 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) { 967 if (!updateToLocation(Loc)) 968 return Loc.IP; 969 970 auto FiniCBWrapper = [&](InsertPointTy IP) { 971 if (IP.getBlock()->end() != IP.getPoint()) 972 return FiniCB(IP); 973 // This must be done otherwise any nested constructs using FinalizeOMPRegion 974 // will fail because that function requires the Finalization Basic Block to 975 // have a terminator, which is already removed by EmitOMPRegionBody. 976 // IP is currently at cancelation block. 977 // We need to backtrack to the condition block to fetch 978 // the exit block and create a branch from cancelation 979 // to exit block. 980 IRBuilder<>::InsertPointGuard IPG(Builder); 981 Builder.restoreIP(IP); 982 auto *CaseBB = IP.getBlock()->getSinglePredecessor(); 983 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); 984 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); 985 Instruction *I = Builder.CreateBr(ExitBB); 986 IP = InsertPointTy(I->getParent(), I->getIterator()); 987 return FiniCB(IP); 988 }; 989 990 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable}); 991 992 // Each section is emitted as a switch case 993 // Each finalization callback is handled from clang.EmitOMPSectionDirective() 994 // -> OMP.createSection() which generates the IR for each section 995 // Iterate through all sections and emit a switch construct: 996 // switch (IV) { 997 // case 0: 998 // <SectionStmt[0]>; 999 // break; 1000 // ... 1001 // case <NumSection> - 1: 1002 // <SectionStmt[<NumSection> - 1]>; 1003 // break; 1004 // } 1005 // ... 1006 // section_loop.after: 1007 // <FiniCB>; 1008 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) { 1009 auto *CurFn = CodeGenIP.getBlock()->getParent(); 1010 auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor(); 1011 auto *ForExitBB = CodeGenIP.getBlock() 1012 ->getSinglePredecessor() 1013 ->getTerminator() 1014 ->getSuccessor(1); 1015 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB); 1016 Builder.restoreIP(CodeGenIP); 1017 unsigned CaseNumber = 0; 1018 for (auto SectionCB : SectionCBs) { 1019 auto *CaseBB = BasicBlock::Create(M.getContext(), 1020 "omp_section_loop.body.case", CurFn); 1021 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB); 1022 Builder.SetInsertPoint(CaseBB); 1023 SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB); 1024 CaseNumber++; 1025 } 1026 // remove the existing terminator from body BB since there can be no 1027 // terminators after switch/case 1028 CodeGenIP.getBlock()->getTerminator()->eraseFromParent(); 1029 }; 1030 // Loop body ends here 1031 // LowerBound, UpperBound, and STride for createCanonicalLoop 1032 Type *I32Ty = Type::getInt32Ty(M.getContext()); 1033 Value *LB = ConstantInt::get(I32Ty, 0); 1034 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size()); 1035 Value *ST = ConstantInt::get(I32Ty, 1); 1036 llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop( 1037 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop"); 1038 Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator()); 1039 AllocaIP = Builder.saveIP(); 1040 InsertPointTy AfterIP = 1041 applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait); 1042 BasicBlock *LoopAfterBB = AfterIP.getBlock(); 1043 Instruction *SplitPos = LoopAfterBB->getTerminator(); 1044 if (!isa_and_nonnull<BranchInst>(SplitPos)) 1045 SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB); 1046 // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB, 1047 // which requires a BB with branch 1048 BasicBlock *ExitBB = 1049 LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end"); 1050 SplitPos->eraseFromParent(); 1051 1052 // Apply the finalization callback in LoopAfterBB 1053 auto FiniInfo = FinalizationStack.pop_back_val(); 1054 assert(FiniInfo.DK == OMPD_sections && 1055 "Unexpected finalization stack state!"); 1056 Builder.SetInsertPoint(LoopAfterBB->getTerminator()); 1057 FiniInfo.FiniCB(Builder.saveIP()); 1058 Builder.SetInsertPoint(ExitBB); 1059 1060 return Builder.saveIP(); 1061 } 1062 1063 OpenMPIRBuilder::InsertPointTy 1064 OpenMPIRBuilder::createSection(const LocationDescription &Loc, 1065 BodyGenCallbackTy BodyGenCB, 1066 FinalizeCallbackTy FiniCB) { 1067 if (!updateToLocation(Loc)) 1068 return Loc.IP; 1069 1070 auto FiniCBWrapper = [&](InsertPointTy IP) { 1071 if (IP.getBlock()->end() != IP.getPoint()) 1072 return FiniCB(IP); 1073 // This must be done otherwise any nested constructs using FinalizeOMPRegion 1074 // will fail because that function requires the Finalization Basic Block to 1075 // have a terminator, which is already removed by EmitOMPRegionBody. 1076 // IP is currently at cancelation block. 1077 // We need to backtrack to the condition block to fetch 1078 // the exit block and create a branch from cancelation 1079 // to exit block. 1080 IRBuilder<>::InsertPointGuard IPG(Builder); 1081 Builder.restoreIP(IP); 1082 auto *CaseBB = Loc.IP.getBlock(); 1083 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); 1084 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); 1085 Instruction *I = Builder.CreateBr(ExitBB); 1086 IP = InsertPointTy(I->getParent(), I->getIterator()); 1087 return FiniCB(IP); 1088 }; 1089 1090 Directive OMPD = Directive::OMPD_sections; 1091 // Since we are using Finalization Callback here, HasFinalize 1092 // and IsCancellable have to be true 1093 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper, 1094 /*Conditional*/ false, /*hasFinalize*/ true, 1095 /*IsCancellable*/ true); 1096 } 1097 1098 /// Create a function with a unique name and a "void (i8*, i8*)" signature in 1099 /// the given module and return it. 1100 Function *getFreshReductionFunc(Module &M) { 1101 Type *VoidTy = Type::getVoidTy(M.getContext()); 1102 Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext()); 1103 auto *FuncTy = 1104 FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false); 1105 return Function::Create(FuncTy, GlobalVariable::InternalLinkage, 1106 M.getDataLayout().getDefaultGlobalsAddressSpace(), 1107 ".omp.reduction.func", &M); 1108 } 1109 1110 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions( 1111 const LocationDescription &Loc, InsertPointTy AllocaIP, 1112 ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) { 1113 for (const ReductionInfo &RI : ReductionInfos) { 1114 (void)RI; 1115 assert(RI.Variable && "expected non-null variable"); 1116 assert(RI.PrivateVariable && "expected non-null private variable"); 1117 assert(RI.ReductionGen && "expected non-null reduction generator callback"); 1118 assert(RI.Variable->getType() == RI.PrivateVariable->getType() && 1119 "expected variables and their private equivalents to have the same " 1120 "type"); 1121 assert(RI.Variable->getType()->isPointerTy() && 1122 "expected variables to be pointers"); 1123 } 1124 1125 if (!updateToLocation(Loc)) 1126 return InsertPointTy(); 1127 1128 BasicBlock *InsertBlock = Loc.IP.getBlock(); 1129 BasicBlock *ContinuationBlock = 1130 InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize"); 1131 InsertBlock->getTerminator()->eraseFromParent(); 1132 1133 // Create and populate array of type-erased pointers to private reduction 1134 // values. 1135 unsigned NumReductions = ReductionInfos.size(); 1136 Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions); 1137 Builder.restoreIP(AllocaIP); 1138 Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array"); 1139 1140 Builder.SetInsertPoint(InsertBlock, InsertBlock->end()); 1141 1142 for (auto En : enumerate(ReductionInfos)) { 1143 unsigned Index = En.index(); 1144 const ReductionInfo &RI = En.value(); 1145 Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64( 1146 RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index)); 1147 Value *Casted = 1148 Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(), 1149 "private.red.var." + Twine(Index) + ".casted"); 1150 Builder.CreateStore(Casted, RedArrayElemPtr); 1151 } 1152 1153 // Emit a call to the runtime function that orchestrates the reduction. 1154 // Declare the reduction function in the process. 1155 Function *Func = Builder.GetInsertBlock()->getParent(); 1156 Module *Module = Func->getParent(); 1157 Value *RedArrayPtr = 1158 Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr"); 1159 uint32_t SrcLocStrSize; 1160 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 1161 bool CanGenerateAtomic = 1162 llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) { 1163 return RI.AtomicReductionGen; 1164 }); 1165 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize, 1166 CanGenerateAtomic 1167 ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE 1168 : IdentFlag(0)); 1169 Value *ThreadId = getOrCreateThreadID(Ident); 1170 Constant *NumVariables = Builder.getInt32(NumReductions); 1171 const DataLayout &DL = Module->getDataLayout(); 1172 unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy); 1173 Constant *RedArraySize = Builder.getInt64(RedArrayByteSize); 1174 Function *ReductionFunc = getFreshReductionFunc(*Module); 1175 Value *Lock = getOMPCriticalRegionLock(".reduction"); 1176 Function *ReduceFunc = getOrCreateRuntimeFunctionPtr( 1177 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait 1178 : RuntimeFunction::OMPRTL___kmpc_reduce); 1179 CallInst *ReduceCall = 1180 Builder.CreateCall(ReduceFunc, 1181 {Ident, ThreadId, NumVariables, RedArraySize, 1182 RedArrayPtr, ReductionFunc, Lock}, 1183 "reduce"); 1184 1185 // Create final reduction entry blocks for the atomic and non-atomic case. 1186 // Emit IR that dispatches control flow to one of the blocks based on the 1187 // reduction supporting the atomic mode. 1188 BasicBlock *NonAtomicRedBlock = 1189 BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func); 1190 BasicBlock *AtomicRedBlock = 1191 BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func); 1192 SwitchInst *Switch = 1193 Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2); 1194 Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock); 1195 Switch->addCase(Builder.getInt32(2), AtomicRedBlock); 1196 1197 // Populate the non-atomic reduction using the elementwise reduction function. 1198 // This loads the elements from the global and private variables and reduces 1199 // them before storing back the result to the global variable. 1200 Builder.SetInsertPoint(NonAtomicRedBlock); 1201 for (auto En : enumerate(ReductionInfos)) { 1202 const ReductionInfo &RI = En.value(); 1203 Type *ValueType = RI.ElementType; 1204 Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable, 1205 "red.value." + Twine(En.index())); 1206 Value *PrivateRedValue = 1207 Builder.CreateLoad(ValueType, RI.PrivateVariable, 1208 "red.private.value." + Twine(En.index())); 1209 Value *Reduced; 1210 Builder.restoreIP( 1211 RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced)); 1212 if (!Builder.GetInsertBlock()) 1213 return InsertPointTy(); 1214 Builder.CreateStore(Reduced, RI.Variable); 1215 } 1216 Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr( 1217 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait 1218 : RuntimeFunction::OMPRTL___kmpc_end_reduce); 1219 Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock}); 1220 Builder.CreateBr(ContinuationBlock); 1221 1222 // Populate the atomic reduction using the atomic elementwise reduction 1223 // function. There are no loads/stores here because they will be happening 1224 // inside the atomic elementwise reduction. 1225 Builder.SetInsertPoint(AtomicRedBlock); 1226 if (CanGenerateAtomic) { 1227 for (const ReductionInfo &RI : ReductionInfos) { 1228 Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType, 1229 RI.Variable, RI.PrivateVariable)); 1230 if (!Builder.GetInsertBlock()) 1231 return InsertPointTy(); 1232 } 1233 Builder.CreateBr(ContinuationBlock); 1234 } else { 1235 Builder.CreateUnreachable(); 1236 } 1237 1238 // Populate the outlined reduction function using the elementwise reduction 1239 // function. Partial values are extracted from the type-erased array of 1240 // pointers to private variables. 1241 BasicBlock *ReductionFuncBlock = 1242 BasicBlock::Create(Module->getContext(), "", ReductionFunc); 1243 Builder.SetInsertPoint(ReductionFuncBlock); 1244 Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0), 1245 RedArrayTy->getPointerTo()); 1246 Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1), 1247 RedArrayTy->getPointerTo()); 1248 for (auto En : enumerate(ReductionInfos)) { 1249 const ReductionInfo &RI = En.value(); 1250 Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64( 1251 RedArrayTy, LHSArrayPtr, 0, En.index()); 1252 Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr); 1253 Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType()); 1254 Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); 1255 Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64( 1256 RedArrayTy, RHSArrayPtr, 0, En.index()); 1257 Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr); 1258 Value *RHSPtr = 1259 Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType()); 1260 Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr); 1261 Value *Reduced; 1262 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced)); 1263 if (!Builder.GetInsertBlock()) 1264 return InsertPointTy(); 1265 Builder.CreateStore(Reduced, LHSPtr); 1266 } 1267 Builder.CreateRetVoid(); 1268 1269 Builder.SetInsertPoint(ContinuationBlock); 1270 return Builder.saveIP(); 1271 } 1272 1273 OpenMPIRBuilder::InsertPointTy 1274 OpenMPIRBuilder::createMaster(const LocationDescription &Loc, 1275 BodyGenCallbackTy BodyGenCB, 1276 FinalizeCallbackTy FiniCB) { 1277 1278 if (!updateToLocation(Loc)) 1279 return Loc.IP; 1280 1281 Directive OMPD = Directive::OMPD_master; 1282 uint32_t SrcLocStrSize; 1283 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 1284 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1285 Value *ThreadId = getOrCreateThreadID(Ident); 1286 Value *Args[] = {Ident, ThreadId}; 1287 1288 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master); 1289 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); 1290 1291 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master); 1292 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 1293 1294 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 1295 /*Conditional*/ true, /*hasFinalize*/ true); 1296 } 1297 1298 OpenMPIRBuilder::InsertPointTy 1299 OpenMPIRBuilder::createMasked(const LocationDescription &Loc, 1300 BodyGenCallbackTy BodyGenCB, 1301 FinalizeCallbackTy FiniCB, Value *Filter) { 1302 if (!updateToLocation(Loc)) 1303 return Loc.IP; 1304 1305 Directive OMPD = Directive::OMPD_masked; 1306 uint32_t SrcLocStrSize; 1307 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 1308 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1309 Value *ThreadId = getOrCreateThreadID(Ident); 1310 Value *Args[] = {Ident, ThreadId, Filter}; 1311 Value *ArgsEnd[] = {Ident, ThreadId}; 1312 1313 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked); 1314 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); 1315 1316 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked); 1317 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd); 1318 1319 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 1320 /*Conditional*/ true, /*hasFinalize*/ true); 1321 } 1322 1323 CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton( 1324 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, 1325 BasicBlock *PostInsertBefore, const Twine &Name) { 1326 Module *M = F->getParent(); 1327 LLVMContext &Ctx = M->getContext(); 1328 Type *IndVarTy = TripCount->getType(); 1329 1330 // Create the basic block structure. 1331 BasicBlock *Preheader = 1332 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore); 1333 BasicBlock *Header = 1334 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore); 1335 BasicBlock *Cond = 1336 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore); 1337 BasicBlock *Body = 1338 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore); 1339 BasicBlock *Latch = 1340 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore); 1341 BasicBlock *Exit = 1342 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore); 1343 BasicBlock *After = 1344 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore); 1345 1346 // Use specified DebugLoc for new instructions. 1347 Builder.SetCurrentDebugLocation(DL); 1348 1349 Builder.SetInsertPoint(Preheader); 1350 Builder.CreateBr(Header); 1351 1352 Builder.SetInsertPoint(Header); 1353 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv"); 1354 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader); 1355 Builder.CreateBr(Cond); 1356 1357 Builder.SetInsertPoint(Cond); 1358 Value *Cmp = 1359 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp"); 1360 Builder.CreateCondBr(Cmp, Body, Exit); 1361 1362 Builder.SetInsertPoint(Body); 1363 Builder.CreateBr(Latch); 1364 1365 Builder.SetInsertPoint(Latch); 1366 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1), 1367 "omp_" + Name + ".next", /*HasNUW=*/true); 1368 Builder.CreateBr(Header); 1369 IndVarPHI->addIncoming(Next, Latch); 1370 1371 Builder.SetInsertPoint(Exit); 1372 Builder.CreateBr(After); 1373 1374 // Remember and return the canonical control flow. 1375 LoopInfos.emplace_front(); 1376 CanonicalLoopInfo *CL = &LoopInfos.front(); 1377 1378 CL->Header = Header; 1379 CL->Cond = Cond; 1380 CL->Latch = Latch; 1381 CL->Exit = Exit; 1382 1383 #ifndef NDEBUG 1384 CL->assertOK(); 1385 #endif 1386 return CL; 1387 } 1388 1389 CanonicalLoopInfo * 1390 OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc, 1391 LoopBodyGenCallbackTy BodyGenCB, 1392 Value *TripCount, const Twine &Name) { 1393 BasicBlock *BB = Loc.IP.getBlock(); 1394 BasicBlock *NextBB = BB->getNextNode(); 1395 1396 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(), 1397 NextBB, NextBB, Name); 1398 BasicBlock *After = CL->getAfter(); 1399 1400 // If location is not set, don't connect the loop. 1401 if (updateToLocation(Loc)) { 1402 // Split the loop at the insertion point: Branch to the preheader and move 1403 // every following instruction to after the loop (the After BB). Also, the 1404 // new successor is the loop's after block. 1405 Builder.CreateBr(CL->getPreheader()); 1406 After->getInstList().splice(After->begin(), BB->getInstList(), 1407 Builder.GetInsertPoint(), BB->end()); 1408 After->replaceSuccessorsPhiUsesWith(BB, After); 1409 } 1410 1411 // Emit the body content. We do it after connecting the loop to the CFG to 1412 // avoid that the callback encounters degenerate BBs. 1413 BodyGenCB(CL->getBodyIP(), CL->getIndVar()); 1414 1415 #ifndef NDEBUG 1416 CL->assertOK(); 1417 #endif 1418 return CL; 1419 } 1420 1421 CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop( 1422 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, 1423 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, 1424 InsertPointTy ComputeIP, const Twine &Name) { 1425 1426 // Consider the following difficulties (assuming 8-bit signed integers): 1427 // * Adding \p Step to the loop counter which passes \p Stop may overflow: 1428 // DO I = 1, 100, 50 1429 /// * A \p Step of INT_MIN cannot not be normalized to a positive direction: 1430 // DO I = 100, 0, -128 1431 1432 // Start, Stop and Step must be of the same integer type. 1433 auto *IndVarTy = cast<IntegerType>(Start->getType()); 1434 assert(IndVarTy == Stop->getType() && "Stop type mismatch"); 1435 assert(IndVarTy == Step->getType() && "Step type mismatch"); 1436 1437 LocationDescription ComputeLoc = 1438 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc; 1439 updateToLocation(ComputeLoc); 1440 1441 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0); 1442 ConstantInt *One = ConstantInt::get(IndVarTy, 1); 1443 1444 // Like Step, but always positive. 1445 Value *Incr = Step; 1446 1447 // Distance between Start and Stop; always positive. 1448 Value *Span; 1449 1450 // Condition whether there are no iterations are executed at all, e.g. because 1451 // UB < LB. 1452 Value *ZeroCmp; 1453 1454 if (IsSigned) { 1455 // Ensure that increment is positive. If not, negate and invert LB and UB. 1456 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero); 1457 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step); 1458 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start); 1459 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop); 1460 Span = Builder.CreateSub(UB, LB, "", false, true); 1461 ZeroCmp = Builder.CreateICmp( 1462 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB); 1463 } else { 1464 Span = Builder.CreateSub(Stop, Start, "", true); 1465 ZeroCmp = Builder.CreateICmp( 1466 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start); 1467 } 1468 1469 Value *CountIfLooping; 1470 if (InclusiveStop) { 1471 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One); 1472 } else { 1473 // Avoid incrementing past stop since it could overflow. 1474 Value *CountIfTwo = Builder.CreateAdd( 1475 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One); 1476 Value *OneCmp = Builder.CreateICmp( 1477 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr); 1478 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo); 1479 } 1480 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping, 1481 "omp_" + Name + ".tripcount"); 1482 1483 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) { 1484 Builder.restoreIP(CodeGenIP); 1485 Value *Span = Builder.CreateMul(IV, Step); 1486 Value *IndVar = Builder.CreateAdd(Span, Start); 1487 BodyGenCB(Builder.saveIP(), IndVar); 1488 }; 1489 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP(); 1490 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name); 1491 } 1492 1493 // Returns an LLVM function to call for initializing loop bounds using OpenMP 1494 // static scheduling depending on `type`. Only i32 and i64 are supported by the 1495 // runtime. Always interpret integers as unsigned similarly to 1496 // CanonicalLoopInfo. 1497 static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M, 1498 OpenMPIRBuilder &OMPBuilder) { 1499 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1500 if (Bitwidth == 32) 1501 return OMPBuilder.getOrCreateRuntimeFunction( 1502 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u); 1503 if (Bitwidth == 64) 1504 return OMPBuilder.getOrCreateRuntimeFunction( 1505 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u); 1506 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1507 } 1508 1509 // Sets the number of loop iterations to the given value. This value must be 1510 // valid in the condition block (i.e., defined in the preheader) and is 1511 // interpreted as an unsigned integer. 1512 void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) { 1513 Instruction *CmpI = &CLI->getCond()->front(); 1514 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount"); 1515 CmpI->setOperand(1, TripCount); 1516 CLI->assertOK(); 1517 } 1518 1519 OpenMPIRBuilder::InsertPointTy 1520 OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, 1521 InsertPointTy AllocaIP, 1522 bool NeedsBarrier, Value *Chunk) { 1523 assert(CLI->isValid() && "Requires a valid canonical loop"); 1524 assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) && 1525 "Require dedicated allocate IP"); 1526 1527 // Set up the source location value for OpenMP runtime. 1528 Builder.restoreIP(CLI->getPreheaderIP()); 1529 Builder.SetCurrentDebugLocation(DL); 1530 1531 uint32_t SrcLocStrSize; 1532 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); 1533 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1534 1535 // Declare useful OpenMP runtime functions. 1536 Value *IV = CLI->getIndVar(); 1537 Type *IVTy = IV->getType(); 1538 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this); 1539 FunctionCallee StaticFini = 1540 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini); 1541 1542 // Allocate space for computed loop bounds as expected by the "init" function. 1543 Builder.restoreIP(AllocaIP); 1544 Type *I32Type = Type::getInt32Ty(M.getContext()); 1545 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1546 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1547 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1548 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1549 1550 // At the end of the preheader, prepare for calling the "init" function by 1551 // storing the current loop bounds into the allocated space. A canonical loop 1552 // always iterates from 0 to trip-count with step 1. Note that "init" expects 1553 // and produces an inclusive upper bound. 1554 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); 1555 Constant *Zero = ConstantInt::get(IVTy, 0); 1556 Constant *One = ConstantInt::get(IVTy, 1); 1557 Builder.CreateStore(Zero, PLowerBound); 1558 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One); 1559 Builder.CreateStore(UpperBound, PUpperBound); 1560 Builder.CreateStore(One, PStride); 1561 1562 // FIXME: schedule(static) is NOT the same as schedule(static,1) 1563 if (!Chunk) 1564 Chunk = One; 1565 1566 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1567 1568 Constant *SchedulingType = 1569 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static)); 1570 1571 // Call the "init" function and update the trip count of the loop with the 1572 // value it produced. 1573 Builder.CreateCall(StaticInit, 1574 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, 1575 PUpperBound, PStride, One, Chunk}); 1576 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); 1577 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); 1578 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); 1579 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One); 1580 setCanonicalLoopTripCount(CLI, TripCount); 1581 1582 // Update all uses of the induction variable except the one in the condition 1583 // block that compares it with the actual upper bound, and the increment in 1584 // the latch block. 1585 // TODO: this can eventually move to CanonicalLoopInfo or to a new 1586 // CanonicalLoopInfoUpdater interface. 1587 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt()); 1588 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound); 1589 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) { 1590 auto *Instr = dyn_cast<Instruction>(U.getUser()); 1591 return !Instr || 1592 (Instr->getParent() != CLI->getCond() && 1593 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV); 1594 }); 1595 1596 // In the "exit" block, call the "fini" function. 1597 Builder.SetInsertPoint(CLI->getExit(), 1598 CLI->getExit()->getTerminator()->getIterator()); 1599 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); 1600 1601 // Add the barrier if requested. 1602 if (NeedsBarrier) 1603 createBarrier(LocationDescription(Builder.saveIP(), DL), 1604 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1605 /* CheckCancelFlag */ false); 1606 1607 InsertPointTy AfterIP = CLI->getAfterIP(); 1608 CLI->invalidate(); 1609 1610 return AfterIP; 1611 } 1612 1613 OpenMPIRBuilder::InsertPointTy 1614 OpenMPIRBuilder::applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, 1615 InsertPointTy AllocaIP, bool NeedsBarrier) { 1616 // Currently only supports static schedules. 1617 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier); 1618 } 1619 1620 /// Returns an LLVM function to call for initializing loop bounds using OpenMP 1621 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1622 /// the runtime. Always interpret integers as unsigned similarly to 1623 /// CanonicalLoopInfo. 1624 static FunctionCallee 1625 getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { 1626 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1627 if (Bitwidth == 32) 1628 return OMPBuilder.getOrCreateRuntimeFunction( 1629 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u); 1630 if (Bitwidth == 64) 1631 return OMPBuilder.getOrCreateRuntimeFunction( 1632 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u); 1633 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1634 } 1635 1636 /// Returns an LLVM function to call for updating the next loop using OpenMP 1637 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1638 /// the runtime. Always interpret integers as unsigned similarly to 1639 /// CanonicalLoopInfo. 1640 static FunctionCallee 1641 getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { 1642 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1643 if (Bitwidth == 32) 1644 return OMPBuilder.getOrCreateRuntimeFunction( 1645 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u); 1646 if (Bitwidth == 64) 1647 return OMPBuilder.getOrCreateRuntimeFunction( 1648 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u); 1649 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1650 } 1651 1652 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop( 1653 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, 1654 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) { 1655 assert(CLI->isValid() && "Requires a valid canonical loop"); 1656 assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) && 1657 "Require dedicated allocate IP"); 1658 1659 // Set up the source location value for OpenMP runtime. 1660 Builder.SetCurrentDebugLocation(DL); 1661 1662 uint32_t SrcLocStrSize; 1663 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); 1664 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 1665 1666 // Declare useful OpenMP runtime functions. 1667 Value *IV = CLI->getIndVar(); 1668 Type *IVTy = IV->getType(); 1669 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this); 1670 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this); 1671 1672 // Allocate space for computed loop bounds as expected by the "init" function. 1673 Builder.restoreIP(AllocaIP); 1674 Type *I32Type = Type::getInt32Ty(M.getContext()); 1675 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1676 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1677 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1678 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1679 1680 // At the end of the preheader, prepare for calling the "init" function by 1681 // storing the current loop bounds into the allocated space. A canonical loop 1682 // always iterates from 0 to trip-count with step 1. Note that "init" expects 1683 // and produces an inclusive upper bound. 1684 BasicBlock *PreHeader = CLI->getPreheader(); 1685 Builder.SetInsertPoint(PreHeader->getTerminator()); 1686 Constant *One = ConstantInt::get(IVTy, 1); 1687 Builder.CreateStore(One, PLowerBound); 1688 Value *UpperBound = CLI->getTripCount(); 1689 Builder.CreateStore(UpperBound, PUpperBound); 1690 Builder.CreateStore(One, PStride); 1691 1692 BasicBlock *Header = CLI->getHeader(); 1693 BasicBlock *Exit = CLI->getExit(); 1694 BasicBlock *Cond = CLI->getCond(); 1695 InsertPointTy AfterIP = CLI->getAfterIP(); 1696 1697 // The CLI will be "broken" in the code below, as the loop is no longer 1698 // a valid canonical loop. 1699 1700 if (!Chunk) 1701 Chunk = One; 1702 1703 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1704 1705 Constant *SchedulingType = 1706 ConstantInt::get(I32Type, static_cast<int>(SchedType)); 1707 1708 // Call the "init" function. 1709 Builder.CreateCall(DynamicInit, 1710 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One, 1711 UpperBound, /* step */ One, Chunk}); 1712 1713 // An outer loop around the existing one. 1714 BasicBlock *OuterCond = BasicBlock::Create( 1715 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond", 1716 PreHeader->getParent()); 1717 // This needs to be 32-bit always, so can't use the IVTy Zero above. 1718 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt()); 1719 Value *Res = 1720 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter, 1721 PLowerBound, PUpperBound, PStride}); 1722 Constant *Zero32 = ConstantInt::get(I32Type, 0); 1723 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32); 1724 Value *LowerBound = 1725 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb"); 1726 Builder.CreateCondBr(MoreWork, Header, Exit); 1727 1728 // Change PHI-node in loop header to use outer cond rather than preheader, 1729 // and set IV to the LowerBound. 1730 Instruction *Phi = &Header->front(); 1731 auto *PI = cast<PHINode>(Phi); 1732 PI->setIncomingBlock(0, OuterCond); 1733 PI->setIncomingValue(0, LowerBound); 1734 1735 // Then set the pre-header to jump to the OuterCond 1736 Instruction *Term = PreHeader->getTerminator(); 1737 auto *Br = cast<BranchInst>(Term); 1738 Br->setSuccessor(0, OuterCond); 1739 1740 // Modify the inner condition: 1741 // * Use the UpperBound returned from the DynamicNext call. 1742 // * jump to the loop outer loop when done with one of the inner loops. 1743 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt()); 1744 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub"); 1745 Instruction *Comp = &*Builder.GetInsertPoint(); 1746 auto *CI = cast<CmpInst>(Comp); 1747 CI->setOperand(1, UpperBound); 1748 // Redirect the inner exit to branch to outer condition. 1749 Instruction *Branch = &Cond->back(); 1750 auto *BI = cast<BranchInst>(Branch); 1751 assert(BI->getSuccessor(1) == Exit); 1752 BI->setSuccessor(1, OuterCond); 1753 1754 // Add the barrier if requested. 1755 if (NeedsBarrier) { 1756 Builder.SetInsertPoint(&Exit->back()); 1757 createBarrier(LocationDescription(Builder.saveIP(), DL), 1758 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1759 /* CheckCancelFlag */ false); 1760 } 1761 1762 CLI->invalidate(); 1763 return AfterIP; 1764 } 1765 1766 /// Make \p Source branch to \p Target. 1767 /// 1768 /// Handles two situations: 1769 /// * \p Source already has an unconditional branch. 1770 /// * \p Source is a degenerate block (no terminator because the BB is 1771 /// the current head of the IR construction). 1772 static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { 1773 if (Instruction *Term = Source->getTerminator()) { 1774 auto *Br = cast<BranchInst>(Term); 1775 assert(!Br->isConditional() && 1776 "BB's terminator must be an unconditional branch (or degenerate)"); 1777 BasicBlock *Succ = Br->getSuccessor(0); 1778 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); 1779 Br->setSuccessor(0, Target); 1780 return; 1781 } 1782 1783 auto *NewBr = BranchInst::Create(Target, Source); 1784 NewBr->setDebugLoc(DL); 1785 } 1786 1787 /// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is, 1788 /// after this \p OldTarget will be orphaned. 1789 static void redirectAllPredecessorsTo(BasicBlock *OldTarget, 1790 BasicBlock *NewTarget, DebugLoc DL) { 1791 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget))) 1792 redirectTo(Pred, NewTarget, DL); 1793 } 1794 1795 /// Determine which blocks in \p BBs are reachable from outside and remove the 1796 /// ones that are not reachable from the function. 1797 static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) { 1798 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()}; 1799 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) { 1800 for (Use &U : BB->uses()) { 1801 auto *UseInst = dyn_cast<Instruction>(U.getUser()); 1802 if (!UseInst) 1803 continue; 1804 if (BBsToErase.count(UseInst->getParent())) 1805 continue; 1806 return true; 1807 } 1808 return false; 1809 }; 1810 1811 while (true) { 1812 bool Changed = false; 1813 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) { 1814 if (HasRemainingUses(BB)) { 1815 BBsToErase.erase(BB); 1816 Changed = true; 1817 } 1818 } 1819 if (!Changed) 1820 break; 1821 } 1822 1823 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end()); 1824 DeleteDeadBlocks(BBVec); 1825 } 1826 1827 CanonicalLoopInfo * 1828 OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, 1829 InsertPointTy ComputeIP) { 1830 assert(Loops.size() >= 1 && "At least one loop required"); 1831 size_t NumLoops = Loops.size(); 1832 1833 // Nothing to do if there is already just one loop. 1834 if (NumLoops == 1) 1835 return Loops.front(); 1836 1837 CanonicalLoopInfo *Outermost = Loops.front(); 1838 CanonicalLoopInfo *Innermost = Loops.back(); 1839 BasicBlock *OrigPreheader = Outermost->getPreheader(); 1840 BasicBlock *OrigAfter = Outermost->getAfter(); 1841 Function *F = OrigPreheader->getParent(); 1842 1843 // Loop control blocks that may become orphaned later. 1844 SmallVector<BasicBlock *, 12> OldControlBBs; 1845 OldControlBBs.reserve(6 * Loops.size()); 1846 for (CanonicalLoopInfo *Loop : Loops) 1847 Loop->collectControlBlocks(OldControlBBs); 1848 1849 // Setup the IRBuilder for inserting the trip count computation. 1850 Builder.SetCurrentDebugLocation(DL); 1851 if (ComputeIP.isSet()) 1852 Builder.restoreIP(ComputeIP); 1853 else 1854 Builder.restoreIP(Outermost->getPreheaderIP()); 1855 1856 // Derive the collapsed' loop trip count. 1857 // TODO: Find common/largest indvar type. 1858 Value *CollapsedTripCount = nullptr; 1859 for (CanonicalLoopInfo *L : Loops) { 1860 assert(L->isValid() && 1861 "All loops to collapse must be valid canonical loops"); 1862 Value *OrigTripCount = L->getTripCount(); 1863 if (!CollapsedTripCount) { 1864 CollapsedTripCount = OrigTripCount; 1865 continue; 1866 } 1867 1868 // TODO: Enable UndefinedSanitizer to diagnose an overflow here. 1869 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount, 1870 {}, /*HasNUW=*/true); 1871 } 1872 1873 // Create the collapsed loop control flow. 1874 CanonicalLoopInfo *Result = 1875 createLoopSkeleton(DL, CollapsedTripCount, F, 1876 OrigPreheader->getNextNode(), OrigAfter, "collapsed"); 1877 1878 // Build the collapsed loop body code. 1879 // Start with deriving the input loop induction variables from the collapsed 1880 // one, using a divmod scheme. To preserve the original loops' order, the 1881 // innermost loop use the least significant bits. 1882 Builder.restoreIP(Result->getBodyIP()); 1883 1884 Value *Leftover = Result->getIndVar(); 1885 SmallVector<Value *> NewIndVars; 1886 NewIndVars.resize(NumLoops); 1887 for (int i = NumLoops - 1; i >= 1; --i) { 1888 Value *OrigTripCount = Loops[i]->getTripCount(); 1889 1890 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount); 1891 NewIndVars[i] = NewIndVar; 1892 1893 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount); 1894 } 1895 // Outermost loop gets all the remaining bits. 1896 NewIndVars[0] = Leftover; 1897 1898 // Construct the loop body control flow. 1899 // We progressively construct the branch structure following in direction of 1900 // the control flow, from the leading in-between code, the loop nest body, the 1901 // trailing in-between code, and rejoining the collapsed loop's latch. 1902 // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If 1903 // the ContinueBlock is set, continue with that block. If ContinuePred, use 1904 // its predecessors as sources. 1905 BasicBlock *ContinueBlock = Result->getBody(); 1906 BasicBlock *ContinuePred = nullptr; 1907 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest, 1908 BasicBlock *NextSrc) { 1909 if (ContinueBlock) 1910 redirectTo(ContinueBlock, Dest, DL); 1911 else 1912 redirectAllPredecessorsTo(ContinuePred, Dest, DL); 1913 1914 ContinueBlock = nullptr; 1915 ContinuePred = NextSrc; 1916 }; 1917 1918 // The code before the nested loop of each level. 1919 // Because we are sinking it into the nest, it will be executed more often 1920 // that the original loop. More sophisticated schemes could keep track of what 1921 // the in-between code is and instantiate it only once per thread. 1922 for (size_t i = 0; i < NumLoops - 1; ++i) 1923 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader()); 1924 1925 // Connect the loop nest body. 1926 ContinueWith(Innermost->getBody(), Innermost->getLatch()); 1927 1928 // The code after the nested loop at each level. 1929 for (size_t i = NumLoops - 1; i > 0; --i) 1930 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch()); 1931 1932 // Connect the finished loop to the collapsed loop latch. 1933 ContinueWith(Result->getLatch(), nullptr); 1934 1935 // Replace the input loops with the new collapsed loop. 1936 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL); 1937 redirectTo(Result->getAfter(), Outermost->getAfter(), DL); 1938 1939 // Replace the input loop indvars with the derived ones. 1940 for (size_t i = 0; i < NumLoops; ++i) 1941 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]); 1942 1943 // Remove unused parts of the input loops. 1944 removeUnusedBlocksFromParent(OldControlBBs); 1945 1946 for (CanonicalLoopInfo *L : Loops) 1947 L->invalidate(); 1948 1949 #ifndef NDEBUG 1950 Result->assertOK(); 1951 #endif 1952 return Result; 1953 } 1954 1955 std::vector<CanonicalLoopInfo *> 1956 OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, 1957 ArrayRef<Value *> TileSizes) { 1958 assert(TileSizes.size() == Loops.size() && 1959 "Must pass as many tile sizes as there are loops"); 1960 int NumLoops = Loops.size(); 1961 assert(NumLoops >= 1 && "At least one loop to tile required"); 1962 1963 CanonicalLoopInfo *OutermostLoop = Loops.front(); 1964 CanonicalLoopInfo *InnermostLoop = Loops.back(); 1965 Function *F = OutermostLoop->getBody()->getParent(); 1966 BasicBlock *InnerEnter = InnermostLoop->getBody(); 1967 BasicBlock *InnerLatch = InnermostLoop->getLatch(); 1968 1969 // Loop control blocks that may become orphaned later. 1970 SmallVector<BasicBlock *, 12> OldControlBBs; 1971 OldControlBBs.reserve(6 * Loops.size()); 1972 for (CanonicalLoopInfo *Loop : Loops) 1973 Loop->collectControlBlocks(OldControlBBs); 1974 1975 // Collect original trip counts and induction variable to be accessible by 1976 // index. Also, the structure of the original loops is not preserved during 1977 // the construction of the tiled loops, so do it before we scavenge the BBs of 1978 // any original CanonicalLoopInfo. 1979 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars; 1980 for (CanonicalLoopInfo *L : Loops) { 1981 assert(L->isValid() && "All input loops must be valid canonical loops"); 1982 OrigTripCounts.push_back(L->getTripCount()); 1983 OrigIndVars.push_back(L->getIndVar()); 1984 } 1985 1986 // Collect the code between loop headers. These may contain SSA definitions 1987 // that are used in the loop nest body. To be usable with in the innermost 1988 // body, these BasicBlocks will be sunk into the loop nest body. That is, 1989 // these instructions may be executed more often than before the tiling. 1990 // TODO: It would be sufficient to only sink them into body of the 1991 // corresponding tile loop. 1992 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode; 1993 for (int i = 0; i < NumLoops - 1; ++i) { 1994 CanonicalLoopInfo *Surrounding = Loops[i]; 1995 CanonicalLoopInfo *Nested = Loops[i + 1]; 1996 1997 BasicBlock *EnterBB = Surrounding->getBody(); 1998 BasicBlock *ExitBB = Nested->getHeader(); 1999 InbetweenCode.emplace_back(EnterBB, ExitBB); 2000 } 2001 2002 // Compute the trip counts of the floor loops. 2003 Builder.SetCurrentDebugLocation(DL); 2004 Builder.restoreIP(OutermostLoop->getPreheaderIP()); 2005 SmallVector<Value *, 4> FloorCount, FloorRems; 2006 for (int i = 0; i < NumLoops; ++i) { 2007 Value *TileSize = TileSizes[i]; 2008 Value *OrigTripCount = OrigTripCounts[i]; 2009 Type *IVType = OrigTripCount->getType(); 2010 2011 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize); 2012 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize); 2013 2014 // 0 if tripcount divides the tilesize, 1 otherwise. 2015 // 1 means we need an additional iteration for a partial tile. 2016 // 2017 // Unfortunately we cannot just use the roundup-formula 2018 // (tripcount + tilesize - 1)/tilesize 2019 // because the summation might overflow. We do not want introduce undefined 2020 // behavior when the untiled loop nest did not. 2021 Value *FloorTripOverflow = 2022 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); 2023 2024 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); 2025 FloorTripCount = 2026 Builder.CreateAdd(FloorTripCount, FloorTripOverflow, 2027 "omp_floor" + Twine(i) + ".tripcount", true); 2028 2029 // Remember some values for later use. 2030 FloorCount.push_back(FloorTripCount); 2031 FloorRems.push_back(FloorTripRem); 2032 } 2033 2034 // Generate the new loop nest, from the outermost to the innermost. 2035 std::vector<CanonicalLoopInfo *> Result; 2036 Result.reserve(NumLoops * 2); 2037 2038 // The basic block of the surrounding loop that enters the nest generated 2039 // loop. 2040 BasicBlock *Enter = OutermostLoop->getPreheader(); 2041 2042 // The basic block of the surrounding loop where the inner code should 2043 // continue. 2044 BasicBlock *Continue = OutermostLoop->getAfter(); 2045 2046 // Where the next loop basic block should be inserted. 2047 BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); 2048 2049 auto EmbeddNewLoop = 2050 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( 2051 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { 2052 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( 2053 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); 2054 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); 2055 redirectTo(EmbeddedLoop->getAfter(), Continue, DL); 2056 2057 // Setup the position where the next embedded loop connects to this loop. 2058 Enter = EmbeddedLoop->getBody(); 2059 Continue = EmbeddedLoop->getLatch(); 2060 OutroInsertBefore = EmbeddedLoop->getLatch(); 2061 return EmbeddedLoop; 2062 }; 2063 2064 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, 2065 const Twine &NameBase) { 2066 for (auto P : enumerate(TripCounts)) { 2067 CanonicalLoopInfo *EmbeddedLoop = 2068 EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); 2069 Result.push_back(EmbeddedLoop); 2070 } 2071 }; 2072 2073 EmbeddNewLoops(FloorCount, "floor"); 2074 2075 // Within the innermost floor loop, emit the code that computes the tile 2076 // sizes. 2077 Builder.SetInsertPoint(Enter->getTerminator()); 2078 SmallVector<Value *, 4> TileCounts; 2079 for (int i = 0; i < NumLoops; ++i) { 2080 CanonicalLoopInfo *FloorLoop = Result[i]; 2081 Value *TileSize = TileSizes[i]; 2082 2083 Value *FloorIsEpilogue = 2084 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); 2085 Value *TileTripCount = 2086 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); 2087 2088 TileCounts.push_back(TileTripCount); 2089 } 2090 2091 // Create the tile loops. 2092 EmbeddNewLoops(TileCounts, "tile"); 2093 2094 // Insert the inbetween code into the body. 2095 BasicBlock *BodyEnter = Enter; 2096 BasicBlock *BodyEntered = nullptr; 2097 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { 2098 BasicBlock *EnterBB = P.first; 2099 BasicBlock *ExitBB = P.second; 2100 2101 if (BodyEnter) 2102 redirectTo(BodyEnter, EnterBB, DL); 2103 else 2104 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); 2105 2106 BodyEnter = nullptr; 2107 BodyEntered = ExitBB; 2108 } 2109 2110 // Append the original loop nest body into the generated loop nest body. 2111 if (BodyEnter) 2112 redirectTo(BodyEnter, InnerEnter, DL); 2113 else 2114 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); 2115 redirectAllPredecessorsTo(InnerLatch, Continue, DL); 2116 2117 // Replace the original induction variable with an induction variable computed 2118 // from the tile and floor induction variables. 2119 Builder.restoreIP(Result.back()->getBodyIP()); 2120 for (int i = 0; i < NumLoops; ++i) { 2121 CanonicalLoopInfo *FloorLoop = Result[i]; 2122 CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; 2123 Value *OrigIndVar = OrigIndVars[i]; 2124 Value *Size = TileSizes[i]; 2125 2126 Value *Scale = 2127 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); 2128 Value *Shift = 2129 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); 2130 OrigIndVar->replaceAllUsesWith(Shift); 2131 } 2132 2133 // Remove unused parts of the original loops. 2134 removeUnusedBlocksFromParent(OldControlBBs); 2135 2136 for (CanonicalLoopInfo *L : Loops) 2137 L->invalidate(); 2138 2139 #ifndef NDEBUG 2140 for (CanonicalLoopInfo *GenL : Result) 2141 GenL->assertOK(); 2142 #endif 2143 return Result; 2144 } 2145 2146 /// Attach loop metadata \p Properties to the loop described by \p Loop. If the 2147 /// loop already has metadata, the loop properties are appended. 2148 static void addLoopMetadata(CanonicalLoopInfo *Loop, 2149 ArrayRef<Metadata *> Properties) { 2150 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo"); 2151 2152 // Nothing to do if no property to attach. 2153 if (Properties.empty()) 2154 return; 2155 2156 LLVMContext &Ctx = Loop->getFunction()->getContext(); 2157 SmallVector<Metadata *> NewLoopProperties; 2158 NewLoopProperties.push_back(nullptr); 2159 2160 // If the loop already has metadata, prepend it to the new metadata. 2161 BasicBlock *Latch = Loop->getLatch(); 2162 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch"); 2163 MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop); 2164 if (Existing) 2165 append_range(NewLoopProperties, drop_begin(Existing->operands(), 1)); 2166 2167 append_range(NewLoopProperties, Properties); 2168 MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties); 2169 LoopID->replaceOperandWith(0, LoopID); 2170 2171 Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID); 2172 } 2173 2174 /// Attach llvm.access.group metadata to the memref instructions of \p Block 2175 static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup, 2176 LoopInfo &LI) { 2177 for (Instruction &I : *Block) { 2178 if (I.mayReadOrWriteMemory()) { 2179 // TODO: This instruction may already have access group from 2180 // other pragmas e.g. #pragma clang loop vectorize. Append 2181 // so that the existing metadata is not overwritten. 2182 I.setMetadata(LLVMContext::MD_access_group, AccessGroup); 2183 } 2184 } 2185 } 2186 2187 void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) { 2188 LLVMContext &Ctx = Builder.getContext(); 2189 addLoopMetadata( 2190 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2191 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))}); 2192 } 2193 2194 void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) { 2195 LLVMContext &Ctx = Builder.getContext(); 2196 addLoopMetadata( 2197 Loop, { 2198 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2199 }); 2200 } 2201 2202 void OpenMPIRBuilder::applySimd(DebugLoc, CanonicalLoopInfo *CanonicalLoop) { 2203 LLVMContext &Ctx = Builder.getContext(); 2204 2205 Function *F = CanonicalLoop->getFunction(); 2206 2207 FunctionAnalysisManager FAM; 2208 FAM.registerPass([]() { return DominatorTreeAnalysis(); }); 2209 FAM.registerPass([]() { return LoopAnalysis(); }); 2210 FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); 2211 2212 LoopAnalysis LIA; 2213 LoopInfo &&LI = LIA.run(*F, FAM); 2214 2215 Loop *L = LI.getLoopFor(CanonicalLoop->getHeader()); 2216 2217 SmallSet<BasicBlock *, 8> Reachable; 2218 2219 // Get the basic blocks from the loop in which memref instructions 2220 // can be found. 2221 // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo, 2222 // preferably without running any passes. 2223 for (BasicBlock *Block : L->getBlocks()) { 2224 if (Block == CanonicalLoop->getCond() || 2225 Block == CanonicalLoop->getHeader()) 2226 continue; 2227 Reachable.insert(Block); 2228 } 2229 2230 // Add access group metadata to memory-access instructions. 2231 MDNode *AccessGroup = MDNode::getDistinct(Ctx, {}); 2232 for (BasicBlock *BB : Reachable) 2233 addSimdMetadata(BB, AccessGroup, LI); 2234 2235 // Use the above access group metadata to create loop level 2236 // metadata, which should be distinct for each loop. 2237 ConstantAsMetadata *BoolConst = 2238 ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx))); 2239 // TODO: If the loop has existing parallel access metadata, have 2240 // to combine two lists. 2241 addLoopMetadata( 2242 CanonicalLoop, 2243 {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), 2244 AccessGroup}), 2245 MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"), 2246 BoolConst})}); 2247 } 2248 2249 /// Create the TargetMachine object to query the backend for optimization 2250 /// preferences. 2251 /// 2252 /// Ideally, this would be passed from the front-end to the OpenMPBuilder, but 2253 /// e.g. Clang does not pass it to its CodeGen layer and creates it only when 2254 /// needed for the LLVM pass pipline. We use some default options to avoid 2255 /// having to pass too many settings from the frontend that probably do not 2256 /// matter. 2257 /// 2258 /// Currently, TargetMachine is only used sometimes by the unrollLoopPartial 2259 /// method. If we are going to use TargetMachine for more purposes, especially 2260 /// those that are sensitive to TargetOptions, RelocModel and CodeModel, it 2261 /// might become be worth requiring front-ends to pass on their TargetMachine, 2262 /// or at least cache it between methods. Note that while fontends such as Clang 2263 /// have just a single main TargetMachine per translation unit, "target-cpu" and 2264 /// "target-features" that determine the TargetMachine are per-function and can 2265 /// be overrided using __attribute__((target("OPTIONS"))). 2266 static std::unique_ptr<TargetMachine> 2267 createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) { 2268 Module *M = F->getParent(); 2269 2270 StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString(); 2271 StringRef Features = F->getFnAttribute("target-features").getValueAsString(); 2272 const std::string &Triple = M->getTargetTriple(); 2273 2274 std::string Error; 2275 const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); 2276 if (!TheTarget) 2277 return {}; 2278 2279 llvm::TargetOptions Options; 2280 return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine( 2281 Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None, 2282 OptLevel)); 2283 } 2284 2285 /// Heuristically determine the best-performant unroll factor for \p CLI. This 2286 /// depends on the target processor. We are re-using the same heuristics as the 2287 /// LoopUnrollPass. 2288 static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) { 2289 Function *F = CLI->getFunction(); 2290 2291 // Assume the user requests the most aggressive unrolling, even if the rest of 2292 // the code is optimized using a lower setting. 2293 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive; 2294 std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel); 2295 2296 FunctionAnalysisManager FAM; 2297 FAM.registerPass([]() { return TargetLibraryAnalysis(); }); 2298 FAM.registerPass([]() { return AssumptionAnalysis(); }); 2299 FAM.registerPass([]() { return DominatorTreeAnalysis(); }); 2300 FAM.registerPass([]() { return LoopAnalysis(); }); 2301 FAM.registerPass([]() { return ScalarEvolutionAnalysis(); }); 2302 FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); 2303 TargetIRAnalysis TIRA; 2304 if (TM) 2305 TIRA = TargetIRAnalysis( 2306 [&](const Function &F) { return TM->getTargetTransformInfo(F); }); 2307 FAM.registerPass([&]() { return TIRA; }); 2308 2309 TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM); 2310 ScalarEvolutionAnalysis SEA; 2311 ScalarEvolution &&SE = SEA.run(*F, FAM); 2312 DominatorTreeAnalysis DTA; 2313 DominatorTree &&DT = DTA.run(*F, FAM); 2314 LoopAnalysis LIA; 2315 LoopInfo &&LI = LIA.run(*F, FAM); 2316 AssumptionAnalysis ACT; 2317 AssumptionCache &&AC = ACT.run(*F, FAM); 2318 OptimizationRemarkEmitter ORE{F}; 2319 2320 Loop *L = LI.getLoopFor(CLI->getHeader()); 2321 assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop"); 2322 2323 TargetTransformInfo::UnrollingPreferences UP = 2324 gatherUnrollingPreferences(L, SE, TTI, 2325 /*BlockFrequencyInfo=*/nullptr, 2326 /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel, 2327 /*UserThreshold=*/None, 2328 /*UserCount=*/None, 2329 /*UserAllowPartial=*/true, 2330 /*UserAllowRuntime=*/true, 2331 /*UserUpperBound=*/None, 2332 /*UserFullUnrollMaxCount=*/None); 2333 2334 UP.Force = true; 2335 2336 // Account for additional optimizations taking place before the LoopUnrollPass 2337 // would unroll the loop. 2338 UP.Threshold *= UnrollThresholdFactor; 2339 UP.PartialThreshold *= UnrollThresholdFactor; 2340 2341 // Use normal unroll factors even if the rest of the code is optimized for 2342 // size. 2343 UP.OptSizeThreshold = UP.Threshold; 2344 UP.PartialOptSizeThreshold = UP.PartialThreshold; 2345 2346 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n" 2347 << " Threshold=" << UP.Threshold << "\n" 2348 << " PartialThreshold=" << UP.PartialThreshold << "\n" 2349 << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" 2350 << " PartialOptSizeThreshold=" 2351 << UP.PartialOptSizeThreshold << "\n"); 2352 2353 // Disable peeling. 2354 TargetTransformInfo::PeelingPreferences PP = 2355 gatherPeelingPreferences(L, SE, TTI, 2356 /*UserAllowPeeling=*/false, 2357 /*UserAllowProfileBasedPeeling=*/false, 2358 /*UnrollingSpecficValues=*/false); 2359 2360 SmallPtrSet<const Value *, 32> EphValues; 2361 CodeMetrics::collectEphemeralValues(L, &AC, EphValues); 2362 2363 // Assume that reads and writes to stack variables can be eliminated by 2364 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's 2365 // size. 2366 for (BasicBlock *BB : L->blocks()) { 2367 for (Instruction &I : *BB) { 2368 Value *Ptr; 2369 if (auto *Load = dyn_cast<LoadInst>(&I)) { 2370 Ptr = Load->getPointerOperand(); 2371 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 2372 Ptr = Store->getPointerOperand(); 2373 } else 2374 continue; 2375 2376 Ptr = Ptr->stripPointerCasts(); 2377 2378 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) { 2379 if (Alloca->getParent() == &F->getEntryBlock()) 2380 EphValues.insert(&I); 2381 } 2382 } 2383 } 2384 2385 unsigned NumInlineCandidates; 2386 bool NotDuplicatable; 2387 bool Convergent; 2388 unsigned LoopSize = 2389 ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent, 2390 TTI, EphValues, UP.BEInsns); 2391 LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n"); 2392 2393 // Loop is not unrollable if the loop contains certain instructions. 2394 if (NotDuplicatable || Convergent) { 2395 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n"); 2396 return 1; 2397 } 2398 2399 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might 2400 // be able to use it. 2401 int TripCount = 0; 2402 int MaxTripCount = 0; 2403 bool MaxOrZero = false; 2404 unsigned TripMultiple = 0; 2405 2406 bool UseUpperBound = false; 2407 computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount, 2408 MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP, 2409 UseUpperBound); 2410 unsigned Factor = UP.Count; 2411 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n"); 2412 2413 // This function returns 1 to signal to not unroll a loop. 2414 if (Factor == 0) 2415 return 1; 2416 return Factor; 2417 } 2418 2419 void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, 2420 int32_t Factor, 2421 CanonicalLoopInfo **UnrolledCLI) { 2422 assert(Factor >= 0 && "Unroll factor must not be negative"); 2423 2424 Function *F = Loop->getFunction(); 2425 LLVMContext &Ctx = F->getContext(); 2426 2427 // If the unrolled loop is not used for another loop-associated directive, it 2428 // is sufficient to add metadata for the LoopUnrollPass. 2429 if (!UnrolledCLI) { 2430 SmallVector<Metadata *, 2> LoopMetadata; 2431 LoopMetadata.push_back( 2432 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable"))); 2433 2434 if (Factor >= 1) { 2435 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( 2436 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); 2437 LoopMetadata.push_back(MDNode::get( 2438 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})); 2439 } 2440 2441 addLoopMetadata(Loop, LoopMetadata); 2442 return; 2443 } 2444 2445 // Heuristically determine the unroll factor. 2446 if (Factor == 0) 2447 Factor = computeHeuristicUnrollFactor(Loop); 2448 2449 // No change required with unroll factor 1. 2450 if (Factor == 1) { 2451 *UnrolledCLI = Loop; 2452 return; 2453 } 2454 2455 assert(Factor >= 2 && 2456 "unrolling only makes sense with a factor of 2 or larger"); 2457 2458 Type *IndVarTy = Loop->getIndVarType(); 2459 2460 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully 2461 // unroll the inner loop. 2462 Value *FactorVal = 2463 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor, 2464 /*isSigned=*/false)); 2465 std::vector<CanonicalLoopInfo *> LoopNest = 2466 tileLoops(DL, {Loop}, {FactorVal}); 2467 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling"); 2468 *UnrolledCLI = LoopNest[0]; 2469 CanonicalLoopInfo *InnerLoop = LoopNest[1]; 2470 2471 // LoopUnrollPass can only fully unroll loops with constant trip count. 2472 // Unroll by the unroll factor with a fallback epilog for the remainder 2473 // iterations if necessary. 2474 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( 2475 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); 2476 addLoopMetadata( 2477 InnerLoop, 2478 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2479 MDNode::get( 2480 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})}); 2481 2482 #ifndef NDEBUG 2483 (*UnrolledCLI)->assertOK(); 2484 #endif 2485 } 2486 2487 OpenMPIRBuilder::InsertPointTy 2488 OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc, 2489 llvm::Value *BufSize, llvm::Value *CpyBuf, 2490 llvm::Value *CpyFn, llvm::Value *DidIt) { 2491 if (!updateToLocation(Loc)) 2492 return Loc.IP; 2493 2494 uint32_t SrcLocStrSize; 2495 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2496 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2497 Value *ThreadId = getOrCreateThreadID(Ident); 2498 2499 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt); 2500 2501 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD}; 2502 2503 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate); 2504 Builder.CreateCall(Fn, Args); 2505 2506 return Builder.saveIP(); 2507 } 2508 2509 OpenMPIRBuilder::InsertPointTy 2510 OpenMPIRBuilder::createSingle(const LocationDescription &Loc, 2511 BodyGenCallbackTy BodyGenCB, 2512 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) { 2513 2514 if (!updateToLocation(Loc)) 2515 return Loc.IP; 2516 2517 // If needed (i.e. not null), initialize `DidIt` with 0 2518 if (DidIt) { 2519 Builder.CreateStore(Builder.getInt32(0), DidIt); 2520 } 2521 2522 Directive OMPD = Directive::OMPD_single; 2523 uint32_t SrcLocStrSize; 2524 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2525 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2526 Value *ThreadId = getOrCreateThreadID(Ident); 2527 Value *Args[] = {Ident, ThreadId}; 2528 2529 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single); 2530 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); 2531 2532 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single); 2533 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2534 2535 // generates the following: 2536 // if (__kmpc_single()) { 2537 // .... single region ... 2538 // __kmpc_end_single 2539 // } 2540 2541 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2542 /*Conditional*/ true, /*hasFinalize*/ true); 2543 } 2544 2545 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical( 2546 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, 2547 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) { 2548 2549 if (!updateToLocation(Loc)) 2550 return Loc.IP; 2551 2552 Directive OMPD = Directive::OMPD_critical; 2553 uint32_t SrcLocStrSize; 2554 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2555 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2556 Value *ThreadId = getOrCreateThreadID(Ident); 2557 Value *LockVar = getOMPCriticalRegionLock(CriticalName); 2558 Value *Args[] = {Ident, ThreadId, LockVar}; 2559 2560 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args)); 2561 Function *RTFn = nullptr; 2562 if (HintInst) { 2563 // Add Hint to entry Args and create call 2564 EnterArgs.push_back(HintInst); 2565 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint); 2566 } else { 2567 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical); 2568 } 2569 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs); 2570 2571 Function *ExitRTLFn = 2572 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical); 2573 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2574 2575 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2576 /*Conditional*/ false, /*hasFinalize*/ true); 2577 } 2578 2579 OpenMPIRBuilder::InsertPointTy 2580 OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc, 2581 InsertPointTy AllocaIP, unsigned NumLoops, 2582 ArrayRef<llvm::Value *> StoreValues, 2583 const Twine &Name, bool IsDependSource) { 2584 for (size_t I = 0; I < StoreValues.size(); I++) 2585 assert(StoreValues[I]->getType()->isIntegerTy(64) && 2586 "OpenMP runtime requires depend vec with i64 type"); 2587 2588 if (!updateToLocation(Loc)) 2589 return Loc.IP; 2590 2591 // Allocate space for vector and generate alloc instruction. 2592 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops); 2593 Builder.restoreIP(AllocaIP); 2594 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name); 2595 ArgsBase->setAlignment(Align(8)); 2596 Builder.restoreIP(Loc.IP); 2597 2598 // Store the index value with offset in depend vector. 2599 for (unsigned I = 0; I < NumLoops; ++I) { 2600 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP( 2601 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)}); 2602 StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter); 2603 STInst->setAlignment(Align(8)); 2604 } 2605 2606 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP( 2607 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)}); 2608 2609 uint32_t SrcLocStrSize; 2610 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2611 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2612 Value *ThreadId = getOrCreateThreadID(Ident); 2613 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP}; 2614 2615 Function *RTLFn = nullptr; 2616 if (IsDependSource) 2617 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post); 2618 else 2619 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait); 2620 Builder.CreateCall(RTLFn, Args); 2621 2622 return Builder.saveIP(); 2623 } 2624 2625 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd( 2626 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, 2627 FinalizeCallbackTy FiniCB, bool IsThreads) { 2628 if (!updateToLocation(Loc)) 2629 return Loc.IP; 2630 2631 Directive OMPD = Directive::OMPD_ordered; 2632 Instruction *EntryCall = nullptr; 2633 Instruction *ExitCall = nullptr; 2634 2635 if (IsThreads) { 2636 uint32_t SrcLocStrSize; 2637 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2638 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2639 Value *ThreadId = getOrCreateThreadID(Ident); 2640 Value *Args[] = {Ident, ThreadId}; 2641 2642 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered); 2643 EntryCall = Builder.CreateCall(EntryRTLFn, Args); 2644 2645 Function *ExitRTLFn = 2646 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered); 2647 ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2648 } 2649 2650 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2651 /*Conditional*/ false, /*hasFinalize*/ true); 2652 } 2653 2654 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion( 2655 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, 2656 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional, 2657 bool HasFinalize, bool IsCancellable) { 2658 2659 if (HasFinalize) 2660 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable}); 2661 2662 // Create inlined region's entry and body blocks, in preparation 2663 // for conditional creation 2664 BasicBlock *EntryBB = Builder.GetInsertBlock(); 2665 Instruction *SplitPos = EntryBB->getTerminator(); 2666 if (!isa_and_nonnull<BranchInst>(SplitPos)) 2667 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB); 2668 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end"); 2669 BasicBlock *FiniBB = 2670 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize"); 2671 2672 Builder.SetInsertPoint(EntryBB->getTerminator()); 2673 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional); 2674 2675 // generate body 2676 BodyGenCB(/* AllocaIP */ InsertPointTy(), 2677 /* CodeGenIP */ Builder.saveIP(), *FiniBB); 2678 2679 // If we didn't emit a branch to FiniBB during body generation, it means 2680 // FiniBB is unreachable (e.g. while(1);). stop generating all the 2681 // unreachable blocks, and remove anything we are not going to use. 2682 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0); 2683 if (SkipEmittingRegion) { 2684 FiniBB->eraseFromParent(); 2685 ExitCall->eraseFromParent(); 2686 // Discard finalization if we have it. 2687 if (HasFinalize) { 2688 assert(!FinalizationStack.empty() && 2689 "Unexpected finalization stack state!"); 2690 FinalizationStack.pop_back(); 2691 } 2692 } else { 2693 // emit exit call and do any needed finalization. 2694 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt()); 2695 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 && 2696 FiniBB->getTerminator()->getSuccessor(0) == ExitBB && 2697 "Unexpected control flow graph state!!"); 2698 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize); 2699 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && 2700 "Unexpected Control Flow State!"); 2701 MergeBlockIntoPredecessor(FiniBB); 2702 } 2703 2704 // If we are skipping the region of a non conditional, remove the exit 2705 // block, and clear the builder's insertion point. 2706 assert(SplitPos->getParent() == ExitBB && 2707 "Unexpected Insertion point location!"); 2708 if (!Conditional && SkipEmittingRegion) { 2709 ExitBB->eraseFromParent(); 2710 Builder.ClearInsertionPoint(); 2711 } else { 2712 auto merged = MergeBlockIntoPredecessor(ExitBB); 2713 BasicBlock *ExitPredBB = SplitPos->getParent(); 2714 auto InsertBB = merged ? ExitPredBB : ExitBB; 2715 if (!isa_and_nonnull<BranchInst>(SplitPos)) 2716 SplitPos->eraseFromParent(); 2717 Builder.SetInsertPoint(InsertBB); 2718 } 2719 2720 return Builder.saveIP(); 2721 } 2722 2723 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry( 2724 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) { 2725 // if nothing to do, Return current insertion point. 2726 if (!Conditional || !EntryCall) 2727 return Builder.saveIP(); 2728 2729 BasicBlock *EntryBB = Builder.GetInsertBlock(); 2730 Value *CallBool = Builder.CreateIsNotNull(EntryCall); 2731 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body"); 2732 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB); 2733 2734 // Emit thenBB and set the Builder's insertion point there for 2735 // body generation next. Place the block after the current block. 2736 Function *CurFn = EntryBB->getParent(); 2737 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB); 2738 2739 // Move Entry branch to end of ThenBB, and replace with conditional 2740 // branch (If-stmt) 2741 Instruction *EntryBBTI = EntryBB->getTerminator(); 2742 Builder.CreateCondBr(CallBool, ThenBB, ExitBB); 2743 EntryBBTI->removeFromParent(); 2744 Builder.SetInsertPoint(UI); 2745 Builder.Insert(EntryBBTI); 2746 UI->eraseFromParent(); 2747 Builder.SetInsertPoint(ThenBB->getTerminator()); 2748 2749 // return an insertion point to ExitBB. 2750 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt()); 2751 } 2752 2753 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit( 2754 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, 2755 bool HasFinalize) { 2756 2757 Builder.restoreIP(FinIP); 2758 2759 // If there is finalization to do, emit it before the exit call 2760 if (HasFinalize) { 2761 assert(!FinalizationStack.empty() && 2762 "Unexpected finalization stack state!"); 2763 2764 FinalizationInfo Fi = FinalizationStack.pop_back_val(); 2765 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!"); 2766 2767 Fi.FiniCB(FinIP); 2768 2769 BasicBlock *FiniBB = FinIP.getBlock(); 2770 Instruction *FiniBBTI = FiniBB->getTerminator(); 2771 2772 // set Builder IP for call creation 2773 Builder.SetInsertPoint(FiniBBTI); 2774 } 2775 2776 if (!ExitCall) 2777 return Builder.saveIP(); 2778 2779 // place the Exitcall as last instruction before Finalization block terminator 2780 ExitCall->removeFromParent(); 2781 Builder.Insert(ExitCall); 2782 2783 return IRBuilder<>::InsertPoint(ExitCall->getParent(), 2784 ExitCall->getIterator()); 2785 } 2786 2787 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks( 2788 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, 2789 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) { 2790 if (!IP.isSet()) 2791 return IP; 2792 2793 IRBuilder<>::InsertPointGuard IPG(Builder); 2794 2795 // creates the following CFG structure 2796 // OMP_Entry : (MasterAddr != PrivateAddr)? 2797 // F T 2798 // | \ 2799 // | copin.not.master 2800 // | / 2801 // v / 2802 // copyin.not.master.end 2803 // | 2804 // v 2805 // OMP.Entry.Next 2806 2807 BasicBlock *OMP_Entry = IP.getBlock(); 2808 Function *CurFn = OMP_Entry->getParent(); 2809 BasicBlock *CopyBegin = 2810 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn); 2811 BasicBlock *CopyEnd = nullptr; 2812 2813 // If entry block is terminated, split to preserve the branch to following 2814 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is. 2815 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) { 2816 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(), 2817 "copyin.not.master.end"); 2818 OMP_Entry->getTerminator()->eraseFromParent(); 2819 } else { 2820 CopyEnd = 2821 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn); 2822 } 2823 2824 Builder.SetInsertPoint(OMP_Entry); 2825 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy); 2826 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy); 2827 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr); 2828 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd); 2829 2830 Builder.SetInsertPoint(CopyBegin); 2831 if (BranchtoEnd) 2832 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd)); 2833 2834 return Builder.saveIP(); 2835 } 2836 2837 CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc, 2838 Value *Size, Value *Allocator, 2839 std::string Name) { 2840 IRBuilder<>::InsertPointGuard IPG(Builder); 2841 Builder.restoreIP(Loc.IP); 2842 2843 uint32_t SrcLocStrSize; 2844 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2845 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2846 Value *ThreadId = getOrCreateThreadID(Ident); 2847 Value *Args[] = {ThreadId, Size, Allocator}; 2848 2849 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc); 2850 2851 return Builder.CreateCall(Fn, Args, Name); 2852 } 2853 2854 CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, 2855 Value *Addr, Value *Allocator, 2856 std::string Name) { 2857 IRBuilder<>::InsertPointGuard IPG(Builder); 2858 Builder.restoreIP(Loc.IP); 2859 2860 uint32_t SrcLocStrSize; 2861 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2862 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2863 Value *ThreadId = getOrCreateThreadID(Ident); 2864 Value *Args[] = {ThreadId, Addr, Allocator}; 2865 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free); 2866 return Builder.CreateCall(Fn, Args, Name); 2867 } 2868 2869 CallInst *OpenMPIRBuilder::createOMPInteropInit( 2870 const LocationDescription &Loc, Value *InteropVar, 2871 omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, 2872 Value *DependenceAddress, bool HaveNowaitClause) { 2873 IRBuilder<>::InsertPointGuard IPG(Builder); 2874 Builder.restoreIP(Loc.IP); 2875 2876 uint32_t SrcLocStrSize; 2877 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2878 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2879 Value *ThreadId = getOrCreateThreadID(Ident); 2880 if (Device == nullptr) 2881 Device = ConstantInt::get(Int32, -1); 2882 Constant *InteropTypeVal = ConstantInt::get(Int64, (int)InteropType); 2883 if (NumDependences == nullptr) { 2884 NumDependences = ConstantInt::get(Int32, 0); 2885 PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); 2886 DependenceAddress = ConstantPointerNull::get(PointerTypeVar); 2887 } 2888 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); 2889 Value *Args[] = { 2890 Ident, ThreadId, InteropVar, InteropTypeVal, 2891 Device, NumDependences, DependenceAddress, HaveNowaitClauseVal}; 2892 2893 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init); 2894 2895 return Builder.CreateCall(Fn, Args); 2896 } 2897 2898 CallInst *OpenMPIRBuilder::createOMPInteropDestroy( 2899 const LocationDescription &Loc, Value *InteropVar, Value *Device, 2900 Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) { 2901 IRBuilder<>::InsertPointGuard IPG(Builder); 2902 Builder.restoreIP(Loc.IP); 2903 2904 uint32_t SrcLocStrSize; 2905 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2906 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2907 Value *ThreadId = getOrCreateThreadID(Ident); 2908 if (Device == nullptr) 2909 Device = ConstantInt::get(Int32, -1); 2910 if (NumDependences == nullptr) { 2911 NumDependences = ConstantInt::get(Int32, 0); 2912 PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); 2913 DependenceAddress = ConstantPointerNull::get(PointerTypeVar); 2914 } 2915 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); 2916 Value *Args[] = { 2917 Ident, ThreadId, InteropVar, Device, 2918 NumDependences, DependenceAddress, HaveNowaitClauseVal}; 2919 2920 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_destroy); 2921 2922 return Builder.CreateCall(Fn, Args); 2923 } 2924 2925 CallInst *OpenMPIRBuilder::createOMPInteropUse(const LocationDescription &Loc, 2926 Value *InteropVar, Value *Device, 2927 Value *NumDependences, 2928 Value *DependenceAddress, 2929 bool HaveNowaitClause) { 2930 IRBuilder<>::InsertPointGuard IPG(Builder); 2931 Builder.restoreIP(Loc.IP); 2932 uint32_t SrcLocStrSize; 2933 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2934 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2935 Value *ThreadId = getOrCreateThreadID(Ident); 2936 if (Device == nullptr) 2937 Device = ConstantInt::get(Int32, -1); 2938 if (NumDependences == nullptr) { 2939 NumDependences = ConstantInt::get(Int32, 0); 2940 PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); 2941 DependenceAddress = ConstantPointerNull::get(PointerTypeVar); 2942 } 2943 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); 2944 Value *Args[] = { 2945 Ident, ThreadId, InteropVar, Device, 2946 NumDependences, DependenceAddress, HaveNowaitClauseVal}; 2947 2948 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_use); 2949 2950 return Builder.CreateCall(Fn, Args); 2951 } 2952 2953 CallInst *OpenMPIRBuilder::createCachedThreadPrivate( 2954 const LocationDescription &Loc, llvm::Value *Pointer, 2955 llvm::ConstantInt *Size, const llvm::Twine &Name) { 2956 IRBuilder<>::InsertPointGuard IPG(Builder); 2957 Builder.restoreIP(Loc.IP); 2958 2959 uint32_t SrcLocStrSize; 2960 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2961 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2962 Value *ThreadId = getOrCreateThreadID(Ident); 2963 Constant *ThreadPrivateCache = 2964 getOrCreateOMPInternalVariable(Int8PtrPtr, Name); 2965 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache}; 2966 2967 Function *Fn = 2968 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached); 2969 2970 return Builder.CreateCall(Fn, Args); 2971 } 2972 2973 OpenMPIRBuilder::InsertPointTy 2974 OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD, 2975 bool RequiresFullRuntime) { 2976 if (!updateToLocation(Loc)) 2977 return Loc.IP; 2978 2979 uint32_t SrcLocStrSize; 2980 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 2981 Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 2982 ConstantInt *IsSPMDVal = ConstantInt::getSigned( 2983 IntegerType::getInt8Ty(Int8->getContext()), 2984 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); 2985 ConstantInt *UseGenericStateMachine = 2986 ConstantInt::getBool(Int32->getContext(), !IsSPMD); 2987 ConstantInt *RequiresFullRuntimeVal = 2988 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); 2989 2990 Function *Fn = getOrCreateRuntimeFunctionPtr( 2991 omp::RuntimeFunction::OMPRTL___kmpc_target_init); 2992 2993 CallInst *ThreadKind = Builder.CreateCall( 2994 Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal}); 2995 2996 Value *ExecUserCode = Builder.CreateICmpEQ( 2997 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), 2998 "exec_user_code"); 2999 3000 // ThreadKind = __kmpc_target_init(...) 3001 // if (ThreadKind == -1) 3002 // user_code 3003 // else 3004 // return; 3005 3006 auto *UI = Builder.CreateUnreachable(); 3007 BasicBlock *CheckBB = UI->getParent(); 3008 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry"); 3009 3010 BasicBlock *WorkerExitBB = BasicBlock::Create( 3011 CheckBB->getContext(), "worker.exit", CheckBB->getParent()); 3012 Builder.SetInsertPoint(WorkerExitBB); 3013 Builder.CreateRetVoid(); 3014 3015 auto *CheckBBTI = CheckBB->getTerminator(); 3016 Builder.SetInsertPoint(CheckBBTI); 3017 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB); 3018 3019 CheckBBTI->eraseFromParent(); 3020 UI->eraseFromParent(); 3021 3022 // Continue in the "user_code" block, see diagram above and in 3023 // openmp/libomptarget/deviceRTLs/common/include/target.h . 3024 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt()); 3025 } 3026 3027 void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc, 3028 bool IsSPMD, 3029 bool RequiresFullRuntime) { 3030 if (!updateToLocation(Loc)) 3031 return; 3032 3033 uint32_t SrcLocStrSize; 3034 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); 3035 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); 3036 ConstantInt *IsSPMDVal = ConstantInt::getSigned( 3037 IntegerType::getInt8Ty(Int8->getContext()), 3038 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); 3039 ConstantInt *RequiresFullRuntimeVal = 3040 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); 3041 3042 Function *Fn = getOrCreateRuntimeFunctionPtr( 3043 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit); 3044 3045 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal}); 3046 } 3047 3048 std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts, 3049 StringRef FirstSeparator, 3050 StringRef Separator) { 3051 SmallString<128> Buffer; 3052 llvm::raw_svector_ostream OS(Buffer); 3053 StringRef Sep = FirstSeparator; 3054 for (StringRef Part : Parts) { 3055 OS << Sep << Part; 3056 Sep = Separator; 3057 } 3058 return OS.str().str(); 3059 } 3060 3061 Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable( 3062 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) { 3063 // TODO: Replace the twine arg with stringref to get rid of the conversion 3064 // logic. However This is taken from current implementation in clang as is. 3065 // Since this method is used in many places exclusively for OMP internal use 3066 // we will keep it as is for temporarily until we move all users to the 3067 // builder and then, if possible, fix it everywhere in one go. 3068 SmallString<256> Buffer; 3069 llvm::raw_svector_ostream Out(Buffer); 3070 Out << Name; 3071 StringRef RuntimeName = Out.str(); 3072 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first; 3073 if (Elem.second) { 3074 assert(cast<PointerType>(Elem.second->getType()) 3075 ->isOpaqueOrPointeeTypeMatches(Ty) && 3076 "OMP internal variable has different type than requested"); 3077 } else { 3078 // TODO: investigate the appropriate linkage type used for the global 3079 // variable for possibly changing that to internal or private, or maybe 3080 // create different versions of the function for different OMP internal 3081 // variables. 3082 Elem.second = new llvm::GlobalVariable( 3083 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage, 3084 llvm::Constant::getNullValue(Ty), Elem.first(), 3085 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, 3086 AddressSpace); 3087 } 3088 3089 return Elem.second; 3090 } 3091 3092 Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) { 3093 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); 3094 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", "."); 3095 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name); 3096 } 3097 3098 GlobalVariable * 3099 OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, 3100 std::string VarName) { 3101 llvm::Constant *MaptypesArrayInit = 3102 llvm::ConstantDataArray::get(M.getContext(), Mappings); 3103 auto *MaptypesArrayGlobal = new llvm::GlobalVariable( 3104 M, MaptypesArrayInit->getType(), 3105 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit, 3106 VarName); 3107 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3108 return MaptypesArrayGlobal; 3109 } 3110 3111 void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc, 3112 InsertPointTy AllocaIP, 3113 unsigned NumOperands, 3114 struct MapperAllocas &MapperAllocas) { 3115 if (!updateToLocation(Loc)) 3116 return; 3117 3118 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); 3119 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); 3120 Builder.restoreIP(AllocaIP); 3121 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy); 3122 AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy); 3123 AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty); 3124 Builder.restoreIP(Loc.IP); 3125 MapperAllocas.ArgsBase = ArgsBase; 3126 MapperAllocas.Args = Args; 3127 MapperAllocas.ArgSizes = ArgSizes; 3128 } 3129 3130 void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc, 3131 Function *MapperFunc, Value *SrcLocInfo, 3132 Value *MaptypesArg, Value *MapnamesArg, 3133 struct MapperAllocas &MapperAllocas, 3134 int64_t DeviceID, unsigned NumOperands) { 3135 if (!updateToLocation(Loc)) 3136 return; 3137 3138 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); 3139 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); 3140 Value *ArgsBaseGEP = 3141 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase, 3142 {Builder.getInt32(0), Builder.getInt32(0)}); 3143 Value *ArgsGEP = 3144 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args, 3145 {Builder.getInt32(0), Builder.getInt32(0)}); 3146 Value *ArgSizesGEP = 3147 Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes, 3148 {Builder.getInt32(0), Builder.getInt32(0)}); 3149 Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo()); 3150 Builder.CreateCall(MapperFunc, 3151 {SrcLocInfo, Builder.getInt64(DeviceID), 3152 Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP, 3153 ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr}); 3154 } 3155 3156 bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic( 3157 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) { 3158 assert(!(AO == AtomicOrdering::NotAtomic || 3159 AO == llvm::AtomicOrdering::Unordered) && 3160 "Unexpected Atomic Ordering."); 3161 3162 bool Flush = false; 3163 llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic; 3164 3165 switch (AK) { 3166 case Read: 3167 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease || 3168 AO == AtomicOrdering::SequentiallyConsistent) { 3169 FlushAO = AtomicOrdering::Acquire; 3170 Flush = true; 3171 } 3172 break; 3173 case Write: 3174 case Update: 3175 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease || 3176 AO == AtomicOrdering::SequentiallyConsistent) { 3177 FlushAO = AtomicOrdering::Release; 3178 Flush = true; 3179 } 3180 break; 3181 case Capture: 3182 switch (AO) { 3183 case AtomicOrdering::Acquire: 3184 FlushAO = AtomicOrdering::Acquire; 3185 Flush = true; 3186 break; 3187 case AtomicOrdering::Release: 3188 FlushAO = AtomicOrdering::Release; 3189 Flush = true; 3190 break; 3191 case AtomicOrdering::AcquireRelease: 3192 case AtomicOrdering::SequentiallyConsistent: 3193 FlushAO = AtomicOrdering::AcquireRelease; 3194 Flush = true; 3195 break; 3196 default: 3197 // do nothing - leave silently. 3198 break; 3199 } 3200 } 3201 3202 if (Flush) { 3203 // Currently Flush RT call still doesn't take memory_ordering, so for when 3204 // that happens, this tries to do the resolution of which atomic ordering 3205 // to use with but issue the flush call 3206 // TODO: pass `FlushAO` after memory ordering support is added 3207 (void)FlushAO; 3208 emitFlush(Loc); 3209 } 3210 3211 // for AO == AtomicOrdering::Monotonic and all other case combinations 3212 // do nothing 3213 return Flush; 3214 } 3215 3216 OpenMPIRBuilder::InsertPointTy 3217 OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc, 3218 AtomicOpValue &X, AtomicOpValue &V, 3219 AtomicOrdering AO) { 3220 if (!updateToLocation(Loc)) 3221 return Loc.IP; 3222 3223 Type *XTy = X.Var->getType(); 3224 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"); 3225 Type *XElemTy = X.ElemTy; 3226 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3227 XElemTy->isPointerTy()) && 3228 "OMP atomic read expected a scalar type"); 3229 3230 Value *XRead = nullptr; 3231 3232 if (XElemTy->isIntegerTy()) { 3233 LoadInst *XLD = 3234 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read"); 3235 XLD->setAtomic(AO); 3236 XRead = cast<Value>(XLD); 3237 } else { 3238 // We need to bitcast and perform atomic op as integer 3239 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); 3240 IntegerType *IntCastTy = 3241 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3242 Value *XBCast = Builder.CreateBitCast( 3243 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast"); 3244 LoadInst *XLoad = 3245 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load"); 3246 XLoad->setAtomic(AO); 3247 if (XElemTy->isFloatingPointTy()) { 3248 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast"); 3249 } else { 3250 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast"); 3251 } 3252 } 3253 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read); 3254 Builder.CreateStore(XRead, V.Var, V.IsVolatile); 3255 return Builder.saveIP(); 3256 } 3257 3258 OpenMPIRBuilder::InsertPointTy 3259 OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc, 3260 AtomicOpValue &X, Value *Expr, 3261 AtomicOrdering AO) { 3262 if (!updateToLocation(Loc)) 3263 return Loc.IP; 3264 3265 Type *XTy = X.Var->getType(); 3266 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"); 3267 Type *XElemTy = X.ElemTy; 3268 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3269 XElemTy->isPointerTy()) && 3270 "OMP atomic write expected a scalar type"); 3271 3272 if (XElemTy->isIntegerTy()) { 3273 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile); 3274 XSt->setAtomic(AO); 3275 } else { 3276 // We need to bitcast and perform atomic op as integers 3277 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); 3278 IntegerType *IntCastTy = 3279 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3280 Value *XBCast = Builder.CreateBitCast( 3281 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast"); 3282 Value *ExprCast = 3283 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast"); 3284 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile); 3285 XSt->setAtomic(AO); 3286 } 3287 3288 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write); 3289 return Builder.saveIP(); 3290 } 3291 3292 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate( 3293 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, 3294 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, 3295 AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) { 3296 if (!updateToLocation(Loc)) 3297 return Loc.IP; 3298 3299 LLVM_DEBUG({ 3300 Type *XTy = X.Var->getType(); 3301 assert(XTy->isPointerTy() && 3302 "OMP Atomic expects a pointer to target memory"); 3303 Type *XElemTy = X.ElemTy; 3304 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3305 XElemTy->isPointerTy()) && 3306 "OMP atomic update expected a scalar type"); 3307 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && 3308 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && 3309 "OpenMP atomic does not support LT or GT operations"); 3310 }); 3311 3312 emitAtomicUpdate(AllocIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp, 3313 X.IsVolatile, IsXBinopExpr); 3314 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update); 3315 return Builder.saveIP(); 3316 } 3317 3318 Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2, 3319 AtomicRMWInst::BinOp RMWOp) { 3320 switch (RMWOp) { 3321 case AtomicRMWInst::Add: 3322 return Builder.CreateAdd(Src1, Src2); 3323 case AtomicRMWInst::Sub: 3324 return Builder.CreateSub(Src1, Src2); 3325 case AtomicRMWInst::And: 3326 return Builder.CreateAnd(Src1, Src2); 3327 case AtomicRMWInst::Nand: 3328 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2)); 3329 case AtomicRMWInst::Or: 3330 return Builder.CreateOr(Src1, Src2); 3331 case AtomicRMWInst::Xor: 3332 return Builder.CreateXor(Src1, Src2); 3333 case AtomicRMWInst::Xchg: 3334 case AtomicRMWInst::FAdd: 3335 case AtomicRMWInst::FSub: 3336 case AtomicRMWInst::BAD_BINOP: 3337 case AtomicRMWInst::Max: 3338 case AtomicRMWInst::Min: 3339 case AtomicRMWInst::UMax: 3340 case AtomicRMWInst::UMin: 3341 llvm_unreachable("Unsupported atomic update operation"); 3342 } 3343 llvm_unreachable("Unsupported atomic update operation"); 3344 } 3345 3346 std::pair<Value *, Value *> OpenMPIRBuilder::emitAtomicUpdate( 3347 Instruction *AllocIP, Value *X, Type *XElemTy, Value *Expr, 3348 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, 3349 AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr) { 3350 bool DoCmpExch = 3351 ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) || 3352 (RMWOp == AtomicRMWInst::FSub) || 3353 (RMWOp == AtomicRMWInst::Sub && !IsXBinopExpr); 3354 3355 std::pair<Value *, Value *> Res; 3356 if (XElemTy->isIntegerTy() && !DoCmpExch) { 3357 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO); 3358 // not needed except in case of postfix captures. Generate anyway for 3359 // consistency with the else part. Will be removed with any DCE pass. 3360 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp); 3361 } else { 3362 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace(); 3363 IntegerType *IntCastTy = 3364 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3365 Value *XBCast = 3366 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); 3367 LoadInst *OldVal = 3368 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load"); 3369 OldVal->setAtomic(AO); 3370 // CurBB 3371 // | /---\ 3372 // ContBB | 3373 // | \---/ 3374 // ExitBB 3375 BasicBlock *CurBB = Builder.GetInsertBlock(); 3376 Instruction *CurBBTI = CurBB->getTerminator(); 3377 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable(); 3378 BasicBlock *ExitBB = 3379 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit"); 3380 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(), 3381 X->getName() + ".atomic.cont"); 3382 ContBB->getTerminator()->eraseFromParent(); 3383 Builder.SetInsertPoint(ContBB); 3384 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2); 3385 PHI->addIncoming(OldVal, CurBB); 3386 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy); 3387 NewAtomicAddr->setName(X->getName() + "x.new.val"); 3388 NewAtomicAddr->moveBefore(AllocIP); 3389 IntegerType *NewAtomicCastTy = 3390 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); 3391 bool IsIntTy = XElemTy->isIntegerTy(); 3392 Value *NewAtomicIntAddr = 3393 (IsIntTy) 3394 ? NewAtomicAddr 3395 : Builder.CreateBitCast(NewAtomicAddr, 3396 NewAtomicCastTy->getPointerTo(Addrspace)); 3397 Value *OldExprVal = PHI; 3398 if (!IsIntTy) { 3399 if (XElemTy->isFloatingPointTy()) { 3400 OldExprVal = Builder.CreateBitCast(PHI, XElemTy, 3401 X->getName() + ".atomic.fltCast"); 3402 } else { 3403 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy, 3404 X->getName() + ".atomic.ptrCast"); 3405 } 3406 } 3407 3408 Value *Upd = UpdateOp(OldExprVal, Builder); 3409 Builder.CreateStore(Upd, NewAtomicAddr); 3410 LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr); 3411 Value *XAddr = 3412 (IsIntTy) 3413 ? X 3414 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); 3415 AtomicOrdering Failure = 3416 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); 3417 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg( 3418 XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure); 3419 Result->setVolatile(VolatileX); 3420 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0); 3421 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1); 3422 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock()); 3423 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB); 3424 3425 Res.first = OldExprVal; 3426 Res.second = Upd; 3427 3428 // set Insertion point in exit block 3429 if (UnreachableInst *ExitTI = 3430 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) { 3431 CurBBTI->eraseFromParent(); 3432 Builder.SetInsertPoint(ExitBB); 3433 } else { 3434 Builder.SetInsertPoint(ExitTI); 3435 } 3436 } 3437 3438 return Res; 3439 } 3440 3441 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture( 3442 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, 3443 AtomicOpValue &V, Value *Expr, AtomicOrdering AO, 3444 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, 3445 bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) { 3446 if (!updateToLocation(Loc)) 3447 return Loc.IP; 3448 3449 LLVM_DEBUG({ 3450 Type *XTy = X.Var->getType(); 3451 assert(XTy->isPointerTy() && 3452 "OMP Atomic expects a pointer to target memory"); 3453 Type *XElemTy = XTy->getPointerElementType(); 3454 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || 3455 XElemTy->isPointerTy()) && 3456 "OMP atomic capture expected a scalar type"); 3457 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && 3458 "OpenMP atomic does not support LT or GT operations"); 3459 }); 3460 3461 // If UpdateExpr is 'x' updated with some `expr` not based on 'x', 3462 // 'x' is simply atomically rewritten with 'expr'. 3463 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg); 3464 std::pair<Value *, Value *> Result = 3465 emitAtomicUpdate(AllocIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp, 3466 X.IsVolatile, IsXBinopExpr); 3467 3468 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second); 3469 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile); 3470 3471 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture); 3472 return Builder.saveIP(); 3473 } 3474 3475 GlobalVariable * 3476 OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, 3477 std::string VarName) { 3478 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get( 3479 llvm::ArrayType::get( 3480 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()), 3481 Names); 3482 auto *MapNamesArrayGlobal = new llvm::GlobalVariable( 3483 M, MapNamesArrayInit->getType(), 3484 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit, 3485 VarName); 3486 return MapNamesArrayGlobal; 3487 } 3488 3489 // Create all simple and struct types exposed by the runtime and remember 3490 // the llvm::PointerTypes of them for easy access later. 3491 void OpenMPIRBuilder::initializeTypes(Module &M) { 3492 LLVMContext &Ctx = M.getContext(); 3493 StructType *T; 3494 #define OMP_TYPE(VarName, InitValue) VarName = InitValue; 3495 #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ 3496 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \ 3497 VarName##PtrTy = PointerType::getUnqual(VarName##Ty); 3498 #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \ 3499 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \ 3500 VarName##Ptr = PointerType::getUnqual(VarName); 3501 #define OMP_STRUCT_TYPE(VarName, StructName, ...) \ 3502 T = StructType::getTypeByName(Ctx, StructName); \ 3503 if (!T) \ 3504 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \ 3505 VarName = T; \ 3506 VarName##Ptr = PointerType::getUnqual(T); 3507 #include "llvm/Frontend/OpenMP/OMPKinds.def" 3508 } 3509 3510 void OpenMPIRBuilder::OutlineInfo::collectBlocks( 3511 SmallPtrSetImpl<BasicBlock *> &BlockSet, 3512 SmallVectorImpl<BasicBlock *> &BlockVector) { 3513 SmallVector<BasicBlock *, 32> Worklist; 3514 BlockSet.insert(EntryBB); 3515 BlockSet.insert(ExitBB); 3516 3517 Worklist.push_back(EntryBB); 3518 while (!Worklist.empty()) { 3519 BasicBlock *BB = Worklist.pop_back_val(); 3520 BlockVector.push_back(BB); 3521 for (BasicBlock *SuccBB : successors(BB)) 3522 if (BlockSet.insert(SuccBB).second) 3523 Worklist.push_back(SuccBB); 3524 } 3525 } 3526 3527 void CanonicalLoopInfo::collectControlBlocks( 3528 SmallVectorImpl<BasicBlock *> &BBs) { 3529 // We only count those BBs as control block for which we do not need to 3530 // reverse the CFG, i.e. not the loop body which can contain arbitrary control 3531 // flow. For consistency, this also means we do not add the Body block, which 3532 // is just the entry to the body code. 3533 BBs.reserve(BBs.size() + 6); 3534 BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()}); 3535 } 3536 3537 BasicBlock *CanonicalLoopInfo::getPreheader() const { 3538 assert(isValid() && "Requires a valid canonical loop"); 3539 for (BasicBlock *Pred : predecessors(Header)) { 3540 if (Pred != Latch) 3541 return Pred; 3542 } 3543 llvm_unreachable("Missing preheader"); 3544 } 3545 3546 void CanonicalLoopInfo::assertOK() const { 3547 #ifndef NDEBUG 3548 // No constraints if this object currently does not describe a loop. 3549 if (!isValid()) 3550 return; 3551 3552 BasicBlock *Preheader = getPreheader(); 3553 BasicBlock *Body = getBody(); 3554 BasicBlock *After = getAfter(); 3555 3556 // Verify standard control-flow we use for OpenMP loops. 3557 assert(Preheader); 3558 assert(isa<BranchInst>(Preheader->getTerminator()) && 3559 "Preheader must terminate with unconditional branch"); 3560 assert(Preheader->getSingleSuccessor() == Header && 3561 "Preheader must jump to header"); 3562 3563 assert(Header); 3564 assert(isa<BranchInst>(Header->getTerminator()) && 3565 "Header must terminate with unconditional branch"); 3566 assert(Header->getSingleSuccessor() == Cond && 3567 "Header must jump to exiting block"); 3568 3569 assert(Cond); 3570 assert(Cond->getSinglePredecessor() == Header && 3571 "Exiting block only reachable from header"); 3572 3573 assert(isa<BranchInst>(Cond->getTerminator()) && 3574 "Exiting block must terminate with conditional branch"); 3575 assert(size(successors(Cond)) == 2 && 3576 "Exiting block must have two successors"); 3577 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && 3578 "Exiting block's first successor jump to the body"); 3579 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && 3580 "Exiting block's second successor must exit the loop"); 3581 3582 assert(Body); 3583 assert(Body->getSinglePredecessor() == Cond && 3584 "Body only reachable from exiting block"); 3585 assert(!isa<PHINode>(Body->front())); 3586 3587 assert(Latch); 3588 assert(isa<BranchInst>(Latch->getTerminator()) && 3589 "Latch must terminate with unconditional branch"); 3590 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header"); 3591 // TODO: To support simple redirecting of the end of the body code that has 3592 // multiple; introduce another auxiliary basic block like preheader and after. 3593 assert(Latch->getSinglePredecessor() != nullptr); 3594 assert(!isa<PHINode>(Latch->front())); 3595 3596 assert(Exit); 3597 assert(isa<BranchInst>(Exit->getTerminator()) && 3598 "Exit block must terminate with unconditional branch"); 3599 assert(Exit->getSingleSuccessor() == After && 3600 "Exit block must jump to after block"); 3601 3602 assert(After); 3603 assert(After->getSinglePredecessor() == Exit && 3604 "After block only reachable from exit block"); 3605 assert(After->empty() || !isa<PHINode>(After->front())); 3606 3607 Instruction *IndVar = getIndVar(); 3608 assert(IndVar && "Canonical induction variable not found?"); 3609 assert(isa<IntegerType>(IndVar->getType()) && 3610 "Induction variable must be an integer"); 3611 assert(cast<PHINode>(IndVar)->getParent() == Header && 3612 "Induction variable must be a PHI in the loop header"); 3613 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader); 3614 assert( 3615 cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()); 3616 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch); 3617 3618 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1); 3619 assert(cast<Instruction>(NextIndVar)->getParent() == Latch); 3620 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add); 3621 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar); 3622 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) 3623 ->isOne()); 3624 3625 Value *TripCount = getTripCount(); 3626 assert(TripCount && "Loop trip count not found?"); 3627 assert(IndVar->getType() == TripCount->getType() && 3628 "Trip count and induction variable must have the same type"); 3629 3630 auto *CmpI = cast<CmpInst>(&Cond->front()); 3631 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT && 3632 "Exit condition must be a signed less-than comparison"); 3633 assert(CmpI->getOperand(0) == IndVar && 3634 "Exit condition must compare the induction variable"); 3635 assert(CmpI->getOperand(1) == TripCount && 3636 "Exit condition must compare with the trip count"); 3637 #endif 3638 } 3639 3640 void CanonicalLoopInfo::invalidate() { 3641 Header = nullptr; 3642 Cond = nullptr; 3643 Latch = nullptr; 3644 Exit = nullptr; 3645 } 3646