//===-- AMDGPUAtomicOptimizer.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass optimizes atomic operations by using a single lane of a wavefront
/// to perform the atomic operation, thus reducing contention on that memory
/// location.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-atomic-optimizer"

using namespace llvm;

namespace {

enum DPP_CTRL {
  DPP_ROW_SR1 = 0x111,
  DPP_ROW_SR2 = 0x112,
  DPP_ROW_SR3 = 0x113,
  DPP_ROW_SR4 = 0x114,
  DPP_ROW_SR8 = 0x118,
  DPP_WF_SR1 = 0x138,
  DPP_ROW_BCAST15 = 0x142,
  DPP_ROW_BCAST31 = 0x143
};

struct ReplacementInfo {
  Instruction *I;
  AtomicRMWInst::BinOp Op;
  unsigned ValIdx;
  bool ValDivergent;
};

class AMDGPUAtomicOptimizer : public FunctionPass,
                              public InstVisitor<AMDGPUAtomicOptimizer> {
private:
  SmallVector<ReplacementInfo, 8> ToReplace;
  const LegacyDivergenceAnalysis *DA;
  const DataLayout *DL;
  DominatorTree *DT;
  bool HasDPP;
  bool IsPixelShader;

  void optimizeAtomic(Instruction &I, AtomicRMWInst::BinOp Op, unsigned ValIdx,
                      bool ValDivergent) const;

public:
  static char ID;

  AMDGPUAtomicOptimizer() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<TargetPassConfig>();
  }

  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitIntrinsicInst(IntrinsicInst &I);
};

} // namespace

char AMDGPUAtomicOptimizer::ID = 0;

char &llvm::AMDGPUAtomicOptimizerID = AMDGPUAtomicOptimizer::ID;

bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
  if (skipFunction(F)) {
    return false;
  }

  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  DL = &F.getParent()->getDataLayout();
  DominatorTreeWrapperPass *const DTW =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTW ? &DTW->getDomTree() : nullptr;
  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  HasDPP = ST.hasDPP();
  IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;

  visit(F);

  const bool Changed = !ToReplace.empty();

  for (ReplacementInfo &Info : ToReplace) {
    optimizeAtomic(*Info.I, Info.Op, Info.ValIdx, Info.ValDivergent);
  }

  ToReplace.clear();

  return Changed;
}

void AMDGPUAtomicOptimizer::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Early exit for unhandled address space atomic instructions.
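  // Only global and LDS (local) atomics are considered. As an illustration
  // (not exhaustive), the kind of instruction that can pass the checks in
  // this function is a uniform-pointer atomic such as:
  //
  //   %old = atomicrmw add i32 addrspace(1)* %counter, i32 %v seq_cst
  //
  // where every active lane targets the same %counter.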
  switch (I.getPointerAddressSpace()) {
  default:
    return;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::LOCAL_ADDRESS:
    break;
  }

  AtomicRMWInst::BinOp Op = I.getOperation();

  switch (Op) {
  default:
    return;
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    break;
  }

  const unsigned PtrIdx = 0;
  const unsigned ValIdx = 1;

  // If the pointer operand is divergent, then each lane is doing an atomic
  // operation on a different address, and we cannot optimize that.
  if (DA->isDivergent(I.getOperand(PtrIdx))) {
    return;
  }

  const bool ValDivergent = DA->isDivergent(I.getOperand(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent && (!HasDPP || (DL->getTypeSizeInBits(I.getType()) != 32))) {
    return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

void AMDGPUAtomicOptimizer::visitIntrinsicInst(IntrinsicInst &I) {
  AtomicRMWInst::BinOp Op;

  switch (I.getIntrinsicID()) {
  default:
    return;
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
    Op = AtomicRMWInst::Add;
    break;
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
    Op = AtomicRMWInst::Sub;
    break;
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
    Op = AtomicRMWInst::And;
    break;
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
    Op = AtomicRMWInst::Or;
    break;
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
    Op = AtomicRMWInst::Xor;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
    Op = AtomicRMWInst::Min;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
    Op = AtomicRMWInst::UMin;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
    Op = AtomicRMWInst::Max;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
    Op = AtomicRMWInst::UMax;
    break;
  }

  const unsigned ValIdx = 0;

  const bool ValDivergent = DA->isDivergent(I.getOperand(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent && (!HasDPP || (DL->getTypeSizeInBits(I.getType()) != 32))) {
    return;
  }

  // If any of the other arguments to the intrinsic are divergent, we can't
  // optimize the operation.
  for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
    if (DA->isDivergent(I.getOperand(Idx))) {
      return;
    }
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

// Use the builder to create the non-atomic counterpart of the specified
// atomicrmw binary op.
static Value *buildNonAtomicBinOp(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                                  Value *LHS, Value *RHS) {
  CmpInst::Predicate Pred;

  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
    return B.CreateBinOp(Instruction::Add, LHS, RHS);
  case AtomicRMWInst::Sub:
    return B.CreateBinOp(Instruction::Sub, LHS, RHS);
  case AtomicRMWInst::And:
    return B.CreateBinOp(Instruction::And, LHS, RHS);
  case AtomicRMWInst::Or:
    return B.CreateBinOp(Instruction::Or, LHS, RHS);
  case AtomicRMWInst::Xor:
    return B.CreateBinOp(Instruction::Xor, LHS, RHS);

  case AtomicRMWInst::Max:
    Pred = CmpInst::ICMP_SGT;
    break;
  case AtomicRMWInst::Min:
    Pred = CmpInst::ICMP_SLT;
    break;
  case AtomicRMWInst::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case AtomicRMWInst::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  }
  Value *Cond = B.CreateICmp(Pred, LHS, RHS);
  return B.CreateSelect(Cond, LHS, RHS);
}

static APInt getIdentityValueForAtomicOp(AtomicRMWInst::BinOp Op,
                                         unsigned BitWidth) {
  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::UMax:
    return APInt::getMinValue(BitWidth);
  case AtomicRMWInst::And:
  case AtomicRMWInst::UMin:
    return APInt::getMaxValue(BitWidth);
  case AtomicRMWInst::Max:
    return APInt::getSignedMinValue(BitWidth);
  case AtomicRMWInst::Min:
    return APInt::getSignedMaxValue(BitWidth);
  }
}

void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
                                           AtomicRMWInst::BinOp Op,
                                           unsigned ValIdx,
                                           bool ValDivergent) const {
  // Start building just before the instruction.
  IRBuilder<> B(&I);

  // If we are in a pixel shader, because of how we have to mask out helper
  // lane invocations, we need to record the entry and exit BB's.
  BasicBlock *PixelEntryBB = nullptr;
  BasicBlock *PixelExitBB = nullptr;

  // If we're optimizing an atomic within a pixel shader, we need to wrap the
  // entire atomic operation in a helper-lane check. We do not want any helper
  // lanes that are around only for the purposes of derivatives to take part
  // in any cross-lane communication, and we use a branch on whether the lane
  // is live to do this.
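  // Roughly, the control flow introduced below looks like this (shape only;
  // block names are illustrative):
  //
  //   PixelEntryBB: %live = call i1 @llvm.amdgcn.ps.live()
  //                 br i1 %live, label %NonHelper, label %PixelExitBB
  //   NonHelper:    <the optimized atomic sequence built by the rest of
  //                  this function>
  //                 br label %PixelExitBB
  //   PixelExitBB:  phi merging the result back for helper and live lanes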
  if (IsPixelShader) {
    // Record I's original position as the entry block.
    PixelEntryBB = I.getParent();

    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
    Instruction *const NonHelperTerminator =
        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

    // Record I's new position as the exit block.
    PixelExitBB = I.getParent();

    I.moveBefore(NonHelperTerminator);
    B.SetInsertPoint(&I);
  }

  Type *const Ty = I.getType();
  const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
  Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);

  // This is the value in the atomic operation we need to combine in order to
  // reduce the number of atomic operations.
  Value *const V = I.getOperand(ValIdx);

  // We need to know how many lanes are active within the wavefront, and we do
  // this by doing a ballot of active lanes.
  CallInst *const Ballot = B.CreateIntrinsic(
      Intrinsic::amdgcn_icmp, {B.getInt64Ty(), B.getInt32Ty()},
      {B.getInt32(1), B.getInt32(0), B.getInt32(CmpInst::ICMP_NE)});

  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
  // below us only if its associated index was less than ours. We do this by
  // using the mbcnt intrinsic.
  Value *const BitCast = B.CreateBitCast(Ballot, VecTy);
  Value *const ExtractLo = B.CreateExtractElement(BitCast, B.getInt32(0));
  Value *const ExtractHi = B.CreateExtractElement(BitCast, B.getInt32(1));
  CallInst *const PartialMbcnt = B.CreateIntrinsic(
      Intrinsic::amdgcn_mbcnt_lo, {}, {ExtractLo, B.getInt32(0)});
  Value *const Mbcnt =
      B.CreateIntCast(B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {},
                                        {ExtractHi, PartialMbcnt}),
                      Ty, false);

  Value *const Identity = B.getInt(getIdentityValueForAtomicOp(Op, TyBitWidth));

  Value *ExclScan = nullptr;
  Value *NewV = nullptr;

  // If we have a divergent value in each lane, we need to combine the value
  // using DPP.
  if (ValDivergent) {
    // First we need to set all inactive invocations to the identity value, so
    // that they can correctly contribute to the final result.
    CallInst *const SetInactive =
        B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty, {V, Identity});

    CallInst *const FirstDPP =
        B.CreateIntrinsic(Intrinsic::amdgcn_update_dpp, Ty,
                          {Identity, SetInactive, B.getInt32(DPP_WF_SR1),
                           B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});
    ExclScan = FirstDPP;

    const unsigned Iters = 7;
    const unsigned DPPCtrl[Iters] = {
        DPP_ROW_SR1, DPP_ROW_SR2, DPP_ROW_SR3,    DPP_ROW_SR4,
        DPP_ROW_SR8, DPP_ROW_BCAST15, DPP_ROW_BCAST31};
    const unsigned RowMask[Iters] = {0xf, 0xf, 0xf, 0xf, 0xf, 0xa, 0xc};
    const unsigned BankMask[Iters] = {0xf, 0xf, 0xf, 0xe, 0xc, 0xf, 0xf};

    // This loop performs an exclusive scan across the wavefront, with all lanes
    // active (by using the WWM intrinsic).
    for (unsigned Idx = 0; Idx < Iters; Idx++) {
      Value *const UpdateValue = Idx < 3 ? FirstDPP : ExclScan;
      CallInst *const DPP = B.CreateIntrinsic(
          Intrinsic::amdgcn_update_dpp, Ty,
          {Identity, UpdateValue, B.getInt32(DPPCtrl[Idx]),
           B.getInt32(RowMask[Idx]), B.getInt32(BankMask[Idx]), B.getFalse()});

      ExclScan = buildNonAtomicBinOp(B, Op, ExclScan, DPP);
    }

    NewV = buildNonAtomicBinOp(B, Op, SetInactive, ExclScan);

    // Read the value from the last lane, which has accumulated the values of
    // each active lane in the wavefront. This will be our new value which we
    // will provide to the atomic operation.
    if (TyBitWidth == 64) {
      Value *const ExtractLo = B.CreateTrunc(NewV, B.getInt32Ty());
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(NewV, B.getInt64(32)), B.getInt32Ty());
      CallInst *const ReadLaneLo = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractLo, B.getInt32(63)});
      CallInst *const ReadLaneHi = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractHi, B.getInt32(63)});
      Value *const PartialInsert = B.CreateInsertElement(
          UndefValue::get(VecTy), ReadLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadLaneHi, B.getInt32(1));
      NewV = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                               {NewV, B.getInt32(63)});
    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }

    // Finally mark the readlanes in the WWM section.
    NewV = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, NewV);
  } else {
    switch (Op) {
    default:
      llvm_unreachable("Unhandled atomic op");

    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub: {
      // The new value we will be contributing to the atomic operation is the
      // old value times the number of active lanes.
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = B.CreateMul(V, Ctpop);
      break;
    }

    case AtomicRMWInst::And:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Max:
    case AtomicRMWInst::Min:
    case AtomicRMWInst::UMax:
    case AtomicRMWInst::UMin:
      // These operations with a uniform value are idempotent: doing the atomic
      // operation multiple times has the same effect as doing it once.
      NewV = V;
      break;

    case AtomicRMWInst::Xor:
      // The new value we will be contributing to the atomic operation is the
      // old value times the parity of the number of active lanes.
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = B.CreateMul(V, B.CreateAnd(Ctpop, 1));
      break;
    }
  }

  // We only want a single lane to enter our new control flow, and we do this
  // by checking if there are any active lanes below us. Only one lane will
  // have 0 active lanes below us, so that will be the only one to progress.
  Value *const Cond = B.CreateICmpEQ(Mbcnt, B.getIntN(TyBitWidth, 0));

  // Store I's original basic block before we split the block.
  BasicBlock *const EntryBB = I.getParent();

  // We need to introduce some new control flow to force a single lane to be
  // active. We do this by splitting I's basic block at I, and introducing the
  // new block such that:
  // entry --> single_lane -\
  //       \------------------> exit
  Instruction *const SingleLaneTerminator =
      SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

  // Move the IR builder into single_lane next.
  B.SetInsertPoint(SingleLaneTerminator);

  // Clone the original atomic operation into single_lane, replacing the
  // original value with our newly created one.
  Instruction *const NewI = I.clone();
  B.Insert(NewI);
  NewI->setOperand(ValIdx, NewV);

  // Move the IR builder into exit next, and start inserting just before the
  // original instruction.
  B.SetInsertPoint(&I);

  // Create a PHI node to get our new atomic result into the exit block.
  PHINode *const PHI = B.CreatePHI(Ty, 2);
  PHI->addIncoming(UndefValue::get(Ty), EntryBB);
  PHI->addIncoming(NewI, SingleLaneTerminator->getParent());

  // We need to broadcast the value from the lowest active lane (the first
  // lane) to all other lanes in the wavefront. We use an intrinsic for this,
  // but have to handle 64-bit broadcasts with two calls to this intrinsic.
  Value *BroadcastI = nullptr;

  if (TyBitWidth == 64) {
    Value *const ExtractLo = B.CreateTrunc(PHI, B.getInt32Ty());
    Value *const ExtractHi =
        B.CreateTrunc(B.CreateLShr(PHI, B.getInt64(32)), B.getInt32Ty());
    CallInst *const ReadFirstLaneLo =
        B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
    CallInst *const ReadFirstLaneHi =
        B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractHi);
    Value *const PartialInsert = B.CreateInsertElement(
        UndefValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
    Value *const Insert =
        B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
    BroadcastI = B.CreateBitCast(Insert, Ty);
  } else if (TyBitWidth == 32) {
    BroadcastI = B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, PHI);
  } else {
    llvm_unreachable("Unhandled atomic bit width");
  }

  // Now that we have the result of our single atomic operation, we need to
  // get each individual lane's slice of that result. We combine the lane
  // offset we previously calculated with the atomic result value we got from
  // the first lane, to compute the value this lane's own atomic operation
  // would have returned.
  Value *LaneOffset = nullptr;
  if (ValDivergent) {
    LaneOffset = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, ExclScan);
  } else {
    switch (Op) {
    default:
      llvm_unreachable("Unhandled atomic op");
    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub:
      LaneOffset = B.CreateMul(V, Mbcnt);
      break;
    case AtomicRMWInst::And:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Max:
    case AtomicRMWInst::Min:
    case AtomicRMWInst::UMax:
    case AtomicRMWInst::UMin:
      LaneOffset = B.CreateSelect(Cond, Identity, V);
      break;
    case AtomicRMWInst::Xor:
      LaneOffset = B.CreateMul(V, B.CreateAnd(Mbcnt, 1));
      break;
    }
  }
  Value *const Result = buildNonAtomicBinOp(B, Op, BroadcastI, LaneOffset);

  if (IsPixelShader) {
    // Need a final PHI to reconverge to above the helper lane branch mask.
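    // Helper lanes branch from PixelEntryBB straight to PixelExitBB without
    // executing the atomic sequence, so their incoming value is simply undef;
    // only the live lanes carry the computed Result through this PHI.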
    B.SetInsertPoint(PixelExitBB->getFirstNonPHI());

    PHINode *const PHI = B.CreatePHI(Ty, 2);
    PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
    PHI->addIncoming(Result, I.getParent());
    I.replaceAllUsesWith(PHI);
  } else {
    // Replace the original atomic instruction with the new one.
    I.replaceAllUsesWith(Result);
  }

  // And delete the original.
  I.eraseFromParent();
}

INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                      "AMDGPU atomic optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                    "AMDGPU atomic optimizations", false, false)

FunctionPass *llvm::createAMDGPUAtomicOptimizerPass() {
  return new AMDGPUAtomicOptimizer();
}