//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/LowerAtomic.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {

class AtomicExpand : public FunctionPass {
  const TargetLowering *TLI = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool tryExpandAtomicStore(StoreInst *SI);
  void expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
  Value *
  insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
                    Align AddrAlign, AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void
  expandAtomicOpToLLSC(Instruction *I, Type *ResultTy, Value *Addr,
                       Align AddrAlign, AtomicOrdering MemOpOrder,
                       function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *
  insertRMWCmpXchgLoop(IRBuilder<> &Builder, Type *ResultType, Value *Addr,
                       Align AddrAlign, AtomicOrdering MemOpOrder,
                       SyncScope::ID SSID,
                       function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
                       CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};

} // end anonymous namespace

char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
                false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering.
// (Versus turning into a __atomic libcall)
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}

bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM.getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (Instruction &I : instructions(F))
    if (I.isAtomic() && !isa<FenceInst>(&I))
      AtomicInsts.push_back(&I);

  bool MadeChange = false;
  for (auto *I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getFailureOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getMergedOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    }

    if (LI) {
      if (TLI->shouldCastAtomicLoadInIR(LI) ==
          TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (TLI->shouldCastAtomicStoreInIR(SI) ==
          TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (tryExpandAtomicStore(SI))
        MadeChange = true;
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
            TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
          // TODO: add a TLI hook to control this so that each target can
          // convert to lowering the original type one at a time.
          RMWI = convertAtomicXchgToIntegerType(RMWI);
          assert(RMWI->getValOperand()->getType()->isIntegerTy() &&
                 "invariant broken");
          MadeChange = true;
        }
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

AtomicRMWInst *
AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  IRBuilder<> Builder(RMWI);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Type *PT = PointerType::get(NewTy, RMWI->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, NewAddr, NewVal,
                              RMWI->getAlign(), RMWI->getOrdering());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    LI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}

bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStore(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    SI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicStore");
  }
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

void AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicOrdering Ordering = SI->getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic);
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  assert(!OrigTy->isPointerTy());
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
  switch (Kind) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      // TODO: Handle atomicrmw fadd/fsub
      if (AI->getType()->isFloatingPointTy())
        return false;

      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      SmallVector<StringRef> SSNs;
      Ctx.getSyncScopeNames(SSNs);
      auto MemScope = SSNs[AI->getSyncScopeID()].empty()
                          ? "system"
                          : SSNs[AI->getSyncScopeID()];
      OptimizationRemarkEmitter ORE(AI->getFunction());
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
               << "A compare and swap loop was generated for an atomic "
               << AI->getOperationName(AI->getOperation()) << " operation at "
               << MemScope << " memory scope";
      });
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicRMWInst(AI);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}

namespace {

struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}

} // end anonymous namespace

/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                           Type *ValueType, Value *Addr,
                                           Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = ValueType;
  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  assert(ValueSize < MinWordSize);

  Type *WordPtrType =
      PMV.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());

  // TODO: we could skip some of this if AddrAlign >= MinWordSize.
  Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
  PMV.AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateAnd(AddrInt, ~(uint64_t)(MinWordSize - 1)), WordPtrType,
      "AlignedAddr");
  PMV.AlignedAddrAlignment = Align(MinWordSize);

  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");
  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}

static Value *extractMaskedValue(IRBuilder<> &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.ValueType, "extracted");
  return Trunc;
}

static Value *insertMaskedValue(IRBuilder<> &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}

/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilder<> &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin: {
    // Finally, comparison ops will operate on the full value, so
    // truncate down to the original size, and expand out again after
    // doing the operation.
    Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded_Extract, Inc);
    Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder, SSID,
                                     PerformPartwordOp, createCmpXchgInstFun);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI =
      Builder.CreateAtomicRMW(Op, PMV.AlignedAddr, NewOperand,
                              PMV.AlignedAddrAlignment, AI->getOrdering());

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}

bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  IRBuilder<> Builder(CI);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyways.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, verify that the masked-out part of the loaded value
  // has been modified. If it didn't, abort the cmpxchg, since the
  // masked-in part must've.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}

void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}

void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-ext.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  IRBuilder<> Builder(CI);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >=
             F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(CI);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      NewAddr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going
  // to be attempted. The cost of this delay is that we need 2 copies of the
  // block emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     %aligned.addr = ...
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%aligned.addr)
  //     %unreleasedload.extract = extract value from %unreleasedload
  //     %should_store = icmp eq %unreleasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.releasingstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.releasingstore:
  //     fence?
  //     br label cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.releasingstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %updated.new = insert %new into %loaded.trystore
  //     %stored = @store_conditional(%updated.new, %aligned.addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%aligned.addr)
  //     %releasedload.extract = extract value from %releasedload
  //     %should_store = icmp eq %releasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
  //                        [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %loaded = extract value from %loaded.exit
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" from the cmpxchg expansion. It may be of
  // a type wider than the one in the cmpxchg instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto *User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

Value *AtomicExpand::insertRMWCmpXchgLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}

bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicCmpXchgInst(CI);
  }
}

// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  IRBuilder<> Builder(AI);
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(),
      [&](IRBuilder<> &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}

// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined.)
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}

void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
}

void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
}

void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for CAS");
}

static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return makeArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return makeArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return makeArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return makeArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return makeArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return makeArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return makeArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    // No atomic libcalls are available for min/max or for the floating-point
    // operations.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}

void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max), or there were only size-specialized
  // libcalls (add/sub/etc) and we needed a generic. So, expand to a
  // CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then expand the CAS into a libcall.
          expandAtomicCASToLibcall(Pair);
        });
  }
}

// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
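//
// As a rough, illustrative sketch (not IR this pass emits verbatim; the value
// names are invented): given a naturally aligned, 4-byte operand, a
// `load atomic i32, i32* %p seq_cst, align 4` is expected to take the sized
// path and become roughly
//
//   %p.cast = bitcast i32* %p to i8*
//   %ret = call i32 @__atomic_load_4(i8* %p.cast, i32 5) ; 5 == toCABI(seq_cst)
//
// whereas an over-sized or under-aligned access takes the generic
// `__atomic_load` form instead, passing the size explicitly and returning the
// loaded bytes through a temporary alloca.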
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall so give up.
    return false;
  }

  // Build up the function call. There are two kinds. First, the sized
  // variants. These calls are going to be one of the following (with
  // N=1,2,4,8,16):
  //  iN    __atomic_load_N(iN *ptr, int ordering)
  //  void  __atomic_store_N(iN *ptr, iN val, int ordering)
  //  iN    __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
  //  bool  __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
  //                                    int success_order, int failure_order)
  //
  // Note that these functions can be used for non-integer atomic
  // operations, the values just need to be bitcast to integers on the
  // way in and out.
  //
  // And, then, the generic variants. They look like the following:
  //  void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  //  void __atomic_store(size_t size, void *ptr, void *val, int ordering)
  //  void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
  //                         int ordering)
  //  bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
  //                                 void *desired, int success_order,
  //                                 int failure_order)
  //
  // The different signatures are built up depending on the
  // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
  // variables.
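  //
  // For a concrete feel of the generic path (again an illustrative sketch
  // with invented value names, assuming a 64-bit target so size_t is i64): a
  // cmpxchg whose size or alignment rules out the sized calls, say an
  // under-aligned i128 cmpxchg, should end up roughly as
  //
  //   %expected.buf = alloca i128        ; seeded with the 'expected' operand
  //   %desired.buf  = alloca i128        ; seeded with the 'desired' operand
  //   %ok = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* %ptr.cast,
  //                 i8* %expected.cast, i8* %desired.cast, i32 5, i32 5)
  //
  // with the original { i128, i1 } result then rebuilt from a reload of
  // %expected.buf plus %ok, which is what the code below does.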

  AllocaInst *AllocaCASExpected = nullptr;
  Value *AllocaCASExpected_i8 = nullptr;
  AllocaInst *AllocaValue = nullptr;
  Value *AllocaValue_i8 = nullptr;
  AllocaInst *AllocaResult = nullptr;
  Value *AllocaResult_i8 = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  // Note: This assumes all address spaces share a common libfunc
  // implementation and that addresses are convertible. For systems without
  // that property, we'd need to extend this mechanism to support AS-specific
  // families of atomic intrinsics.
  auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
  Value *PtrVal =
      Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx, PtrTypeAS));
  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();

    AllocaCASExpected_i8 = Builder.CreateBitCast(
        AllocaCASExpected, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected_i8);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      AllocaValue_i8 =
          Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
      Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue_i8);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
    AllocaResult_i8 =
        Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
    Args.push_back(AllocaResult_i8);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool result
    // from call}
    Type *FinalResultTy = I->getType();
    Value *V = UndefValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}