xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp (revision d56accc7c3dcc897489b6a07834763a03b9f3d68)
1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
76         ValueTy = lvalue.getType();
77         ValueSizeInBits = C.getTypeSize(ValueTy);
78         auto &OrigBFI = lvalue.getBitFieldInfo();
79         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80         AtomicSizeInBits = C.toBits(
81             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                 .alignTo(lvalue.getAlignment()));
83         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84         auto OffsetInChars =
85             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86             lvalue.getAlignment();
87         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88             CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
89         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90             VoidPtrAddr,
91             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92             "atomic_bitfield_base");
93         BFI = OrigBFI;
94         BFI.Offset = Offset;
95         BFI.StorageSize = AtomicSizeInBits;
96         BFI.StorageOffset += OffsetInChars;
97         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98                                     BFI, lvalue.getType(), lvalue.getBaseInfo(),
99                                     lvalue.getTBAAInfo());
100         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101         if (AtomicTy.isNull()) {
102           llvm::APInt Size(
103               /*numBits=*/32,
104               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105           AtomicTy =
106               C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107                                      /*IndexTypeQuals=*/0);
108         }
109         AtomicAlign = ValueAlign = lvalue.getAlignment();
110       } else if (lvalue.isVectorElt()) {
111         ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112         ValueSizeInBits = C.getTypeSize(ValueTy);
113         AtomicTy = lvalue.getType();
114         AtomicSizeInBits = C.getTypeSize(AtomicTy);
115         AtomicAlign = ValueAlign = lvalue.getAlignment();
116         LVal = lvalue;
117       } else {
118         assert(lvalue.isExtVectorElt());
119         ValueTy = lvalue.getType();
120         ValueSizeInBits = C.getTypeSize(ValueTy);
121         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122             lvalue.getType(), cast<llvm::FixedVectorType>(
123                                   lvalue.getExtVectorAddress().getElementType())
124                                   ->getNumElements());
125         AtomicSizeInBits = C.getTypeSize(AtomicTy);
126         AtomicAlign = ValueAlign = lvalue.getAlignment();
127         LVal = lvalue;
128       }
129       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131     }
132 
133     QualType getAtomicType() const { return AtomicTy; }
134     QualType getValueType() const { return ValueTy; }
135     CharUnits getAtomicAlignment() const { return AtomicAlign; }
136     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139     bool shouldUseLibcall() const { return UseLibcall; }
140     const LValue &getAtomicLValue() const { return LVal; }
141     llvm::Value *getAtomicPointer() const {
142       if (LVal.isSimple())
143         return LVal.getPointer(CGF);
144       else if (LVal.isBitField())
145         return LVal.getBitFieldPointer();
146       else if (LVal.isVectorElt())
147         return LVal.getVectorPointer();
148       assert(LVal.isExtVectorElt());
149       return LVal.getExtVectorPointer();
150     }
151     Address getAtomicAddress() const {
152       return Address(getAtomicPointer(), getAtomicAlignment());
153     }
154 
155     Address getAtomicAddressAsAtomicIntPointer() const {
156       return emitCastToAtomicIntPointer(getAtomicAddress());
157     }
158 
159     /// Is the atomic size larger than the underlying value type?
160     ///
161     /// Note that the absence of padding does not mean that atomic
162     /// objects are completely interchangeable with non-atomic
163     /// objects: we might have promoted the alignment of a type
164     /// without making it bigger.
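    /// For example, an _Atomic 3-byte struct is typically widened to a
    /// 4-byte, 4-aligned atomic object, leaving one byte of padding
    /// (illustrative; the exact widening is target-dependent).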
165     bool hasPadding() const {
166       return (ValueSizeInBits != AtomicSizeInBits);
167     }
168 
169     bool emitMemSetZeroIfNecessary() const;
170 
171     llvm::Value *getAtomicSizeValue() const {
172       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173       return CGF.CGM.getSize(size);
174     }
175 
176     /// Cast the given pointer to an integer pointer suitable for atomic
177     /// operations.
178     Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180     /// If Addr is compatible with the iN that will be used for an atomic
181     /// operation, bitcast it. Otherwise, create a temporary that is suitable
182     /// and copy the value across.
183     Address convertToAtomicIntPointer(Address Addr) const;
184 
185     /// Turn an atomic-layout object into an r-value.
186     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187                                      SourceLocation loc, bool AsValue) const;
188 
189     /// Converts an r-value to an integer value.
190     llvm::Value *convertRValueToInt(RValue RVal) const;
191 
192     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193                                      AggValueSlot ResultSlot,
194                                      SourceLocation Loc, bool AsValue) const;
195 
196     /// Copy an atomic r-value into atomic-layout memory.
197     void emitCopyIntoMemory(RValue rvalue) const;
198 
199     /// Project an l-value down to the value field.
200     LValue projectValue() const {
201       assert(LVal.isSimple());
202       Address addr = getAtomicAddress();
203       if (hasPadding())
204         addr = CGF.Builder.CreateStructGEP(addr, 0);
205 
206       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207                               LVal.getBaseInfo(), LVal.getTBAAInfo());
208     }
209 
210     /// Emits atomic load.
211     /// \returns Loaded value.
212     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213                           bool AsValue, llvm::AtomicOrdering AO,
214                           bool IsVolatile);
215 
216     /// Emits atomic compare-and-exchange sequence.
217     /// \param Expected Expected value.
218     /// \param Desired Desired value.
219     /// \param Success Atomic ordering for success operation.
220     /// \param Failure Atomic ordering for failed operation.
221     /// \param IsWeak true if atomic operation is weak, false otherwise.
222     /// \returns Pair of values: previous value from storage (value type) and
223     /// boolean flag (i1 type) with true if success and false otherwise.
224     std::pair<RValue, llvm::Value *>
225     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226                               llvm::AtomicOrdering Success =
227                                   llvm::AtomicOrdering::SequentiallyConsistent,
228                               llvm::AtomicOrdering Failure =
229                                   llvm::AtomicOrdering::SequentiallyConsistent,
230                               bool IsWeak = false);
231 
232     /// Emits atomic update.
233     /// \param AO Atomic ordering.
234     /// \param UpdateOp Update operation for the current lvalue.
235     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
237                           bool IsVolatile);
238     /// Emits atomic update.
239     /// \param AO Atomic ordering.
240     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241                           bool IsVolatile);
242 
243     /// Materialize an atomic r-value in atomic-layout memory.
244     Address materializeRValue(RValue rvalue) const;
245 
246     /// Creates temp alloca for intermediate operations on atomic value.
247     Address CreateTempAlloca() const;
248   private:
249     bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252     /// Emits atomic load as a libcall.
253     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
254                                llvm::AtomicOrdering AO, bool IsVolatile);
255     /// Emits atomic load as LLVM instruction.
256     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257     /// Emits atomic compare-and-exchange op as a libcall.
258     llvm::Value *EmitAtomicCompareExchangeLibcall(
259         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260         llvm::AtomicOrdering Success =
261             llvm::AtomicOrdering::SequentiallyConsistent,
262         llvm::AtomicOrdering Failure =
263             llvm::AtomicOrdering::SequentiallyConsistent);
264     /// Emits atomic compare-and-exchange op as LLVM instruction.
265     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267         llvm::AtomicOrdering Success =
268             llvm::AtomicOrdering::SequentiallyConsistent,
269         llvm::AtomicOrdering Failure =
270             llvm::AtomicOrdering::SequentiallyConsistent,
271         bool IsWeak = false);
272     /// Emit atomic update as libcalls.
273     void
274     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
276                             bool IsVolatile);
277     /// Emit atomic update as LLVM instructions.
278     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
280                             bool IsVolatile);
281     /// Emit atomic update as libcalls.
282     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283                                  bool IsVolatile);
284     /// Emit atomic update as LLVM instructions.
285     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
286                             bool IsVolatile);
287   };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291   Address TempAlloca = CGF.CreateMemTemp(
292       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293                                                                 : AtomicTy,
294       getAtomicAlignment(),
295       "atomic-temp");
296   // Cast to pointer to value type for bitfields.
297   if (LVal.isBitField())
298     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299         TempAlloca, getAtomicAddress().getType());
300   return TempAlloca;
301 }
302 
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304                                 StringRef fnName,
305                                 QualType resultType,
306                                 CallArgList &args) {
307   const CGFunctionInfo &fnInfo =
308     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310   llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
311   fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
312   fnAttrB.addAttribute(llvm::Attribute::WillReturn);
313   llvm::AttributeList fnAttrs = llvm::AttributeList::get(
314       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
315 
316   llvm::FunctionCallee fn =
317       CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
318   auto callee = CGCallee::forDirect(fn);
319   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
320 }
321 
322 /// Does a store of the given IR type modify the full expected width?
323 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
324                            uint64_t expectedSize) {
325   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326 }
327 
328 /// Does the atomic type require memsetting to zero before initialization?
329 ///
330 /// The IR type is provided as a way of making certain queries faster.
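/// For example (illustrative): on x86-64 a long double has no padding in the
/// hasPadding() sense, yet its IR store (x86_fp80) only writes 10 of the 16
/// bytes of the atomic object, so the remainder must still be zeroed.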
331 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
332   // If the atomic type has size padding, we definitely need a memset.
333   if (hasPadding()) return true;
334 
335   // Otherwise, do some simple heuristics to try to avoid it:
336   switch (getEvaluationKind()) {
337   // For scalars and complexes, check whether the store size of the
338   // type uses the full size.
339   case TEK_Scalar:
340     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
341   case TEK_Complex:
342     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
343                            AtomicSizeInBits / 2);
344 
345   // Padding in structs has an undefined bit pattern.  User beware.
346   case TEK_Aggregate:
347     return false;
348   }
349   llvm_unreachable("bad evaluation kind");
350 }
351 
352 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
353   assert(LVal.isSimple());
354   Address addr = LVal.getAddress(CGF);
355   if (!requiresMemSetZero(addr.getElementType()))
356     return false;
357 
358   CGF.Builder.CreateMemSet(
359       addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
360       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
361       LVal.getAlignment().getAsAlign());
362   return true;
363 }
364 
365 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
366                               Address Dest, Address Ptr,
367                               Address Val1, Address Val2,
368                               uint64_t Size,
369                               llvm::AtomicOrdering SuccessOrder,
370                               llvm::AtomicOrdering FailureOrder,
371                               llvm::SyncScope::ID Scope) {
372   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
373   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
374   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
375 
376   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
377       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
378       Scope);
379   Pair->setVolatile(E->isVolatile());
380   Pair->setWeak(IsWeak);
381 
382   // Cmp holds the result of the compare-exchange operation: true on success,
383   // false on failure.
384   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
385   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
386 
387   // This basic block is used to hold the store instruction if the operation
388   // failed.
389   llvm::BasicBlock *StoreExpectedBB =
390       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
391 
392   // This basic block is the exit point of the operation, we should end up
393   // here regardless of whether or not the operation succeeded.
394   llvm::BasicBlock *ContinueBB =
395       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
396 
397   // Update Expected if Expected isn't equal to Old, otherwise branch to the
398   // exit point.
399   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
400 
401   CGF.Builder.SetInsertPoint(StoreExpectedBB);
402   // Update the memory at Expected with Old's value.
403   CGF.Builder.CreateStore(Old, Val1);
404   // Finally, branch to the exit point.
405   CGF.Builder.CreateBr(ContinueBB);
406 
407   CGF.Builder.SetInsertPoint(ContinueBB);
408   // Update the memory at Dest with Cmp's value.
409   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
410 }
411 
412 /// Given an ordering required on success, emit all possible cmpxchg
413 /// instructions to cope with the provided (but possibly only dynamically known)
414 /// FailureOrder.
415 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
416                                         bool IsWeak, Address Dest, Address Ptr,
417                                         Address Val1, Address Val2,
418                                         llvm::Value *FailureOrderVal,
419                                         uint64_t Size,
420                                         llvm::AtomicOrdering SuccessOrder,
421                                         llvm::SyncScope::ID Scope) {
422   llvm::AtomicOrdering FailureOrder;
423   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
424     auto FOS = FO->getSExtValue();
425     if (!llvm::isValidAtomicOrderingCABI(FOS))
426       FailureOrder = llvm::AtomicOrdering::Monotonic;
427     else
428       switch ((llvm::AtomicOrderingCABI)FOS) {
429       case llvm::AtomicOrderingCABI::relaxed:
430       // 31.7.2.18: "The failure argument shall not be memory_order_release
431       // nor memory_order_acq_rel". Fall back to monotonic.
432       case llvm::AtomicOrderingCABI::release:
433       case llvm::AtomicOrderingCABI::acq_rel:
434         FailureOrder = llvm::AtomicOrdering::Monotonic;
435         break;
436       case llvm::AtomicOrderingCABI::consume:
437       case llvm::AtomicOrderingCABI::acquire:
438         FailureOrder = llvm::AtomicOrdering::Acquire;
439         break;
440       case llvm::AtomicOrderingCABI::seq_cst:
441         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
442         break;
443       }
444     // Prior to C++17, "the failure argument shall be no stronger than the
445     // success argument". This condition has been lifted and the only
446     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
447     // language version checks.
448     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
449                       FailureOrder, Scope);
450     return;
451   }
452 
453   // Create all the relevant BB's
454   auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
455   auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
456   auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
457   auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458 
459   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
460   // doesn't matter unless someone is crazy enough to use something that
461   // doesn't fold to a constant for the ordering.
462   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
463   // Implemented as acquire, since it's the closest in LLVM.
464   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
465               AcquireBB);
466   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
467               AcquireBB);
468   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
469               SeqCstBB);
470 
471   // Emit all the different atomics
472   CGF.Builder.SetInsertPoint(MonotonicBB);
473   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
475   CGF.Builder.CreateBr(ContBB);
476 
477   CGF.Builder.SetInsertPoint(AcquireBB);
478   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
479                     llvm::AtomicOrdering::Acquire, Scope);
480   CGF.Builder.CreateBr(ContBB);
481 
482   CGF.Builder.SetInsertPoint(SeqCstBB);
483   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
484                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485   CGF.Builder.CreateBr(ContBB);
486 
487   CGF.Builder.SetInsertPoint(ContBB);
488 }
489 
490 /// Duplicate the atomic min/max operation in conventional IR for the builtin
491 /// variants that return the new rather than the original value.
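/// For example, __atomic_max_fetch must return max(old, rhs), so the
/// comparison is redone here on the value loaded by the atomicrmw.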
492 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
493                                          AtomicExpr::AtomicOp Op,
494                                          bool IsSigned,
495                                          llvm::Value *OldVal,
496                                          llvm::Value *RHS) {
497   llvm::CmpInst::Predicate Pred;
498   switch (Op) {
499   default:
500     llvm_unreachable("Unexpected min/max operation");
501   case AtomicExpr::AO__atomic_max_fetch:
502     Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
503     break;
504   case AtomicExpr::AO__atomic_min_fetch:
505     Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
506     break;
507   }
508   llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
509   return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
510 }
511 
512 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
513                          Address Ptr, Address Val1, Address Val2,
514                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
515                          uint64_t Size, llvm::AtomicOrdering Order,
516                          llvm::SyncScope::ID Scope) {
517   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
518   bool PostOpMinMax = false;
519   unsigned PostOp = 0;
520 
521   switch (E->getOp()) {
522   case AtomicExpr::AO__c11_atomic_init:
523   case AtomicExpr::AO__opencl_atomic_init:
524     llvm_unreachable("Already handled!");
525 
526   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
527   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
528   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
529     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
530                                 FailureOrder, Size, Order, Scope);
531     return;
532   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
533   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
534   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
535     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
536                                 FailureOrder, Size, Order, Scope);
537     return;
538   case AtomicExpr::AO__atomic_compare_exchange:
539   case AtomicExpr::AO__atomic_compare_exchange_n: {
540     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
541       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
542                                   Val1, Val2, FailureOrder, Size, Order, Scope);
543     } else {
544       // Create all the relevant BB's
545       llvm::BasicBlock *StrongBB =
546           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
547       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
548       llvm::BasicBlock *ContBB =
549           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
550 
551       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
552       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
553 
554       CGF.Builder.SetInsertPoint(StrongBB);
555       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
556                                   FailureOrder, Size, Order, Scope);
557       CGF.Builder.CreateBr(ContBB);
558 
559       CGF.Builder.SetInsertPoint(WeakBB);
560       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
561                                   FailureOrder, Size, Order, Scope);
562       CGF.Builder.CreateBr(ContBB);
563 
564       CGF.Builder.SetInsertPoint(ContBB);
565     }
566     return;
567   }
568   case AtomicExpr::AO__c11_atomic_load:
569   case AtomicExpr::AO__opencl_atomic_load:
570   case AtomicExpr::AO__hip_atomic_load:
571   case AtomicExpr::AO__atomic_load_n:
572   case AtomicExpr::AO__atomic_load: {
573     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
574     Load->setAtomic(Order, Scope);
575     Load->setVolatile(E->isVolatile());
576     CGF.Builder.CreateStore(Load, Dest);
577     return;
578   }
579 
580   case AtomicExpr::AO__c11_atomic_store:
581   case AtomicExpr::AO__opencl_atomic_store:
582   case AtomicExpr::AO__hip_atomic_store:
583   case AtomicExpr::AO__atomic_store:
584   case AtomicExpr::AO__atomic_store_n: {
585     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
586     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
587     Store->setAtomic(Order, Scope);
588     Store->setVolatile(E->isVolatile());
589     return;
590   }
591 
592   case AtomicExpr::AO__c11_atomic_exchange:
593   case AtomicExpr::AO__hip_atomic_exchange:
594   case AtomicExpr::AO__opencl_atomic_exchange:
595   case AtomicExpr::AO__atomic_exchange_n:
596   case AtomicExpr::AO__atomic_exchange:
597     Op = llvm::AtomicRMWInst::Xchg;
598     break;
599 
600   case AtomicExpr::AO__atomic_add_fetch:
601     PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
602                                                  : llvm::Instruction::Add;
603     LLVM_FALLTHROUGH;
604   case AtomicExpr::AO__c11_atomic_fetch_add:
605   case AtomicExpr::AO__hip_atomic_fetch_add:
606   case AtomicExpr::AO__opencl_atomic_fetch_add:
607   case AtomicExpr::AO__atomic_fetch_add:
608     Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
609                                              : llvm::AtomicRMWInst::Add;
610     break;
611 
612   case AtomicExpr::AO__atomic_sub_fetch:
613     PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
614                                                  : llvm::Instruction::Sub;
615     LLVM_FALLTHROUGH;
616   case AtomicExpr::AO__c11_atomic_fetch_sub:
617   case AtomicExpr::AO__opencl_atomic_fetch_sub:
618   case AtomicExpr::AO__atomic_fetch_sub:
619     Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
620                                              : llvm::AtomicRMWInst::Sub;
621     break;
622 
623   case AtomicExpr::AO__atomic_min_fetch:
624     PostOpMinMax = true;
625     LLVM_FALLTHROUGH;
626   case AtomicExpr::AO__c11_atomic_fetch_min:
627   case AtomicExpr::AO__hip_atomic_fetch_min:
628   case AtomicExpr::AO__opencl_atomic_fetch_min:
629   case AtomicExpr::AO__atomic_fetch_min:
630     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
631                                                   : llvm::AtomicRMWInst::UMin;
632     break;
633 
634   case AtomicExpr::AO__atomic_max_fetch:
635     PostOpMinMax = true;
636     LLVM_FALLTHROUGH;
637   case AtomicExpr::AO__c11_atomic_fetch_max:
638   case AtomicExpr::AO__hip_atomic_fetch_max:
639   case AtomicExpr::AO__opencl_atomic_fetch_max:
640   case AtomicExpr::AO__atomic_fetch_max:
641     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
642                                                   : llvm::AtomicRMWInst::UMax;
643     break;
644 
645   case AtomicExpr::AO__atomic_and_fetch:
646     PostOp = llvm::Instruction::And;
647     LLVM_FALLTHROUGH;
648   case AtomicExpr::AO__c11_atomic_fetch_and:
649   case AtomicExpr::AO__hip_atomic_fetch_and:
650   case AtomicExpr::AO__opencl_atomic_fetch_and:
651   case AtomicExpr::AO__atomic_fetch_and:
652     Op = llvm::AtomicRMWInst::And;
653     break;
654 
655   case AtomicExpr::AO__atomic_or_fetch:
656     PostOp = llvm::Instruction::Or;
657     LLVM_FALLTHROUGH;
658   case AtomicExpr::AO__c11_atomic_fetch_or:
659   case AtomicExpr::AO__hip_atomic_fetch_or:
660   case AtomicExpr::AO__opencl_atomic_fetch_or:
661   case AtomicExpr::AO__atomic_fetch_or:
662     Op = llvm::AtomicRMWInst::Or;
663     break;
664 
665   case AtomicExpr::AO__atomic_xor_fetch:
666     PostOp = llvm::Instruction::Xor;
667     LLVM_FALLTHROUGH;
668   case AtomicExpr::AO__c11_atomic_fetch_xor:
669   case AtomicExpr::AO__hip_atomic_fetch_xor:
670   case AtomicExpr::AO__opencl_atomic_fetch_xor:
671   case AtomicExpr::AO__atomic_fetch_xor:
672     Op = llvm::AtomicRMWInst::Xor;
673     break;
674 
675   case AtomicExpr::AO__atomic_nand_fetch:
676     PostOp = llvm::Instruction::And; // the NOT is special cased below
677     LLVM_FALLTHROUGH;
678   case AtomicExpr::AO__c11_atomic_fetch_nand:
679   case AtomicExpr::AO__atomic_fetch_nand:
680     Op = llvm::AtomicRMWInst::Nand;
681     break;
682   }
683 
684   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
685   llvm::AtomicRMWInst *RMWI =
686       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
687   RMWI->setVolatile(E->isVolatile());
688 
689   // For __atomic_*_fetch operations, perform the operation again to
690   // determine the value which was written.
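  // For example, __atomic_add_fetch returns old + val, recomputed here from
  // the atomicrmw result (illustrative).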
691   llvm::Value *Result = RMWI;
692   if (PostOpMinMax)
693     Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
694                                   E->getValueType()->isSignedIntegerType(),
695                                   RMWI, LoadVal1);
696   else if (PostOp)
697     Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
698                                      LoadVal1);
699   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
700     Result = CGF.Builder.CreateNot(Result);
701   CGF.Builder.CreateStore(Result, Dest);
702 }
703 
704 // This function emits any expression (scalar, complex, or aggregate)
705 // into a temporary alloca.
706 static Address
707 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
708   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
709   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
710                        /*Init*/ true);
711   return DeclPtr;
712 }
713 
714 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
715                          Address Ptr, Address Val1, Address Val2,
716                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
717                          uint64_t Size, llvm::AtomicOrdering Order,
718                          llvm::Value *Scope) {
719   auto ScopeModel = Expr->getScopeModel();
720 
721   // LLVM atomic instructions always have a sync scope. If the clang atomic
722   // expression has no scope operand, use the default LLVM sync scope.
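  // (In LLVM, the empty sync-scope name denotes the default, system-wide
  // scope.)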
723   if (!ScopeModel) {
724     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
725                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
726     return;
727   }
728 
729   // Handle constant scope.
730   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
731     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
732         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
733         Order, CGF.CGM.getLLVMContext());
734     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
735                  Order, SCID);
736     return;
737   }
738 
739   // Handle non-constant scope.
740   auto &Builder = CGF.Builder;
741   auto Scopes = ScopeModel->getRuntimeValues();
742   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
743   for (auto S : Scopes)
744     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
745 
746   llvm::BasicBlock *ContBB =
747       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
748 
749   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
750   // If an unsupported sync scope is encountered at run time, assume a fallback
751   // sync scope value.
752   auto FallBack = ScopeModel->getFallBackValue();
753   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
754   for (auto S : Scopes) {
755     auto *B = BB[S];
756     if (S != FallBack)
757       SI->addCase(Builder.getInt32(S), B);
758 
759     Builder.SetInsertPoint(B);
760     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
761                  Order,
762                  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
763                                                          ScopeModel->map(S),
764                                                          Order,
765                                                          CGF.getLLVMContext()));
766     Builder.CreateBr(ContBB);
767   }
768 
769   Builder.SetInsertPoint(ContBB);
770 }
771 
772 static void
773 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
774                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
775                   SourceLocation Loc, CharUnits SizeInChars) {
776   if (UseOptimizedLibcall) {
777     // Load value and pass it to the function directly.
778     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
779     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
780     ValTy =
781         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
782     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
783                                                 SizeInBits)->getPointerTo();
784     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
785     Val = CGF.EmitLoadOfScalar(Ptr, false,
786                                CGF.getContext().getPointerType(ValTy),
787                                Loc);
788     // Coerce the value into an appropriately sized integer type.
789     Args.add(RValue::get(Val), ValTy);
790   } else {
791     // Non-optimized functions always take a reference.
792     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
793                          CGF.getContext().VoidPtrTy);
794   }
795 }
796 
797 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
798   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
799   QualType MemTy = AtomicTy;
800   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
801     MemTy = AT->getValueType();
802   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
803 
804   Address Val1 = Address::invalid();
805   Address Val2 = Address::invalid();
806   Address Dest = Address::invalid();
807   Address Ptr = EmitPointerWithAlignment(E->getPtr());
808 
809   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
810       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
811     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
812     EmitAtomicInit(E->getVal1(), lvalue);
813     return RValue::get(nullptr);
814   }
815 
816   auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
817   uint64_t Size = TInfo.Width.getQuantity();
818   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
819 
820   bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
821   bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
822   bool UseLibcall = Misaligned | Oversized;
823   bool ShouldCastToIntPtrTy = true;
824 
825   CharUnits MaxInlineWidth =
826       getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
827 
828   DiagnosticsEngine &Diags = CGM.getDiags();
829 
830   if (Misaligned) {
831     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
832         << (int)TInfo.Width.getQuantity()
833         << (int)Ptr.getAlignment().getQuantity();
834   }
835 
836   if (Oversized) {
837     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
838         << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
839   }
840 
841   llvm::Value *Order = EmitScalarExpr(E->getOrder());
842   llvm::Value *Scope =
843       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
844 
845   switch (E->getOp()) {
846   case AtomicExpr::AO__c11_atomic_init:
847   case AtomicExpr::AO__opencl_atomic_init:
848     llvm_unreachable("Already handled above with EmitAtomicInit!");
849 
850   case AtomicExpr::AO__c11_atomic_load:
851   case AtomicExpr::AO__opencl_atomic_load:
852   case AtomicExpr::AO__hip_atomic_load:
853   case AtomicExpr::AO__atomic_load_n:
854     break;
855 
856   case AtomicExpr::AO__atomic_load:
857     Dest = EmitPointerWithAlignment(E->getVal1());
858     break;
859 
860   case AtomicExpr::AO__atomic_store:
861     Val1 = EmitPointerWithAlignment(E->getVal1());
862     break;
863 
864   case AtomicExpr::AO__atomic_exchange:
865     Val1 = EmitPointerWithAlignment(E->getVal1());
866     Dest = EmitPointerWithAlignment(E->getVal2());
867     break;
868 
869   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
870   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
871   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
872   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
873   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
874   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
875   case AtomicExpr::AO__atomic_compare_exchange_n:
876   case AtomicExpr::AO__atomic_compare_exchange:
877     Val1 = EmitPointerWithAlignment(E->getVal1());
878     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
879       Val2 = EmitPointerWithAlignment(E->getVal2());
880     else
881       Val2 = EmitValToTemp(*this, E->getVal2());
882     OrderFail = EmitScalarExpr(E->getOrderFail());
883     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
884         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
885       IsWeak = EmitScalarExpr(E->getWeak());
886     break;
887 
888   case AtomicExpr::AO__c11_atomic_fetch_add:
889   case AtomicExpr::AO__c11_atomic_fetch_sub:
890   case AtomicExpr::AO__hip_atomic_fetch_add:
891   case AtomicExpr::AO__opencl_atomic_fetch_add:
892   case AtomicExpr::AO__opencl_atomic_fetch_sub:
893     if (MemTy->isPointerType()) {
894       // For pointer arithmetic, we're required to do a bit of math:
895       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
896       // ... but only for the C11 builtins. The GNU builtins expect the
897       // user to multiply by sizeof(T).
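      // For example (illustrative): __c11_atomic_fetch_add(&p, 1, ...) on an
      // _Atomic(int *) advances p by sizeof(int) bytes, whereas
      // __atomic_fetch_add(&p, 1, ...) adds a single byte.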
898       QualType Val1Ty = E->getVal1()->getType();
899       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
900       CharUnits PointeeIncAmt =
901           getContext().getTypeSizeInChars(MemTy->getPointeeType());
902       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
903       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
904       Val1 = Temp;
905       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
906       break;
907     }
908     LLVM_FALLTHROUGH;
909   case AtomicExpr::AO__atomic_fetch_add:
910   case AtomicExpr::AO__atomic_fetch_sub:
911   case AtomicExpr::AO__atomic_add_fetch:
912   case AtomicExpr::AO__atomic_sub_fetch:
913     ShouldCastToIntPtrTy = !MemTy->isFloatingType();
914     LLVM_FALLTHROUGH;
915 
916   case AtomicExpr::AO__c11_atomic_store:
917   case AtomicExpr::AO__c11_atomic_exchange:
918   case AtomicExpr::AO__opencl_atomic_store:
919   case AtomicExpr::AO__hip_atomic_store:
920   case AtomicExpr::AO__opencl_atomic_exchange:
921   case AtomicExpr::AO__hip_atomic_exchange:
922   case AtomicExpr::AO__atomic_store_n:
923   case AtomicExpr::AO__atomic_exchange_n:
924   case AtomicExpr::AO__c11_atomic_fetch_and:
925   case AtomicExpr::AO__c11_atomic_fetch_or:
926   case AtomicExpr::AO__c11_atomic_fetch_xor:
927   case AtomicExpr::AO__c11_atomic_fetch_nand:
928   case AtomicExpr::AO__c11_atomic_fetch_max:
929   case AtomicExpr::AO__c11_atomic_fetch_min:
930   case AtomicExpr::AO__opencl_atomic_fetch_and:
931   case AtomicExpr::AO__opencl_atomic_fetch_or:
932   case AtomicExpr::AO__opencl_atomic_fetch_xor:
933   case AtomicExpr::AO__opencl_atomic_fetch_min:
934   case AtomicExpr::AO__opencl_atomic_fetch_max:
935   case AtomicExpr::AO__atomic_fetch_and:
936   case AtomicExpr::AO__hip_atomic_fetch_and:
937   case AtomicExpr::AO__atomic_fetch_or:
938   case AtomicExpr::AO__hip_atomic_fetch_or:
939   case AtomicExpr::AO__atomic_fetch_xor:
940   case AtomicExpr::AO__hip_atomic_fetch_xor:
941   case AtomicExpr::AO__atomic_fetch_nand:
942   case AtomicExpr::AO__atomic_and_fetch:
943   case AtomicExpr::AO__atomic_or_fetch:
944   case AtomicExpr::AO__atomic_xor_fetch:
945   case AtomicExpr::AO__atomic_nand_fetch:
946   case AtomicExpr::AO__atomic_max_fetch:
947   case AtomicExpr::AO__atomic_min_fetch:
948   case AtomicExpr::AO__atomic_fetch_max:
949   case AtomicExpr::AO__hip_atomic_fetch_max:
950   case AtomicExpr::AO__atomic_fetch_min:
951   case AtomicExpr::AO__hip_atomic_fetch_min:
952     Val1 = EmitValToTemp(*this, E->getVal1());
953     break;
954   }
955 
956   QualType RValTy = E->getType().getUnqualifiedType();
957 
958   // The inlined atomics only function on iN types, where N is a power of 2. We
959   // need to make sure (via temporaries if necessary) that all incoming values
960   // are compatible.
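  // For example, a 4-byte float operand to an atomic load or exchange is
  // accessed through an i32 pointer (illustrative).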
961   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
962   AtomicInfo Atomics(*this, AtomicVal);
963 
964   if (ShouldCastToIntPtrTy) {
965     Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
966     if (Val1.isValid())
967       Val1 = Atomics.convertToAtomicIntPointer(Val1);
968     if (Val2.isValid())
969       Val2 = Atomics.convertToAtomicIntPointer(Val2);
970   }
971   if (Dest.isValid()) {
972     if (ShouldCastToIntPtrTy)
973       Dest = Atomics.emitCastToAtomicIntPointer(Dest);
974   } else if (E->isCmpXChg())
975     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
976   else if (!RValTy->isVoidType()) {
977     Dest = Atomics.CreateTempAlloca();
978     if (ShouldCastToIntPtrTy)
979       Dest = Atomics.emitCastToAtomicIntPointer(Dest);
980   }
981 
982   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
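  // (Illustrative: an oversized object, e.g. wider than the target's maximum
  // inline atomic width, or a misaligned one, takes this path.)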
983   if (UseLibcall) {
984     bool UseOptimizedLibcall = false;
985     switch (E->getOp()) {
986     case AtomicExpr::AO__c11_atomic_init:
987     case AtomicExpr::AO__opencl_atomic_init:
988       llvm_unreachable("Already handled above with EmitAtomicInit!");
989 
990     case AtomicExpr::AO__c11_atomic_fetch_add:
991     case AtomicExpr::AO__opencl_atomic_fetch_add:
992     case AtomicExpr::AO__atomic_fetch_add:
993     case AtomicExpr::AO__hip_atomic_fetch_add:
994     case AtomicExpr::AO__c11_atomic_fetch_and:
995     case AtomicExpr::AO__opencl_atomic_fetch_and:
996     case AtomicExpr::AO__hip_atomic_fetch_and:
997     case AtomicExpr::AO__atomic_fetch_and:
998     case AtomicExpr::AO__c11_atomic_fetch_or:
999     case AtomicExpr::AO__opencl_atomic_fetch_or:
1000     case AtomicExpr::AO__hip_atomic_fetch_or:
1001     case AtomicExpr::AO__atomic_fetch_or:
1002     case AtomicExpr::AO__c11_atomic_fetch_nand:
1003     case AtomicExpr::AO__atomic_fetch_nand:
1004     case AtomicExpr::AO__c11_atomic_fetch_sub:
1005     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1006     case AtomicExpr::AO__atomic_fetch_sub:
1007     case AtomicExpr::AO__c11_atomic_fetch_xor:
1008     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1009     case AtomicExpr::AO__opencl_atomic_fetch_min:
1010     case AtomicExpr::AO__opencl_atomic_fetch_max:
1011     case AtomicExpr::AO__atomic_fetch_xor:
1012     case AtomicExpr::AO__hip_atomic_fetch_xor:
1013     case AtomicExpr::AO__c11_atomic_fetch_max:
1014     case AtomicExpr::AO__c11_atomic_fetch_min:
1015     case AtomicExpr::AO__atomic_add_fetch:
1016     case AtomicExpr::AO__atomic_and_fetch:
1017     case AtomicExpr::AO__atomic_nand_fetch:
1018     case AtomicExpr::AO__atomic_or_fetch:
1019     case AtomicExpr::AO__atomic_sub_fetch:
1020     case AtomicExpr::AO__atomic_xor_fetch:
1021     case AtomicExpr::AO__atomic_fetch_max:
1022     case AtomicExpr::AO__hip_atomic_fetch_max:
1023     case AtomicExpr::AO__atomic_fetch_min:
1024     case AtomicExpr::AO__hip_atomic_fetch_min:
1025     case AtomicExpr::AO__atomic_max_fetch:
1026     case AtomicExpr::AO__atomic_min_fetch:
1027       // For these, only library calls for certain sizes exist.
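      // (Only size-suffixed variants such as __atomic_fetch_add_4 exist;
      // there is no generic, size_t-parameterized form of these operations.)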
1028       UseOptimizedLibcall = true;
1029       break;
1030 
1031     case AtomicExpr::AO__atomic_load:
1032     case AtomicExpr::AO__atomic_store:
1033     case AtomicExpr::AO__atomic_exchange:
1034     case AtomicExpr::AO__atomic_compare_exchange:
1035       // Use the generic version if we don't know that the operand will be
1036       // suitably aligned for the optimized version.
1037       if (Misaligned)
1038         break;
1039       LLVM_FALLTHROUGH;
1040     case AtomicExpr::AO__c11_atomic_load:
1041     case AtomicExpr::AO__c11_atomic_store:
1042     case AtomicExpr::AO__c11_atomic_exchange:
1043     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1044     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1045     case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1046     case AtomicExpr::AO__opencl_atomic_load:
1047     case AtomicExpr::AO__hip_atomic_load:
1048     case AtomicExpr::AO__opencl_atomic_store:
1049     case AtomicExpr::AO__hip_atomic_store:
1050     case AtomicExpr::AO__opencl_atomic_exchange:
1051     case AtomicExpr::AO__hip_atomic_exchange:
1052     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1053     case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1054     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1055     case AtomicExpr::AO__atomic_load_n:
1056     case AtomicExpr::AO__atomic_store_n:
1057     case AtomicExpr::AO__atomic_exchange_n:
1058     case AtomicExpr::AO__atomic_compare_exchange_n:
1059       // Only use optimized library calls for sizes for which they exist.
1060       // FIXME: Size == 16 optimized library functions exist too.
1061       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1062         UseOptimizedLibcall = true;
1063       break;
1064     }
1065 
1066     CallArgList Args;
1067     if (!UseOptimizedLibcall) {
1068       // For non-optimized library calls, the size is the first parameter
1069       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1070                getContext().getSizeType());
1071     }
1072     // The atomic address is the first or second parameter.
1073     // The OpenCL atomic library functions only accept pointer arguments to
1074     // the generic address space.
1075     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1076       if (!E->isOpenCL())
1077         return V;
1078       auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1079       if (AS == LangAS::opencl_generic)
1080         return V;
1081       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1082       auto T = llvm::cast<llvm::PointerType>(V->getType());
1083       auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
1084 
1085       return getTargetHooks().performAddrSpaceCast(
1086           *this, V, AS, LangAS::opencl_generic, DestType, false);
1087     };
1088 
1089     Args.add(RValue::get(CastToGenericAddrSpace(
1090                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1091              getContext().VoidPtrTy);
1092 
1093     std::string LibCallName;
1094     QualType LoweredMemTy =
1095       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1096     QualType RetTy;
1097     bool HaveRetTy = false;
1098     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1099     bool PostOpMinMax = false;
1100     switch (E->getOp()) {
1101     case AtomicExpr::AO__c11_atomic_init:
1102     case AtomicExpr::AO__opencl_atomic_init:
1103       llvm_unreachable("Already handled!");
1104 
1105     // There is only one libcall for compare and exchange, because there is no
1106     // optimisation benefit possible from a libcall version of a weak compare
1107     // and exchange.
1108     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1109     //                                void *desired, int success, int failure)
1110     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1111     //                                  int success, int failure)
1112     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1113     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1114     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1115     case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1116     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1117     case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1118     case AtomicExpr::AO__atomic_compare_exchange:
1119     case AtomicExpr::AO__atomic_compare_exchange_n:
1120       LibCallName = "__atomic_compare_exchange";
1121       RetTy = getContext().BoolTy;
1122       HaveRetTy = true;
1123       Args.add(
1124           RValue::get(CastToGenericAddrSpace(
1125               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1126           getContext().VoidPtrTy);
1127       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1128                         MemTy, E->getExprLoc(), TInfo.Width);
1129       Args.add(RValue::get(Order), getContext().IntTy);
1130       Order = OrderFail;
1131       break;
1132     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1133     //                        int order)
1134     // T __atomic_exchange_N(T *mem, T val, int order)
1135     case AtomicExpr::AO__c11_atomic_exchange:
1136     case AtomicExpr::AO__opencl_atomic_exchange:
1137     case AtomicExpr::AO__atomic_exchange_n:
1138     case AtomicExpr::AO__atomic_exchange:
1139     case AtomicExpr::AO__hip_atomic_exchange:
1140       LibCallName = "__atomic_exchange";
1141       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1142                         MemTy, E->getExprLoc(), TInfo.Width);
1143       break;
1144     // void __atomic_store(size_t size, void *mem, void *val, int order)
1145     // void __atomic_store_N(T *mem, T val, int order)
1146     case AtomicExpr::AO__c11_atomic_store:
1147     case AtomicExpr::AO__opencl_atomic_store:
1148     case AtomicExpr::AO__hip_atomic_store:
1149     case AtomicExpr::AO__atomic_store:
1150     case AtomicExpr::AO__atomic_store_n:
1151       LibCallName = "__atomic_store";
1152       RetTy = getContext().VoidTy;
1153       HaveRetTy = true;
1154       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1155                         MemTy, E->getExprLoc(), TInfo.Width);
1156       break;
1157     // void __atomic_load(size_t size, void *mem, void *return, int order)
1158     // T __atomic_load_N(T *mem, int order)
1159     case AtomicExpr::AO__c11_atomic_load:
1160     case AtomicExpr::AO__opencl_atomic_load:
1161     case AtomicExpr::AO__hip_atomic_load:
1162     case AtomicExpr::AO__atomic_load:
1163     case AtomicExpr::AO__atomic_load_n:
1164       LibCallName = "__atomic_load";
1165       break;
1166     // T __atomic_add_fetch_N(T *mem, T val, int order)
1167     // T __atomic_fetch_add_N(T *mem, T val, int order)
1168     case AtomicExpr::AO__atomic_add_fetch:
1169       PostOp = llvm::Instruction::Add;
1170       LLVM_FALLTHROUGH;
1171     case AtomicExpr::AO__c11_atomic_fetch_add:
1172     case AtomicExpr::AO__opencl_atomic_fetch_add:
1173     case AtomicExpr::AO__atomic_fetch_add:
1174     case AtomicExpr::AO__hip_atomic_fetch_add:
1175       LibCallName = "__atomic_fetch_add";
1176       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1177                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1178       break;
1179     // T __atomic_and_fetch_N(T *mem, T val, int order)
1180     // T __atomic_fetch_and_N(T *mem, T val, int order)
1181     case AtomicExpr::AO__atomic_and_fetch:
1182       PostOp = llvm::Instruction::And;
1183       LLVM_FALLTHROUGH;
1184     case AtomicExpr::AO__c11_atomic_fetch_and:
1185     case AtomicExpr::AO__opencl_atomic_fetch_and:
1186     case AtomicExpr::AO__hip_atomic_fetch_and:
1187     case AtomicExpr::AO__atomic_fetch_and:
1188       LibCallName = "__atomic_fetch_and";
1189       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1190                         MemTy, E->getExprLoc(), TInfo.Width);
1191       break;
1192     // T __atomic_or_fetch_N(T *mem, T val, int order)
1193     // T __atomic_fetch_or_N(T *mem, T val, int order)
1194     case AtomicExpr::AO__atomic_or_fetch:
1195       PostOp = llvm::Instruction::Or;
1196       LLVM_FALLTHROUGH;
1197     case AtomicExpr::AO__c11_atomic_fetch_or:
1198     case AtomicExpr::AO__opencl_atomic_fetch_or:
1199     case AtomicExpr::AO__hip_atomic_fetch_or:
1200     case AtomicExpr::AO__atomic_fetch_or:
1201       LibCallName = "__atomic_fetch_or";
1202       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1203                         MemTy, E->getExprLoc(), TInfo.Width);
1204       break;
1205     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1206     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1207     case AtomicExpr::AO__atomic_sub_fetch:
1208       PostOp = llvm::Instruction::Sub;
1209       LLVM_FALLTHROUGH;
1210     case AtomicExpr::AO__c11_atomic_fetch_sub:
1211     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1212     case AtomicExpr::AO__atomic_fetch_sub:
1213       LibCallName = "__atomic_fetch_sub";
1214       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1215                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1216       break;
1217     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1218     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1219     case AtomicExpr::AO__atomic_xor_fetch:
1220       PostOp = llvm::Instruction::Xor;
1221       LLVM_FALLTHROUGH;
1222     case AtomicExpr::AO__c11_atomic_fetch_xor:
1223     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1224     case AtomicExpr::AO__hip_atomic_fetch_xor:
1225     case AtomicExpr::AO__atomic_fetch_xor:
1226       LibCallName = "__atomic_fetch_xor";
1227       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1228                         MemTy, E->getExprLoc(), TInfo.Width);
1229       break;
1230     case AtomicExpr::AO__atomic_min_fetch:
1231       PostOpMinMax = true;
1232       LLVM_FALLTHROUGH;
1233     case AtomicExpr::AO__c11_atomic_fetch_min:
1234     case AtomicExpr::AO__atomic_fetch_min:
1235     case AtomicExpr::AO__hip_atomic_fetch_min:
1236     case AtomicExpr::AO__opencl_atomic_fetch_min:
1237       LibCallName = E->getValueType()->isSignedIntegerType()
1238                         ? "__atomic_fetch_min"
1239                         : "__atomic_fetch_umin";
1240       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1241                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1242       break;
1243     case AtomicExpr::AO__atomic_max_fetch:
1244       PostOpMinMax = true;
1245       LLVM_FALLTHROUGH;
1246     case AtomicExpr::AO__c11_atomic_fetch_max:
1247     case AtomicExpr::AO__atomic_fetch_max:
1248     case AtomicExpr::AO__hip_atomic_fetch_max:
1249     case AtomicExpr::AO__opencl_atomic_fetch_max:
1250       LibCallName = E->getValueType()->isSignedIntegerType()
1251                         ? "__atomic_fetch_max"
1252                         : "__atomic_fetch_umax";
1253       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1254                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1255       break;
1256     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1257     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1258     case AtomicExpr::AO__atomic_nand_fetch:
1259       PostOp = llvm::Instruction::And; // the NOT is special cased below
1260       LLVM_FALLTHROUGH;
1261     case AtomicExpr::AO__c11_atomic_fetch_nand:
1262     case AtomicExpr::AO__atomic_fetch_nand:
1263       LibCallName = "__atomic_fetch_nand";
1264       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1265                         MemTy, E->getExprLoc(), TInfo.Width);
1266       break;
1267     }
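    // The <op>_fetch builtins are mapped onto the corresponding
    // "__atomic_fetch_<op>" libcalls above, which return the old value; the
    // PostOp / PostOpMinMax recorded there are used to recompute the new value
    // after the call returns.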
1268 
1269     if (E->isOpenCL()) {
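      // E.g. "__atomic_fetch_add" becomes "__opencl_atomic_fetch_add".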
1270       LibCallName = std::string("__opencl") +
1271           StringRef(LibCallName).drop_front(1).str();
1272 
1273     }
1274     // Optimized functions have the size in their name.
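    // E.g. with a 4-byte operand, "__atomic_fetch_add" becomes
    // "__atomic_fetch_add_4".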
1275     if (UseOptimizedLibcall)
1276       LibCallName += "_" + llvm::utostr(Size);
1277     // By default, assume we return a value of the atomic type.
1278     if (!HaveRetTy) {
1279       if (UseOptimizedLibcall) {
1280         // Value is returned directly.
1281         // The function returns an appropriately sized integer type.
1282         RetTy = getContext().getIntTypeForBitwidth(
1283             getContext().toBits(TInfo.Width), /*Signed=*/false);
1284       } else {
1285         // Value is returned through parameter before the order.
1286         RetTy = getContext().VoidTy;
1287         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1288                  getContext().VoidPtrTy);
1289       }
1290     }
1291     // order is always the last parameter
1292     Args.add(RValue::get(Order),
1293              getContext().IntTy);
1294     if (E->isOpenCL())
1295       Args.add(RValue::get(Scope), getContext().IntTy);
1296 
1297     // PostOp is only needed for the atomic_*_fetch operations, and thus is
1298     // only needed for, and implemented in, the UseOptimizedLibcall
1299     // codepath.
1300     assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1301 
1302     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1303     // The value is returned directly from the libcall.
1304     if (E->isCmpXChg())
1305       return Res;
1306 
1307     // The value is returned directly for optimized libcalls, but the expr
1308     // provided an out-param, so store the result there.
1309     if (UseOptimizedLibcall && Res.getScalarVal()) {
1310       llvm::Value *ResVal = Res.getScalarVal();
1311       if (PostOpMinMax) {
1312         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1313         ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1314                                       E->getValueType()->isSignedIntegerType(),
1315                                       ResVal, LoadVal1);
1316       } else if (PostOp) {
1317         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1318         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1319       }
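      // __atomic_fetch_nand returns the old value, while the nand_fetch form
      // must yield ~(old & val); the And PostOp above plus the NOT below
      // reconstruct that result.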
1320       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1321         ResVal = Builder.CreateNot(ResVal);
1322 
1323       Builder.CreateStore(
1324           ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
1325     }
1326 
1327     if (RValTy->isVoidType())
1328       return RValue::get(nullptr);
1329 
1330     return convertTempToRValue(
1331         Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
1332         RValTy, E->getExprLoc());
1333   }
1334 
1335   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1336                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1337                  E->getOp() == AtomicExpr::AO__hip_atomic_store ||
1338                  E->getOp() == AtomicExpr::AO__atomic_store ||
1339                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1340   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1341                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1342                 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
1343                 E->getOp() == AtomicExpr::AO__atomic_load ||
1344                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1345 
1346   if (isa<llvm::ConstantInt>(Order)) {
1347     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1348     // We should not ever get to a case where the ordering isn't a valid C ABI
1349     // value, but it's hard to enforce that in general.
1350     if (llvm::isValidAtomicOrderingCABI(ord))
1351       switch ((llvm::AtomicOrderingCABI)ord) {
1352       case llvm::AtomicOrderingCABI::relaxed:
1353         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1354                      llvm::AtomicOrdering::Monotonic, Scope);
1355         break;
1356       case llvm::AtomicOrderingCABI::consume:
1357       case llvm::AtomicOrderingCABI::acquire:
1358         if (IsStore)
1359           break; // Avoid crashing on code with undefined behavior
1360         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1361                      llvm::AtomicOrdering::Acquire, Scope);
1362         break;
1363       case llvm::AtomicOrderingCABI::release:
1364         if (IsLoad)
1365           break; // Avoid crashing on code with undefined behavior
1366         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1367                      llvm::AtomicOrdering::Release, Scope);
1368         break;
1369       case llvm::AtomicOrderingCABI::acq_rel:
1370         if (IsLoad || IsStore)
1371           break; // Avoid crashing on code with undefined behavior
1372         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1373                      llvm::AtomicOrdering::AcquireRelease, Scope);
1374         break;
1375       case llvm::AtomicOrderingCABI::seq_cst:
1376         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1377                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1378         break;
1379       }
1380     if (RValTy->isVoidType())
1381       return RValue::get(nullptr);
1382 
1383     return convertTempToRValue(
1384         Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
1385         RValTy, E->getExprLoc());
1386   }
1387 
1388   // Long case, when Order isn't obviously constant.
1389 
1390   // Create all the relevant BB's
1391   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1392                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1393                    *SeqCstBB = nullptr;
1394   MonotonicBB = createBasicBlock("monotonic", CurFn);
1395   if (!IsStore)
1396     AcquireBB = createBasicBlock("acquire", CurFn);
1397   if (!IsLoad)
1398     ReleaseBB = createBasicBlock("release", CurFn);
1399   if (!IsLoad && !IsStore)
1400     AcqRelBB = createBasicBlock("acqrel", CurFn);
1401   SeqCstBB = createBasicBlock("seqcst", CurFn);
1402   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1403 
1404   // Create the switch for the split
1405   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1406   // doesn't matter unless someone is crazy enough to use something that
1407   // doesn't fold to a constant for the ordering.
1408   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1409   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1410 
1411   // Emit all the different atomics
1412   Builder.SetInsertPoint(MonotonicBB);
1413   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1414                llvm::AtomicOrdering::Monotonic, Scope);
1415   Builder.CreateBr(ContBB);
1416   if (!IsStore) {
1417     Builder.SetInsertPoint(AcquireBB);
1418     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1419                  llvm::AtomicOrdering::Acquire, Scope);
1420     Builder.CreateBr(ContBB);
1421     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1422                 AcquireBB);
1423     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1424                 AcquireBB);
1425   }
1426   if (!IsLoad) {
1427     Builder.SetInsertPoint(ReleaseBB);
1428     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1429                  llvm::AtomicOrdering::Release, Scope);
1430     Builder.CreateBr(ContBB);
1431     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1432                 ReleaseBB);
1433   }
1434   if (!IsLoad && !IsStore) {
1435     Builder.SetInsertPoint(AcqRelBB);
1436     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1437                  llvm::AtomicOrdering::AcquireRelease, Scope);
1438     Builder.CreateBr(ContBB);
1439     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1440                 AcqRelBB);
1441   }
1442   Builder.SetInsertPoint(SeqCstBB);
1443   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1444                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1445   Builder.CreateBr(ContBB);
1446   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1447               SeqCstBB);
1448 
1449   // Cleanup and return
1450   Builder.SetInsertPoint(ContBB);
1451   if (RValTy->isVoidType())
1452     return RValue::get(nullptr);
1453 
1454   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1455   return convertTempToRValue(
1456       Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
1457       RValTy, E->getExprLoc());
1458 }
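// Purely illustrative sketch of the runtime dispatch emitted above when the
// ordering is not a compile-time constant:
//
//   switch (order) {
//   default: /* monotonic */     op(monotonic);  break;
//   case consume: case acquire:  op(acquire);    break;  // loads and rmw only
//   case release:                op(release);    break;  // stores and rmw only
//   case acq_rel:                op(acq_rel);    break;  // rmw only
//   case seq_cst:                op(seq_cst);    break;
//   }
//   // Control then rejoins at "atomic.continue" and the result, if any, is
//   // read back out of Dest.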
1459 
1460 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1461   llvm::IntegerType *ty =
1462     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1463   return CGF.Builder.CreateElementBitCast(addr, ty);
1464 }
1465 
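/// Return \p Addr reinterpreted as a pointer to the padded atomic integer
/// type, copying the value into a suitably sized temporary first when the
/// source size differs from the atomic width.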
1466 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1467   llvm::Type *Ty = Addr.getElementType();
1468   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1469   if (SourceSizeInBits != AtomicSizeInBits) {
1470     Address Tmp = CreateTempAlloca();
1471     CGF.Builder.CreateMemCpy(Tmp, Addr,
1472                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1473     Addr = Tmp;
1474   }
1475 
1476   return emitCastToAtomicIntPointer(Addr);
1477 }
1478 
1479 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1480                                              AggValueSlot resultSlot,
1481                                              SourceLocation loc,
1482                                              bool asValue) const {
1483   if (LVal.isSimple()) {
1484     if (EvaluationKind == TEK_Aggregate)
1485       return resultSlot.asRValue();
1486 
1487     // Drill into the padding structure if we have one.
1488     if (hasPadding())
1489       addr = CGF.Builder.CreateStructGEP(addr, 0);
1490 
1491     // Otherwise, just convert the temporary to an r-value using the
1492     // normal conversion routine.
1493     return CGF.convertTempToRValue(addr, getValueType(), loc);
1494   }
1495   if (!asValue)
1496     // Get RValue from temp memory as atomic for non-simple lvalues
1497     return RValue::get(CGF.Builder.CreateLoad(addr));
1498   if (LVal.isBitField())
1499     return CGF.EmitLoadOfBitfieldLValue(
1500         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1501                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1502   if (LVal.isVectorElt())
1503     return CGF.EmitLoadOfLValue(
1504         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1505                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1506   assert(LVal.isExtVectorElt());
1507   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1508       addr, LVal.getExtVectorElts(), LVal.getType(),
1509       LVal.getBaseInfo(), TBAAAccessInfo()));
1510 }
1511 
1512 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1513                                              AggValueSlot ResultSlot,
1514                                              SourceLocation Loc,
1515                                              bool AsValue) const {
1516   // Try to avoid going through a temporary in some easy cases.
1517   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
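  // Fast path: for scalar values we can use the integer directly, either
  // because we are producing the full value (no padding, no narrowed bitfield)
  // or because the caller asked for the raw atomic representation (!AsValue).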
1518   if (getEvaluationKind() == TEK_Scalar &&
1519       (((!LVal.isBitField() ||
1520          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1521         !hasPadding()) ||
1522        !AsValue)) {
1523     auto *ValTy = AsValue
1524                       ? CGF.ConvertTypeForMem(ValueTy)
1525                       : getAtomicAddress().getElementType();
1526     if (ValTy->isIntegerTy()) {
1527       assert(IntVal->getType() == ValTy && "Different integer types.");
1528       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1529     } else if (ValTy->isPointerTy())
1530       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1531     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1532       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1533   }
1534 
1535   // Create a temporary.  This needs to be big enough to hold the
1536   // atomic integer.
1537   Address Temp = Address::invalid();
1538   bool TempIsVolatile = false;
1539   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1540     assert(!ResultSlot.isIgnored());
1541     Temp = ResultSlot.getAddress();
1542     TempIsVolatile = ResultSlot.isVolatile();
1543   } else {
1544     Temp = CreateTempAlloca();
1545   }
1546 
1547   // Slam the integer into the temporary.
1548   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1549   CGF.Builder.CreateStore(IntVal, CastTemp)
1550       ->setVolatile(TempIsVolatile);
1551 
1552   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1553 }
1554 
1555 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1556                                        llvm::AtomicOrdering AO, bool) {
1557   // void __atomic_load(size_t size, void *mem, void *return, int order);
1558   CallArgList Args;
1559   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1560   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1561            CGF.getContext().VoidPtrTy);
1562   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1563            CGF.getContext().VoidPtrTy);
1564   Args.add(
1565       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1566       CGF.getContext().IntTy);
1567   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1568 }
1569 
1570 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1571                                           bool IsVolatile) {
1572   // Okay, we're doing this natively.
1573   Address Addr = getAtomicAddressAsAtomicIntPointer();
1574   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1575   Load->setAtomic(AO);
1576 
1577   // Other decoration.
1578   if (IsVolatile)
1579     Load->setVolatile(true);
1580   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1581   return Load;
1582 }
1583 
1584 /// An LValue is a candidate for having its loads and stores be made atomic if
1585 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1586 /// such an operation can be performed without a libcall.
1587 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1588   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1589   AtomicInfo AI(*this, LV);
1590   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1591   // An atomic is inline if we don't need to use a libcall.
1592   bool AtomicIsInline = !AI.shouldUseLibcall();
1593   // MSVC doesn't seem to do this for types wider than a pointer.
1594   if (getContext().getTypeSize(LV.getType()) >
1595       getContext().getTypeSize(getContext().getIntPtrType()))
1596     return false;
1597   return IsVolatile && AtomicIsInline;
1598 }
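// Illustrative example (assuming -fms-volatile / "/volatile:ms"): for
// "volatile long g;", a plain load or store of g satisfies this predicate on
// typical targets and is then emitted with acquire/release ordering by
// EmitAtomicLoad/EmitAtomicStore below.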
1599 
1600 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1601                                        AggValueSlot Slot) {
1602   llvm::AtomicOrdering AO;
1603   bool IsVolatile = LV.isVolatileQualified();
1604   if (LV.getType()->isAtomicType()) {
1605     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1606   } else {
1607     AO = llvm::AtomicOrdering::Acquire;
1608     IsVolatile = true;
1609   }
1610   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1611 }
1612 
1613 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1614                                   bool AsValue, llvm::AtomicOrdering AO,
1615                                   bool IsVolatile) {
1616   // Check whether we should use a library call.
1617   if (shouldUseLibcall()) {
1618     Address TempAddr = Address::invalid();
1619     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1620       assert(getEvaluationKind() == TEK_Aggregate);
1621       TempAddr = ResultSlot.getAddress();
1622     } else
1623       TempAddr = CreateTempAlloca();
1624 
1625     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1626 
1627     // Okay, turn that back into the original value or whole atomic (for
1628     // non-simple lvalues) type.
1629     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1630   }
1631 
1632   // Okay, we're doing this natively.
1633   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1634 
1635   // If we're ignoring an aggregate return, don't do anything.
1636   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1637     return RValue::getAggregate(Address::invalid(), false);
1638 
1639   // Okay, turn that back into the original value or atomic (for non-simple
1640   // lvalues) type.
1641   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1642 }
1643 
1644 /// Emit a load from an l-value of atomic type.  Note that the r-value
1645 /// we produce is an r-value of the atomic *value* type.
1646 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1647                                        llvm::AtomicOrdering AO, bool IsVolatile,
1648                                        AggValueSlot resultSlot) {
1649   AtomicInfo Atomics(*this, src);
1650   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1651                                 IsVolatile);
1652 }
1653 
1654 /// Copy an r-value into memory as part of storing to an atomic type.
1655 /// This needs to create a bit-pattern suitable for atomic operations.
1656 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1657   assert(LVal.isSimple());
1658   // If we have an r-value, the rvalue should be of the atomic type,
1659   // which means that the caller is responsible for having zeroed
1660   // any padding.  Just do an aggregate copy of that type.
1661   if (rvalue.isAggregate()) {
1662     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1663     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1664                                     getAtomicType());
1665     bool IsVolatile = rvalue.isVolatileQualified() ||
1666                       LVal.isVolatileQualified();
1667     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1668                           AggValueSlot::DoesNotOverlap, IsVolatile);
1669     return;
1670   }
1671 
1672   // Okay, otherwise we're copying a scalar or complex value.
1673 
1674   // Zero out the buffer if necessary.
1675   emitMemSetZeroIfNecessary();
1676 
1677   // Drill past the padding if present.
1678   LValue TempLVal = projectValue();
1679 
1680   // Okay, store the rvalue in.
1681   if (rvalue.isScalar()) {
1682     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1683   } else {
1684     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1685   }
1686 }
1687 
1688 
1689 /// Materialize an r-value into memory for the purposes of storing it
1690 /// to an atomic type.
1691 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1692   // Aggregate r-values are already in memory, and EmitAtomicStore
1693   // requires them to be values of the atomic type.
1694   if (rvalue.isAggregate())
1695     return rvalue.getAggregateAddress();
1696 
1697   // Otherwise, make a temporary and materialize into it.
1698   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1699   AtomicInfo Atomics(CGF, TempLV);
1700   Atomics.emitCopyIntoMemory(rvalue);
1701   return TempLV.getAddress(CGF);
1702 }
1703 
1704 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1705   // If we've got a scalar value of the right size, try to avoid going
1706   // through memory.
1707   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1708     llvm::Value *Value = RVal.getScalarVal();
1709     if (isa<llvm::IntegerType>(Value->getType()))
1710       return CGF.EmitToMemory(Value, ValueTy);
1711     else {
1712       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1713           CGF.getLLVMContext(),
1714           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1715       if (isa<llvm::PointerType>(Value->getType()))
1716         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1717       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1718         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1719     }
1720   }
1721   // Otherwise, we need to go through memory.
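  // E.g. a float of matching width is bitcast straight to an integer of the
  // value's width above, and a pointer value is converted with ptrtoint.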
1722   // Put the r-value in memory.
1723   Address Addr = materializeRValue(RVal);
1724 
1725   // Cast the temporary to the atomic int type and pull a value out.
1726   Addr = emitCastToAtomicIntPointer(Addr);
1727   return CGF.Builder.CreateLoad(Addr);
1728 }
1729 
1730 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1731     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1732     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1733   // Do the atomic compare-exchange.
1734   Address Addr = getAtomicAddressAsAtomicIntPointer();
1735   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1736                                                ExpectedVal, DesiredVal,
1737                                                Success, Failure);
1738   // Other decoration.
1739   Inst->setVolatile(LVal.isVolatileQualified());
1740   Inst->setWeak(IsWeak);
1741 
1742   // Okay, turn that back into the original value type.
1743   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1744   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1745   return std::make_pair(PreviousVal, SuccessFailureVal);
1746 }
1747 
1748 llvm::Value *
1749 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1750                                              llvm::Value *DesiredAddr,
1751                                              llvm::AtomicOrdering Success,
1752                                              llvm::AtomicOrdering Failure) {
1753   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1754   // void *desired, int success, int failure);
1755   CallArgList Args;
1756   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1757   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1758            CGF.getContext().VoidPtrTy);
1759   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1760            CGF.getContext().VoidPtrTy);
1761   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1762            CGF.getContext().VoidPtrTy);
1763   Args.add(RValue::get(
1764                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1765            CGF.getContext().IntTy);
1766   Args.add(RValue::get(
1767                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1768            CGF.getContext().IntTy);
1769   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1770                                               CGF.getContext().BoolTy, Args);
1771 
1772   return SuccessFailureRVal.getScalarVal();
1773 }
1774 
1775 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1776     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1777     llvm::AtomicOrdering Failure, bool IsWeak) {
1778   // Check whether we should use a library call.
1779   if (shouldUseLibcall()) {
1780     // Produce a source address.
1781     Address ExpectedAddr = materializeRValue(Expected);
1782     Address DesiredAddr = materializeRValue(Desired);
1783     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1784                                                  DesiredAddr.getPointer(),
1785                                                  Success, Failure);
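    // Per the __atomic_compare_exchange contract, on failure the runtime
    // writes the value it observed back into the 'expected' buffer, so
    // ExpectedAddr holds the old value in either case.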
1786     return std::make_pair(
1787         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1788                                   SourceLocation(), /*AsValue=*/false),
1789         Res);
1790   }
1791 
1792   // If we've got a scalar value of the right size, try to avoid going
1793   // through memory.
1794   auto *ExpectedVal = convertRValueToInt(Expected);
1795   auto *DesiredVal = convertRValueToInt(Desired);
1796   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1797                                          Failure, IsWeak);
1798   return std::make_pair(
1799       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1800                                 SourceLocation(), /*AsValue=*/false),
1801       Res.second);
1802 }
1803 
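/// Apply \p UpdateOp to the previously loaded value \p OldRVal (re-reading it
/// through a bitfield/vector-element view of a temporary when the original
/// l-value is not simple) and store the updated value into \p DesiredAddr.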
1804 static void
1805 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1806                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1807                       Address DesiredAddr) {
1808   RValue UpRVal;
1809   LValue AtomicLVal = Atomics.getAtomicLValue();
1810   LValue DesiredLVal;
1811   if (AtomicLVal.isSimple()) {
1812     UpRVal = OldRVal;
1813     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1814   } else {
1815     // Build new lvalue for temp address.
1816     Address Ptr = Atomics.materializeRValue(OldRVal);
1817     LValue UpdateLVal;
1818     if (AtomicLVal.isBitField()) {
1819       UpdateLVal =
1820           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1821                                AtomicLVal.getType(),
1822                                AtomicLVal.getBaseInfo(),
1823                                AtomicLVal.getTBAAInfo());
1824       DesiredLVal =
1825           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1826                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1827                                AtomicLVal.getTBAAInfo());
1828     } else if (AtomicLVal.isVectorElt()) {
1829       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1830                                          AtomicLVal.getType(),
1831                                          AtomicLVal.getBaseInfo(),
1832                                          AtomicLVal.getTBAAInfo());
1833       DesiredLVal = LValue::MakeVectorElt(
1834           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1835           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1836     } else {
1837       assert(AtomicLVal.isExtVectorElt());
1838       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1839                                             AtomicLVal.getType(),
1840                                             AtomicLVal.getBaseInfo(),
1841                                             AtomicLVal.getTBAAInfo());
1842       DesiredLVal = LValue::MakeExtVectorElt(
1843           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1844           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1845     }
1846     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1847   }
1848   // Store new value in the corresponding memory area.
1849   RValue NewRVal = UpdateOp(UpRVal);
1850   if (NewRVal.isScalar()) {
1851     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1852   } else {
1853     assert(NewRVal.isComplex());
1854     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1855                            /*isInit=*/false);
1856   }
1857 }
1858 
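/// Read-modify-write via the generic libcalls: load the old value with
/// __atomic_load, apply \p UpdateOp to form the desired value, and retry with
/// __atomic_compare_exchange until the exchange succeeds.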
1859 void AtomicInfo::EmitAtomicUpdateLibcall(
1860     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1861     bool IsVolatile) {
1862   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1863 
1864   Address ExpectedAddr = CreateTempAlloca();
1865 
1866   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1867   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1868   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1869   CGF.EmitBlock(ContBB);
1870   Address DesiredAddr = CreateTempAlloca();
1871   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1872       requiresMemSetZero(getAtomicAddress().getElementType())) {
1873     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1874     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1875   }
1876   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1877                                            AggValueSlot::ignored(),
1878                                            SourceLocation(), /*AsValue=*/false);
1879   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1880   auto *Res =
1881       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1882                                        DesiredAddr.getPointer(),
1883                                        AO, Failure);
1884   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1885   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1886 }
1887 
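/// Read-modify-write lowered natively: load the old value, apply \p UpdateOp,
/// and loop on cmpxchg, feeding each failure's observed value back through the
/// PHI until the exchange succeeds.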
1888 void AtomicInfo::EmitAtomicUpdateOp(
1889     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1890     bool IsVolatile) {
1891   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1892 
1893   // Do the atomic load.
1894   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1895   // For non-simple lvalues, perform the compare-and-swap procedure.
1896   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1897   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1898   auto *CurBB = CGF.Builder.GetInsertBlock();
1899   CGF.EmitBlock(ContBB);
1900   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1901                                              /*NumReservedValues=*/2);
1902   PHI->addIncoming(OldVal, CurBB);
1903   Address NewAtomicAddr = CreateTempAlloca();
1904   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1905   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1906       requiresMemSetZero(getAtomicAddress().getElementType())) {
1907     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1908   }
1909   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1910                                            SourceLocation(), /*AsValue=*/false);
1911   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1912   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1913   // Try to write new value using cmpxchg operation.
1914   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1915   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1916   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1917   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1918 }
1919 
1920 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1921                                   RValue UpdateRVal, Address DesiredAddr) {
1922   LValue AtomicLVal = Atomics.getAtomicLValue();
1923   LValue DesiredLVal;
1924   // Build new lvalue for temp address.
1925   if (AtomicLVal.isBitField()) {
1926     DesiredLVal =
1927         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1928                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1929                              AtomicLVal.getTBAAInfo());
1930   } else if (AtomicLVal.isVectorElt()) {
1931     DesiredLVal =
1932         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1933                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1934                               AtomicLVal.getTBAAInfo());
1935   } else {
1936     assert(AtomicLVal.isExtVectorElt());
1937     DesiredLVal = LValue::MakeExtVectorElt(
1938         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1939         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1940   }
1941   // Store new value in the corresponding memory area.
1942   assert(UpdateRVal.isScalar());
1943   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1944 }
1945 
1946 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1947                                          RValue UpdateRVal, bool IsVolatile) {
1948   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1949 
1950   Address ExpectedAddr = CreateTempAlloca();
1951 
1952   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1953   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1954   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1955   CGF.EmitBlock(ContBB);
1956   Address DesiredAddr = CreateTempAlloca();
1957   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1958       requiresMemSetZero(getAtomicAddress().getElementType())) {
1959     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1960     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1961   }
1962   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1963   auto *Res =
1964       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1965                                        DesiredAddr.getPointer(),
1966                                        AO, Failure);
1967   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1968   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1969 }
1970 
1971 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1972                                     bool IsVolatile) {
1973   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1974 
1975   // Do the atomic load.
1976   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1977   // For non-simple lvalues, perform the compare-and-swap procedure.
1978   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1979   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1980   auto *CurBB = CGF.Builder.GetInsertBlock();
1981   CGF.EmitBlock(ContBB);
1982   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1983                                              /*NumReservedValues=*/2);
1984   PHI->addIncoming(OldVal, CurBB);
1985   Address NewAtomicAddr = CreateTempAlloca();
1986   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1987   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1988       requiresMemSetZero(getAtomicAddress().getElementType())) {
1989     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1990   }
1991   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1992   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1993   // Try to write new value using cmpxchg operation.
1994   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1995   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1996   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1997   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1998 }
1999 
2000 void AtomicInfo::EmitAtomicUpdate(
2001     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
2002     bool IsVolatile) {
2003   if (shouldUseLibcall()) {
2004     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
2005   } else {
2006     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
2007   }
2008 }
2009 
2010 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
2011                                   bool IsVolatile) {
2012   if (shouldUseLibcall()) {
2013     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
2014   } else {
2015     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
2016   }
2017 }
2018 
2019 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
2020                                       bool isInit) {
2021   bool IsVolatile = lvalue.isVolatileQualified();
2022   llvm::AtomicOrdering AO;
2023   if (lvalue.getType()->isAtomicType()) {
2024     AO = llvm::AtomicOrdering::SequentiallyConsistent;
2025   } else {
2026     AO = llvm::AtomicOrdering::Release;
2027     IsVolatile = true;
2028   }
2029   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
2030 }
2031 
2032 /// Emit a store to an l-value of atomic type.
2033 ///
2034 /// Note that the r-value is expected to be an r-value *of the atomic
2035 /// type*; this means that for aggregate r-values, it should include
2036 /// storage for any padding that was necessary.
2037 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
2038                                       llvm::AtomicOrdering AO, bool IsVolatile,
2039                                       bool isInit) {
2040   // If this is an aggregate r-value, it should agree in type except
2041   // maybe for address-space qualification.
2042   assert(!rvalue.isAggregate() ||
2043          rvalue.getAggregateAddress().getElementType() ==
2044              dest.getAddress(*this).getElementType());
2045 
2046   AtomicInfo atomics(*this, dest);
2047   LValue LVal = atomics.getAtomicLValue();
2048 
2049   // If this is an initialization, just put the value there normally.
2050   if (LVal.isSimple()) {
2051     if (isInit) {
2052       atomics.emitCopyIntoMemory(rvalue);
2053       return;
2054     }
2055 
2056     // Check whether we should use a library call.
2057     if (atomics.shouldUseLibcall()) {
2058       // Produce a source address.
2059       Address srcAddr = atomics.materializeRValue(rvalue);
2060 
2061       // void __atomic_store(size_t size, void *mem, void *val, int order)
2062       CallArgList args;
2063       args.add(RValue::get(atomics.getAtomicSizeValue()),
2064                getContext().getSizeType());
2065       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2066                getContext().VoidPtrTy);
2067       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2068                getContext().VoidPtrTy);
2069       args.add(
2070           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2071           getContext().IntTy);
2072       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2073       return;
2074     }
2075 
2076     // Okay, we're doing this natively.
2077     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2078 
2079     // Do the atomic store.
2080     Address addr =
2081         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2082     intValue = Builder.CreateIntCast(
2083         intValue, addr.getElementType(), /*isSigned=*/false);
2084     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2085 
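    // An atomic store may not use an acquire ordering in LLVM IR, so demote
    // acquire to monotonic and acq_rel to release here.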
2086     if (AO == llvm::AtomicOrdering::Acquire)
2087       AO = llvm::AtomicOrdering::Monotonic;
2088     else if (AO == llvm::AtomicOrdering::AcquireRelease)
2089       AO = llvm::AtomicOrdering::Release;
2090     // Initializations don't need to be atomic.
2091     if (!isInit)
2092       store->setAtomic(AO);
2093 
2094     // Other decoration.
2095     if (IsVolatile)
2096       store->setVolatile(true);
2097     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2098     return;
2099   }
2100 
2101   // Emit simple atomic update operation.
2102   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2103 }
2104 
2105 /// Emit a compare-and-exchange op for an atomic type.
2106 ///
2107 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2108     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2109     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2110     AggValueSlot Slot) {
2111   // If this is an aggregate r-value, it should agree in type except
2112   // maybe for address-space qualification.
2113   assert(!Expected.isAggregate() ||
2114          Expected.getAggregateAddress().getElementType() ==
2115              Obj.getAddress(*this).getElementType());
2116   assert(!Desired.isAggregate() ||
2117          Desired.getAggregateAddress().getElementType() ==
2118              Obj.getAddress(*this).getElementType());
2119   AtomicInfo Atomics(*this, Obj);
2120 
2121   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2122                                            IsWeak);
2123 }
2124 
2125 void CodeGenFunction::EmitAtomicUpdate(
2126     LValue LVal, llvm::AtomicOrdering AO,
2127     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2128   AtomicInfo Atomics(*this, LVal);
2129   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2130 }
2131 
2132 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2133   AtomicInfo atomics(*this, dest);
2134 
2135   switch (atomics.getEvaluationKind()) {
2136   case TEK_Scalar: {
2137     llvm::Value *value = EmitScalarExpr(init);
2138     atomics.emitCopyIntoMemory(RValue::get(value));
2139     return;
2140   }
2141 
2142   case TEK_Complex: {
2143     ComplexPairTy value = EmitComplexExpr(init);
2144     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2145     return;
2146   }
2147 
2148   case TEK_Aggregate: {
2149     // Fix up the destination if the initializer isn't an expression
2150     // of atomic type.
2151     bool Zeroed = false;
2152     if (!init->getType()->isAtomicType()) {
2153       Zeroed = atomics.emitMemSetZeroIfNecessary();
2154       dest = atomics.projectValue();
2155     }
2156 
2157     // Evaluate the expression directly into the destination.
2158     AggValueSlot slot = AggValueSlot::forLValue(
2159         dest, *this, AggValueSlot::IsNotDestructed,
2160         AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2161         AggValueSlot::DoesNotOverlap,
2162         Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2163 
2164     EmitAggExpr(init, slot);
2165     return;
2166   }
2167   }
2168   llvm_unreachable("bad evaluation kind");
2169 }
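// Illustrative example for the TEK_Scalar case above: "_Atomic(int) x = 42;"
// reaches EmitAtomicInit with a scalar initializer, and the value is simply
// copied into the atomic's storage; no atomic instruction is needed because
// the initialization is not yet observable by other threads.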
2170