xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
76         ValueTy = lvalue.getType();
77         ValueSizeInBits = C.getTypeSize(ValueTy);
78         auto &OrigBFI = lvalue.getBitFieldInfo();
79         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
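        // Illustrative example: for a 3-bit field at bit offset 9 of a
        // 4-byte-aligned record, Offset stays 9 and the size computed below
        // rounds up to a full 32-bit word, so the whole aligned word is
        // operated on atomically.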
80         AtomicSizeInBits = C.toBits(
81             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                 .alignTo(lvalue.getAlignment()));
83         llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
84         auto OffsetInChars =
85             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86             lvalue.getAlignment();
87         llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
88             CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
89         StoragePtr = CGF.Builder.CreateAddrSpaceCast(
90             StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
91         BFI = OrigBFI;
92         BFI.Offset = Offset;
93         BFI.StorageSize = AtomicSizeInBits;
94         BFI.StorageOffset += OffsetInChars;
95         llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
96         LVal = LValue::MakeBitfield(
97             Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
98             lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
99         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
100         if (AtomicTy.isNull()) {
101           llvm::APInt Size(
102               /*numBits=*/32,
103               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
104           AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
105                                             ArraySizeModifier::Normal,
106                                             /*IndexTypeQuals=*/0);
107         }
108         AtomicAlign = ValueAlign = lvalue.getAlignment();
109       } else if (lvalue.isVectorElt()) {
110         ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
111         ValueSizeInBits = C.getTypeSize(ValueTy);
112         AtomicTy = lvalue.getType();
113         AtomicSizeInBits = C.getTypeSize(AtomicTy);
114         AtomicAlign = ValueAlign = lvalue.getAlignment();
115         LVal = lvalue;
116       } else {
117         assert(lvalue.isExtVectorElt());
118         ValueTy = lvalue.getType();
119         ValueSizeInBits = C.getTypeSize(ValueTy);
120         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
121             lvalue.getType(), cast<llvm::FixedVectorType>(
122                                   lvalue.getExtVectorAddress().getElementType())
123                                   ->getNumElements());
124         AtomicSizeInBits = C.getTypeSize(AtomicTy);
125         AtomicAlign = ValueAlign = lvalue.getAlignment();
126         LVal = lvalue;
127       }
128       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
129           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
130     }
131 
132     QualType getAtomicType() const { return AtomicTy; }
133     QualType getValueType() const { return ValueTy; }
134     CharUnits getAtomicAlignment() const { return AtomicAlign; }
135     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
136     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
137     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
138     bool shouldUseLibcall() const { return UseLibcall; }
139     const LValue &getAtomicLValue() const { return LVal; }
140     llvm::Value *getAtomicPointer() const {
141       if (LVal.isSimple())
142         return LVal.emitRawPointer(CGF);
143       else if (LVal.isBitField())
144         return LVal.getRawBitFieldPointer(CGF);
145       else if (LVal.isVectorElt())
146         return LVal.getRawVectorPointer(CGF);
147       assert(LVal.isExtVectorElt());
148       return LVal.getRawExtVectorPointer(CGF);
149     }
150     Address getAtomicAddress() const {
151       llvm::Type *ElTy;
152       if (LVal.isSimple())
153         ElTy = LVal.getAddress().getElementType();
154       else if (LVal.isBitField())
155         ElTy = LVal.getBitFieldAddress().getElementType();
156       else if (LVal.isVectorElt())
157         ElTy = LVal.getVectorAddress().getElementType();
158       else
159         ElTy = LVal.getExtVectorAddress().getElementType();
160       return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
161     }
162 
163     Address getAtomicAddressAsAtomicIntPointer() const {
164       return castToAtomicIntPointer(getAtomicAddress());
165     }
166 
167     /// Is the atomic size larger than the underlying value type?
168     ///
169     /// Note that the absence of padding does not mean that atomic
170     /// objects are completely interchangeable with non-atomic
171     /// objects: we might have promoted the alignment of a type
172     /// without making it bigger.
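    /// For example (target-dependent): on 32-bit x86 a plain 'long long' is
    /// 8 bytes with 4-byte alignment, while '_Atomic long long' keeps the
    /// 8-byte size but is promoted to 8-byte alignment, so hasPadding() is
    /// still false even though the layouts differ.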
173     bool hasPadding() const {
174       return (ValueSizeInBits != AtomicSizeInBits);
175     }
176 
177     bool emitMemSetZeroIfNecessary() const;
178 
179     llvm::Value *getAtomicSizeValue() const {
180       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
181       return CGF.CGM.getSize(size);
182     }
183 
184     /// Cast the given pointer to an integer pointer suitable for atomic
185     /// operations.
186     Address castToAtomicIntPointer(Address Addr) const;
187 
188     /// If Addr is compatible with the iN that will be used for an atomic
189     /// operation, bitcast it. Otherwise, create a temporary that is suitable
190     /// and copy the value across.
191     Address convertToAtomicIntPointer(Address Addr) const;
192 
193     /// Turn an atomic-layout object into an r-value.
194     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
195                                      SourceLocation loc, bool AsValue) const;
196 
197     llvm::Value *getScalarRValValueOrNull(RValue RVal) const;
198 
199     /// Converts an rvalue to an integer value if needed.
200     llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;
201 
202     RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
203                                   SourceLocation Loc, bool AsValue,
204                                   bool CmpXchg = false) const;
205 
206     /// Copy an atomic r-value into atomic-layout memory.
207     void emitCopyIntoMemory(RValue rvalue) const;
208 
209     /// Project an l-value down to the value field.
210     LValue projectValue() const {
211       assert(LVal.isSimple());
212       Address addr = getAtomicAddress();
213       if (hasPadding())
214         addr = CGF.Builder.CreateStructGEP(addr, 0);
215 
216       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
217                               LVal.getBaseInfo(), LVal.getTBAAInfo());
218     }
219 
220     /// Emits atomic load.
221     /// \returns Loaded value.
222     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
223                           bool AsValue, llvm::AtomicOrdering AO,
224                           bool IsVolatile);
225 
226     /// Emits atomic compare-and-exchange sequence.
227     /// \param Expected Expected value.
228     /// \param Desired Desired value.
229     /// \param Success Atomic ordering for success operation.
230     /// \param Failure Atomic ordering for failed operation.
231     /// \param IsWeak true if atomic operation is weak, false otherwise.
232     /// \returns Pair of values: previous value from storage (value type) and
233     /// boolean flag (i1 type) with true if success and false otherwise.
234     std::pair<RValue, llvm::Value *>
235     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
236                               llvm::AtomicOrdering Success =
237                                   llvm::AtomicOrdering::SequentiallyConsistent,
238                               llvm::AtomicOrdering Failure =
239                                   llvm::AtomicOrdering::SequentiallyConsistent,
240                               bool IsWeak = false);
241 
242     /// Emits atomic update.
243     /// \param AO Atomic ordering.
244     /// \param UpdateOp Update operation for the current lvalue.
245     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
246                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
247                           bool IsVolatile);
248     /// Emits atomic update.
249     /// \param AO Atomic ordering.
250     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
251                           bool IsVolatile);
252 
253     /// Materialize an atomic r-value in atomic-layout memory.
254     Address materializeRValue(RValue rvalue) const;
255 
256     /// Creates temp alloca for intermediate operations on atomic value.
257     Address CreateTempAlloca() const;
258   private:
259     bool requiresMemSetZero(llvm::Type *type) const;
260 
261 
262     /// Emits atomic load as a libcall.
263     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
264                                llvm::AtomicOrdering AO, bool IsVolatile);
265     /// Emits atomic load as LLVM instruction.
266     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
267                                   bool CmpXchg = false);
268     /// Emits atomic compare-and-exchange op as a libcall.
269     llvm::Value *EmitAtomicCompareExchangeLibcall(
270         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
271         llvm::AtomicOrdering Success =
272             llvm::AtomicOrdering::SequentiallyConsistent,
273         llvm::AtomicOrdering Failure =
274             llvm::AtomicOrdering::SequentiallyConsistent);
275     /// Emits atomic compare-and-exchange op as LLVM instruction.
276     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
277         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
278         llvm::AtomicOrdering Success =
279             llvm::AtomicOrdering::SequentiallyConsistent,
280         llvm::AtomicOrdering Failure =
281             llvm::AtomicOrdering::SequentiallyConsistent,
282         bool IsWeak = false);
283     /// Emit atomic update as libcalls.
284     void
285     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
286                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
287                             bool IsVolatile);
288     /// Emit atomic update as LLVM instructions.
289     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
290                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
291                             bool IsVolatile);
292     /// Emit atomic update as libcalls.
293     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
294                                  bool IsVolatile);
295     /// Emit atomic update as LLVM instructions.
296     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
297                             bool IsVolatile);
298   };
299 }
300 
301 Address AtomicInfo::CreateTempAlloca() const {
302   Address TempAlloca = CGF.CreateMemTemp(
303       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
304                                                                 : AtomicTy,
305       getAtomicAlignment(),
306       "atomic-temp");
307   // Cast to pointer to value type for bitfields.
308   if (LVal.isBitField())
309     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
310         TempAlloca, getAtomicAddress().getType(),
311         getAtomicAddress().getElementType());
312   return TempAlloca;
313 }
314 
315 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
316                                 StringRef fnName,
317                                 QualType resultType,
318                                 CallArgList &args) {
319   const CGFunctionInfo &fnInfo =
320     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
321   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
322   llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
323   fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
324   fnAttrB.addAttribute(llvm::Attribute::WillReturn);
325   llvm::AttributeList fnAttrs = llvm::AttributeList::get(
326       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
327 
328   llvm::FunctionCallee fn =
329       CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
330   auto callee = CGCallee::forDirect(fn);
331   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
332 }
333 
334 /// Does a store of the given IR type modify the full expected width?
335 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
336                            uint64_t expectedSize) {
337   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
338 }
339 
340 /// Does the atomic type require memsetting to zero before initialization?
341 ///
342 /// The IR type is provided as a way of making certain queries faster.
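/// For example (illustrative): on x86-64, '_Atomic long double' occupies 16
/// bytes but a store of the IR 'x86_fp80' type only writes 10 of them, so the
/// object is zeroed first to give the untouched bytes a defined value.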
343 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
344   // If the atomic type has size padding, we definitely need a memset.
345   if (hasPadding()) return true;
346 
347   // Otherwise, do some simple heuristics to try to avoid it:
348   switch (getEvaluationKind()) {
349   // For scalars and complexes, check whether the store size of the
350   // type uses the full size.
351   case TEK_Scalar:
352     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
353   case TEK_Complex:
354     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
355                            AtomicSizeInBits / 2);
356 
357   // Padding in structs has an undefined bit pattern.  User beware.
358   case TEK_Aggregate:
359     return false;
360   }
361   llvm_unreachable("bad evaluation kind");
362 }
363 
364 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
365   assert(LVal.isSimple());
366   Address addr = LVal.getAddress();
367   if (!requiresMemSetZero(addr.getElementType()))
368     return false;
369 
370   CGF.Builder.CreateMemSet(
371       addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
372       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
373       LVal.getAlignment().getAsAlign());
374   return true;
375 }
376 
377 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
378                               Address Dest, Address Ptr,
379                               Address Val1, Address Val2,
380                               uint64_t Size,
381                               llvm::AtomicOrdering SuccessOrder,
382                               llvm::AtomicOrdering FailureOrder,
383                               llvm::SyncScope::ID Scope) {
384   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
385   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
386   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
387 
388   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
389       Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
390   Pair->setVolatile(E->isVolatile());
391   Pair->setWeak(IsWeak);
392 
393   // Cmp holds the result of the compare-exchange operation: true on success,
394   // false on failure.
395   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
396   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
397 
398   // This basic block is used to hold the store instruction if the operation
399   // failed.
400   llvm::BasicBlock *StoreExpectedBB =
401       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
402 
403   // This basic block is the exit point of the operation, we should end up
404   // here regardless of whether or not the operation succeeded.
405   llvm::BasicBlock *ContinueBB =
406       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
407 
408   // Update Expected if Expected isn't equal to Old, otherwise branch to the
409   // exit point.
410   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
411 
412   CGF.Builder.SetInsertPoint(StoreExpectedBB);
413   // Update the memory at Expected with Old's value.
414   CGF.Builder.CreateStore(Old, Val1);
415   // Finally, branch to the exit point.
416   CGF.Builder.CreateBr(ContinueBB);
417 
418   CGF.Builder.SetInsertPoint(ContinueBB);
419   // Update the memory at Dest with Cmp's value.
420   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
421 }
422 
423 /// Given an ordering required on success, emit all possible cmpxchg
424 /// instructions to cope with the provided (but possibly only dynamically known)
425 /// FailureOrder.
426 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
427                                         bool IsWeak, Address Dest, Address Ptr,
428                                         Address Val1, Address Val2,
429                                         llvm::Value *FailureOrderVal,
430                                         uint64_t Size,
431                                         llvm::AtomicOrdering SuccessOrder,
432                                         llvm::SyncScope::ID Scope) {
433   llvm::AtomicOrdering FailureOrder;
434   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
435     auto FOS = FO->getSExtValue();
436     if (!llvm::isValidAtomicOrderingCABI(FOS))
437       FailureOrder = llvm::AtomicOrdering::Monotonic;
438     else
439       switch ((llvm::AtomicOrderingCABI)FOS) {
440       case llvm::AtomicOrderingCABI::relaxed:
441       // 31.7.2.18: "The failure argument shall not be memory_order_release
442       // nor memory_order_acq_rel". Fall back to monotonic.
443       case llvm::AtomicOrderingCABI::release:
444       case llvm::AtomicOrderingCABI::acq_rel:
445         FailureOrder = llvm::AtomicOrdering::Monotonic;
446         break;
447       case llvm::AtomicOrderingCABI::consume:
448       case llvm::AtomicOrderingCABI::acquire:
449         FailureOrder = llvm::AtomicOrdering::Acquire;
450         break;
451       case llvm::AtomicOrderingCABI::seq_cst:
452         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
453         break;
454       }
455     // Prior to C++17, "the failure argument shall be no stronger than the
456     // success argument". This condition has been lifted and the only
457     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
458     // language version checks.
459     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
460                       FailureOrder, Scope);
461     return;
462   }
463 
464   // Create all the relevant BB's
465   auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
466   auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
467   auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
468   auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
469 
470   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
471   // doesn't matter unless someone is crazy enough to use something that
472   // doesn't fold to a constant for the ordering.
473   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
474   // Implemented as acquire, since it's the closest in LLVM.
475   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
476               AcquireBB);
477   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
478               AcquireBB);
479   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
480               SeqCstBB);
481 
482   // Emit all the different atomics
483   CGF.Builder.SetInsertPoint(MonotonicBB);
484   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
485                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
486   CGF.Builder.CreateBr(ContBB);
487 
488   CGF.Builder.SetInsertPoint(AcquireBB);
489   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
490                     llvm::AtomicOrdering::Acquire, Scope);
491   CGF.Builder.CreateBr(ContBB);
492 
493   CGF.Builder.SetInsertPoint(SeqCstBB);
494   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
495                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
496   CGF.Builder.CreateBr(ContBB);
497 
498   CGF.Builder.SetInsertPoint(ContBB);
499 }
500 
501 /// Duplicate the atomic min/max operation in conventional IR for the builtin
502 /// variants that return the new rather than the original value.
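/// For example, '__atomic_max_fetch(p, v, order)' is lowered to an
/// 'atomicrmw max' (which yields the old value) followed by the compare and
/// select emitted here, recomputing max(old, v) to produce the new value.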
503 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
504                                          AtomicExpr::AtomicOp Op,
505                                          bool IsSigned,
506                                          llvm::Value *OldVal,
507                                          llvm::Value *RHS) {
508   llvm::CmpInst::Predicate Pred;
509   switch (Op) {
510   default:
511     llvm_unreachable("Unexpected min/max operation");
512   case AtomicExpr::AO__atomic_max_fetch:
513   case AtomicExpr::AO__scoped_atomic_max_fetch:
514     Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
515     break;
516   case AtomicExpr::AO__atomic_min_fetch:
517   case AtomicExpr::AO__scoped_atomic_min_fetch:
518     Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
519     break;
520   }
521   llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
522   return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
523 }
524 
525 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
526                          Address Ptr, Address Val1, Address Val2,
527                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
528                          uint64_t Size, llvm::AtomicOrdering Order,
529                          llvm::SyncScope::ID Scope) {
530   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
531   bool PostOpMinMax = false;
532   unsigned PostOp = 0;
533 
534   switch (E->getOp()) {
535   case AtomicExpr::AO__c11_atomic_init:
536   case AtomicExpr::AO__opencl_atomic_init:
537     llvm_unreachable("Already handled!");
538 
539   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
540   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
541   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
542     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
543                                 FailureOrder, Size, Order, Scope);
544     return;
545   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
546   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
547   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
548     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
549                                 FailureOrder, Size, Order, Scope);
550     return;
551   case AtomicExpr::AO__atomic_compare_exchange:
552   case AtomicExpr::AO__atomic_compare_exchange_n:
553   case AtomicExpr::AO__scoped_atomic_compare_exchange:
554   case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
555     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
556       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
557                                   Val1, Val2, FailureOrder, Size, Order, Scope);
558     } else {
559       // Create all the relevant BB's
560       llvm::BasicBlock *StrongBB =
561           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
562       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
563       llvm::BasicBlock *ContBB =
564           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
565 
566       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
567       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
568 
569       CGF.Builder.SetInsertPoint(StrongBB);
570       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
571                                   FailureOrder, Size, Order, Scope);
572       CGF.Builder.CreateBr(ContBB);
573 
574       CGF.Builder.SetInsertPoint(WeakBB);
575       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
576                                   FailureOrder, Size, Order, Scope);
577       CGF.Builder.CreateBr(ContBB);
578 
579       CGF.Builder.SetInsertPoint(ContBB);
580     }
581     return;
582   }
583   case AtomicExpr::AO__c11_atomic_load:
584   case AtomicExpr::AO__opencl_atomic_load:
585   case AtomicExpr::AO__hip_atomic_load:
586   case AtomicExpr::AO__atomic_load_n:
587   case AtomicExpr::AO__atomic_load:
588   case AtomicExpr::AO__scoped_atomic_load_n:
589   case AtomicExpr::AO__scoped_atomic_load: {
590     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
591     Load->setAtomic(Order, Scope);
592     Load->setVolatile(E->isVolatile());
593     CGF.Builder.CreateStore(Load, Dest);
594     return;
595   }
596 
597   case AtomicExpr::AO__c11_atomic_store:
598   case AtomicExpr::AO__opencl_atomic_store:
599   case AtomicExpr::AO__hip_atomic_store:
600   case AtomicExpr::AO__atomic_store:
601   case AtomicExpr::AO__atomic_store_n:
602   case AtomicExpr::AO__scoped_atomic_store:
603   case AtomicExpr::AO__scoped_atomic_store_n: {
604     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
605     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
606     Store->setAtomic(Order, Scope);
607     Store->setVolatile(E->isVolatile());
608     return;
609   }
610 
611   case AtomicExpr::AO__c11_atomic_exchange:
612   case AtomicExpr::AO__hip_atomic_exchange:
613   case AtomicExpr::AO__opencl_atomic_exchange:
614   case AtomicExpr::AO__atomic_exchange_n:
615   case AtomicExpr::AO__atomic_exchange:
616   case AtomicExpr::AO__scoped_atomic_exchange_n:
617   case AtomicExpr::AO__scoped_atomic_exchange:
618     Op = llvm::AtomicRMWInst::Xchg;
619     break;
620 
621   case AtomicExpr::AO__atomic_add_fetch:
622   case AtomicExpr::AO__scoped_atomic_add_fetch:
623     PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
624                                                  : llvm::Instruction::Add;
625     [[fallthrough]];
626   case AtomicExpr::AO__c11_atomic_fetch_add:
627   case AtomicExpr::AO__hip_atomic_fetch_add:
628   case AtomicExpr::AO__opencl_atomic_fetch_add:
629   case AtomicExpr::AO__atomic_fetch_add:
630   case AtomicExpr::AO__scoped_atomic_fetch_add:
631     Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
632                                              : llvm::AtomicRMWInst::Add;
633     break;
634 
635   case AtomicExpr::AO__atomic_sub_fetch:
636   case AtomicExpr::AO__scoped_atomic_sub_fetch:
637     PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
638                                                  : llvm::Instruction::Sub;
639     [[fallthrough]];
640   case AtomicExpr::AO__c11_atomic_fetch_sub:
641   case AtomicExpr::AO__hip_atomic_fetch_sub:
642   case AtomicExpr::AO__opencl_atomic_fetch_sub:
643   case AtomicExpr::AO__atomic_fetch_sub:
644   case AtomicExpr::AO__scoped_atomic_fetch_sub:
645     Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
646                                              : llvm::AtomicRMWInst::Sub;
647     break;
648 
649   case AtomicExpr::AO__atomic_min_fetch:
650   case AtomicExpr::AO__scoped_atomic_min_fetch:
651     PostOpMinMax = true;
652     [[fallthrough]];
653   case AtomicExpr::AO__c11_atomic_fetch_min:
654   case AtomicExpr::AO__hip_atomic_fetch_min:
655   case AtomicExpr::AO__opencl_atomic_fetch_min:
656   case AtomicExpr::AO__atomic_fetch_min:
657   case AtomicExpr::AO__scoped_atomic_fetch_min:
658     Op = E->getValueType()->isFloatingType()
659              ? llvm::AtomicRMWInst::FMin
660              : (E->getValueType()->isSignedIntegerType()
661                     ? llvm::AtomicRMWInst::Min
662                     : llvm::AtomicRMWInst::UMin);
663     break;
664 
665   case AtomicExpr::AO__atomic_max_fetch:
666   case AtomicExpr::AO__scoped_atomic_max_fetch:
667     PostOpMinMax = true;
668     [[fallthrough]];
669   case AtomicExpr::AO__c11_atomic_fetch_max:
670   case AtomicExpr::AO__hip_atomic_fetch_max:
671   case AtomicExpr::AO__opencl_atomic_fetch_max:
672   case AtomicExpr::AO__atomic_fetch_max:
673   case AtomicExpr::AO__scoped_atomic_fetch_max:
674     Op = E->getValueType()->isFloatingType()
675              ? llvm::AtomicRMWInst::FMax
676              : (E->getValueType()->isSignedIntegerType()
677                     ? llvm::AtomicRMWInst::Max
678                     : llvm::AtomicRMWInst::UMax);
679     break;
680 
681   case AtomicExpr::AO__atomic_and_fetch:
682   case AtomicExpr::AO__scoped_atomic_and_fetch:
683     PostOp = llvm::Instruction::And;
684     [[fallthrough]];
685   case AtomicExpr::AO__c11_atomic_fetch_and:
686   case AtomicExpr::AO__hip_atomic_fetch_and:
687   case AtomicExpr::AO__opencl_atomic_fetch_and:
688   case AtomicExpr::AO__atomic_fetch_and:
689   case AtomicExpr::AO__scoped_atomic_fetch_and:
690     Op = llvm::AtomicRMWInst::And;
691     break;
692 
693   case AtomicExpr::AO__atomic_or_fetch:
694   case AtomicExpr::AO__scoped_atomic_or_fetch:
695     PostOp = llvm::Instruction::Or;
696     [[fallthrough]];
697   case AtomicExpr::AO__c11_atomic_fetch_or:
698   case AtomicExpr::AO__hip_atomic_fetch_or:
699   case AtomicExpr::AO__opencl_atomic_fetch_or:
700   case AtomicExpr::AO__atomic_fetch_or:
701   case AtomicExpr::AO__scoped_atomic_fetch_or:
702     Op = llvm::AtomicRMWInst::Or;
703     break;
704 
705   case AtomicExpr::AO__atomic_xor_fetch:
706   case AtomicExpr::AO__scoped_atomic_xor_fetch:
707     PostOp = llvm::Instruction::Xor;
708     [[fallthrough]];
709   case AtomicExpr::AO__c11_atomic_fetch_xor:
710   case AtomicExpr::AO__hip_atomic_fetch_xor:
711   case AtomicExpr::AO__opencl_atomic_fetch_xor:
712   case AtomicExpr::AO__atomic_fetch_xor:
713   case AtomicExpr::AO__scoped_atomic_fetch_xor:
714     Op = llvm::AtomicRMWInst::Xor;
715     break;
716 
717   case AtomicExpr::AO__atomic_nand_fetch:
718   case AtomicExpr::AO__scoped_atomic_nand_fetch:
719     PostOp = llvm::Instruction::And; // the NOT is special cased below
720     [[fallthrough]];
721   case AtomicExpr::AO__c11_atomic_fetch_nand:
722   case AtomicExpr::AO__atomic_fetch_nand:
723   case AtomicExpr::AO__scoped_atomic_fetch_nand:
724     Op = llvm::AtomicRMWInst::Nand;
725     break;
726   }
727 
728   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
729   llvm::AtomicRMWInst *RMWI =
730       CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order, Scope);
731   RMWI->setVolatile(E->isVolatile());
732 
733   // For __atomic_*_fetch operations, perform the operation again to
734   // determine the value which was written.
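  // E.g. '__atomic_add_fetch(p, v, order)' becomes 'old = atomicrmw add ...'
  // followed by a plain IR 'add old, v' that reconstructs the stored value.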
735   llvm::Value *Result = RMWI;
736   if (PostOpMinMax)
737     Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
738                                   E->getValueType()->isSignedIntegerType(),
739                                   RMWI, LoadVal1);
740   else if (PostOp)
741     Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
742                                      LoadVal1);
743   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
744       E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
745     Result = CGF.Builder.CreateNot(Result);
746   CGF.Builder.CreateStore(Result, Dest);
747 }
748 
749 // This function emits any expression (scalar, complex, or aggregate)
750 // into a temporary alloca.
751 static Address
752 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
753   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
754   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
755                        /*Init*/ true);
756   return DeclPtr;
757 }
758 
759 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
760                          Address Ptr, Address Val1, Address Val2,
761                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
762                          uint64_t Size, llvm::AtomicOrdering Order,
763                          llvm::Value *Scope) {
764   auto ScopeModel = Expr->getScopeModel();
765 
766   // LLVM atomic instructions always have a sync scope. If the clang atomic
767   // expression has no scope operand, use the default LLVM sync scope.
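  // E.g. (illustrative) an OpenCL atomic with memory_scope_work_group maps to
  // the target's work-group sync scope (such as "workgroup" on AMDGPU), while
  // scope-less C11 atomics use the default (system) scope.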
768   if (!ScopeModel) {
769     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
770                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
771     return;
772   }
773 
774   // Handle constant scope.
775   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
776     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
777         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
778         Order, CGF.CGM.getLLVMContext());
779     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
780                  Order, SCID);
781     return;
782   }
783 
784   // Handle non-constant scope.
785   auto &Builder = CGF.Builder;
786   auto Scopes = ScopeModel->getRuntimeValues();
787   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
788   for (auto S : Scopes)
789     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
790 
791   llvm::BasicBlock *ContBB =
792       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
793 
794   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
795   // If an unsupported sync scope is encountered at run time, assume a
796   // fallback sync scope value.
797   auto FallBack = ScopeModel->getFallBackValue();
798   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
799   for (auto S : Scopes) {
800     auto *B = BB[S];
801     if (S != FallBack)
802       SI->addCase(Builder.getInt32(S), B);
803 
804     Builder.SetInsertPoint(B);
805     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
806                  Order,
807                  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
808                                                          ScopeModel->map(S),
809                                                          Order,
810                                                          CGF.getLLVMContext()));
811     Builder.CreateBr(ContBB);
812   }
813 
814   Builder.SetInsertPoint(ContBB);
815 }
816 
817 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
818   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
819   QualType MemTy = AtomicTy;
820   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
821     MemTy = AT->getValueType();
822   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
823 
824   Address Val1 = Address::invalid();
825   Address Val2 = Address::invalid();
826   Address Dest = Address::invalid();
827   Address Ptr = EmitPointerWithAlignment(E->getPtr());
828 
829   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
830       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
831     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
832     EmitAtomicInit(E->getVal1(), lvalue);
833     return RValue::get(nullptr);
834   }
835 
836   auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
837   uint64_t Size = TInfo.Width.getQuantity();
838   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
839 
840   CharUnits MaxInlineWidth =
841       getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
842   DiagnosticsEngine &Diags = CGM.getDiags();
843   bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
844   bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
845   if (Misaligned) {
846     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
847         << (int)TInfo.Width.getQuantity()
848         << (int)Ptr.getAlignment().getQuantity();
849   }
850   if (Oversized) {
851     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
852         << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
853   }
854 
855   llvm::Value *Order = EmitScalarExpr(E->getOrder());
856   llvm::Value *Scope =
857       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
858   bool ShouldCastToIntPtrTy = true;
859 
860   switch (E->getOp()) {
861   case AtomicExpr::AO__c11_atomic_init:
862   case AtomicExpr::AO__opencl_atomic_init:
863     llvm_unreachable("Already handled above with EmitAtomicInit!");
864 
865   case AtomicExpr::AO__atomic_load_n:
866   case AtomicExpr::AO__scoped_atomic_load_n:
867   case AtomicExpr::AO__c11_atomic_load:
868   case AtomicExpr::AO__opencl_atomic_load:
869   case AtomicExpr::AO__hip_atomic_load:
870     break;
871 
872   case AtomicExpr::AO__atomic_load:
873   case AtomicExpr::AO__scoped_atomic_load:
874     Dest = EmitPointerWithAlignment(E->getVal1());
875     break;
876 
877   case AtomicExpr::AO__atomic_store:
878   case AtomicExpr::AO__scoped_atomic_store:
879     Val1 = EmitPointerWithAlignment(E->getVal1());
880     break;
881 
882   case AtomicExpr::AO__atomic_exchange:
883   case AtomicExpr::AO__scoped_atomic_exchange:
884     Val1 = EmitPointerWithAlignment(E->getVal1());
885     Dest = EmitPointerWithAlignment(E->getVal2());
886     break;
887 
888   case AtomicExpr::AO__atomic_compare_exchange:
889   case AtomicExpr::AO__atomic_compare_exchange_n:
890   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
891   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
892   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
893   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
894   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
895   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
896   case AtomicExpr::AO__scoped_atomic_compare_exchange:
897   case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
898     Val1 = EmitPointerWithAlignment(E->getVal1());
899     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
900         E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
901       Val2 = EmitPointerWithAlignment(E->getVal2());
902     else
903       Val2 = EmitValToTemp(*this, E->getVal2());
904     OrderFail = EmitScalarExpr(E->getOrderFail());
905     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
906         E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
907         E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
908         E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
909       IsWeak = EmitScalarExpr(E->getWeak());
910     break;
911 
912   case AtomicExpr::AO__c11_atomic_fetch_add:
913   case AtomicExpr::AO__c11_atomic_fetch_sub:
914   case AtomicExpr::AO__hip_atomic_fetch_add:
915   case AtomicExpr::AO__hip_atomic_fetch_sub:
916   case AtomicExpr::AO__opencl_atomic_fetch_add:
917   case AtomicExpr::AO__opencl_atomic_fetch_sub:
918     if (MemTy->isPointerType()) {
919       // For pointer arithmetic, we're required to do a bit of math:
920       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
921       // ... but only for the C11 builtins. The GNU builtins expect the
922       // user to multiply by sizeof(T).
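      // E.g. for '_Atomic(int *) p', '__c11_atomic_fetch_add(&p, 1, ...)'
      // advances the pointer by sizeof(int) bytes, while the GNU
      // '__atomic_fetch_add' would add only one byte unless the caller scales
      // the operand by sizeof(int).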
923       QualType Val1Ty = E->getVal1()->getType();
924       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
925       CharUnits PointeeIncAmt =
926           getContext().getTypeSizeInChars(MemTy->getPointeeType());
927       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
928       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
929       Val1 = Temp;
930       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
931       break;
932     }
933     [[fallthrough]];
934   case AtomicExpr::AO__atomic_fetch_add:
935   case AtomicExpr::AO__atomic_fetch_max:
936   case AtomicExpr::AO__atomic_fetch_min:
937   case AtomicExpr::AO__atomic_fetch_sub:
938   case AtomicExpr::AO__atomic_add_fetch:
939   case AtomicExpr::AO__atomic_max_fetch:
940   case AtomicExpr::AO__atomic_min_fetch:
941   case AtomicExpr::AO__atomic_sub_fetch:
942   case AtomicExpr::AO__c11_atomic_fetch_max:
943   case AtomicExpr::AO__c11_atomic_fetch_min:
944   case AtomicExpr::AO__opencl_atomic_fetch_max:
945   case AtomicExpr::AO__opencl_atomic_fetch_min:
946   case AtomicExpr::AO__hip_atomic_fetch_max:
947   case AtomicExpr::AO__hip_atomic_fetch_min:
948   case AtomicExpr::AO__scoped_atomic_fetch_add:
949   case AtomicExpr::AO__scoped_atomic_fetch_max:
950   case AtomicExpr::AO__scoped_atomic_fetch_min:
951   case AtomicExpr::AO__scoped_atomic_fetch_sub:
952   case AtomicExpr::AO__scoped_atomic_add_fetch:
953   case AtomicExpr::AO__scoped_atomic_max_fetch:
954   case AtomicExpr::AO__scoped_atomic_min_fetch:
955   case AtomicExpr::AO__scoped_atomic_sub_fetch:
956     ShouldCastToIntPtrTy = !MemTy->isFloatingType();
957     [[fallthrough]];
958 
959   case AtomicExpr::AO__atomic_fetch_and:
960   case AtomicExpr::AO__atomic_fetch_nand:
961   case AtomicExpr::AO__atomic_fetch_or:
962   case AtomicExpr::AO__atomic_fetch_xor:
963   case AtomicExpr::AO__atomic_and_fetch:
964   case AtomicExpr::AO__atomic_nand_fetch:
965   case AtomicExpr::AO__atomic_or_fetch:
966   case AtomicExpr::AO__atomic_xor_fetch:
967   case AtomicExpr::AO__atomic_store_n:
968   case AtomicExpr::AO__atomic_exchange_n:
969   case AtomicExpr::AO__c11_atomic_fetch_and:
970   case AtomicExpr::AO__c11_atomic_fetch_nand:
971   case AtomicExpr::AO__c11_atomic_fetch_or:
972   case AtomicExpr::AO__c11_atomic_fetch_xor:
973   case AtomicExpr::AO__c11_atomic_store:
974   case AtomicExpr::AO__c11_atomic_exchange:
975   case AtomicExpr::AO__hip_atomic_fetch_and:
976   case AtomicExpr::AO__hip_atomic_fetch_or:
977   case AtomicExpr::AO__hip_atomic_fetch_xor:
978   case AtomicExpr::AO__hip_atomic_store:
979   case AtomicExpr::AO__hip_atomic_exchange:
980   case AtomicExpr::AO__opencl_atomic_fetch_and:
981   case AtomicExpr::AO__opencl_atomic_fetch_or:
982   case AtomicExpr::AO__opencl_atomic_fetch_xor:
983   case AtomicExpr::AO__opencl_atomic_store:
984   case AtomicExpr::AO__opencl_atomic_exchange:
985   case AtomicExpr::AO__scoped_atomic_fetch_and:
986   case AtomicExpr::AO__scoped_atomic_fetch_nand:
987   case AtomicExpr::AO__scoped_atomic_fetch_or:
988   case AtomicExpr::AO__scoped_atomic_fetch_xor:
989   case AtomicExpr::AO__scoped_atomic_and_fetch:
990   case AtomicExpr::AO__scoped_atomic_nand_fetch:
991   case AtomicExpr::AO__scoped_atomic_or_fetch:
992   case AtomicExpr::AO__scoped_atomic_xor_fetch:
993   case AtomicExpr::AO__scoped_atomic_store_n:
994   case AtomicExpr::AO__scoped_atomic_exchange_n:
995     Val1 = EmitValToTemp(*this, E->getVal1());
996     break;
997   }
998 
999   QualType RValTy = E->getType().getUnqualifiedType();
1000 
1001   // The inlined atomics only function on iN types, where N is a power of 2. We
1002   // need to make sure (via temporaries if necessary) that all incoming values
1003   // are compatible.
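  // E.g. an '_Atomic(float)' exchange is performed on the i32 that overlays
  // it; the pointer and value temporaries are recast below.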
1004   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
1005   AtomicInfo Atomics(*this, AtomicVal);
1006 
1007   if (ShouldCastToIntPtrTy) {
1008     Ptr = Atomics.castToAtomicIntPointer(Ptr);
1009     if (Val1.isValid())
1010       Val1 = Atomics.convertToAtomicIntPointer(Val1);
1011     if (Val2.isValid())
1012       Val2 = Atomics.convertToAtomicIntPointer(Val2);
1013   }
1014   if (Dest.isValid()) {
1015     if (ShouldCastToIntPtrTy)
1016       Dest = Atomics.castToAtomicIntPointer(Dest);
1017   } else if (E->isCmpXChg())
1018     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
1019   else if (!RValTy->isVoidType()) {
1020     Dest = Atomics.CreateTempAlloca();
1021     if (ShouldCastToIntPtrTy)
1022       Dest = Atomics.castToAtomicIntPointer(Dest);
1023   }
1024 
1025   bool PowerOf2Size = (Size & (Size - 1)) == 0;
1026   bool UseLibcall = !PowerOf2Size || (Size > 16);
1027 
1028   // For atomics larger than 16 bytes, emit a libcall from the frontend. This
1029   // avoids the overhead of dealing with excessively-large value types in IR.
1030   // Non-power-of-2 values also lower to libcall here, as they are not currently
1031   // permitted in IR instructions (although that constraint could be relaxed in
1032   // the future). For other cases where a libcall is required on a given
1033   // platform, we let the backend handle it (this includes handling for all of
1034   // the size-optimized libcall variants, which are only valid up to 16 bytes.)
1035   //
1036   // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
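  // E.g. (illustrative) loading a 32-byte _Atomic struct emits
  //   call void @__atomic_load(i64 32, ptr %obj, ptr %tmp, i32 5)
  // rather than an oversized atomic load instruction.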
1037   if (UseLibcall) {
1038     CallArgList Args;
1039     // For non-optimized library calls, the size is the first parameter.
1040     Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1041              getContext().getSizeType());
1042 
1043     // The atomic address is the second parameter.
1044     // The OpenCL atomic library functions only accept pointer arguments to
1045     // the generic address space.
1046     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1047       if (!E->isOpenCL())
1048         return V;
1049       auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1050       if (AS == LangAS::opencl_generic)
1051         return V;
1052       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1053       auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
1054 
1055       return getTargetHooks().performAddrSpaceCast(
1056           *this, V, AS, LangAS::opencl_generic, DestType, false);
1057     };
1058 
1059     Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
1060                                                 E->getPtr()->getType())),
1061              getContext().VoidPtrTy);
1062 
1063     // The next 1-3 parameters are op-dependent.
1064     std::string LibCallName;
1065     QualType RetTy;
1066     bool HaveRetTy = false;
1067     switch (E->getOp()) {
1068     case AtomicExpr::AO__c11_atomic_init:
1069     case AtomicExpr::AO__opencl_atomic_init:
1070       llvm_unreachable("Already handled!");
1071 
1072     // There is only one libcall for compare and exchange, because there is no
1073     // optimisation benefit possible from a libcall version of a weak compare
1074     // and exchange.
1075     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1076     //                                void *desired, int success, int failure)
1077     case AtomicExpr::AO__atomic_compare_exchange:
1078     case AtomicExpr::AO__atomic_compare_exchange_n:
1079     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1080     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1081     case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1082     case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1083     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1084     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1085     case AtomicExpr::AO__scoped_atomic_compare_exchange:
1086     case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
1087       LibCallName = "__atomic_compare_exchange";
1088       RetTy = getContext().BoolTy;
1089       HaveRetTy = true;
1090       Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1091                                                   E->getVal1()->getType())),
1092                getContext().VoidPtrTy);
1093       Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
1094                                                   E->getVal2()->getType())),
1095                getContext().VoidPtrTy);
1096       Args.add(RValue::get(Order), getContext().IntTy);
1097       Order = OrderFail;
1098       break;
1099     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1100     //                        int order)
1101     case AtomicExpr::AO__atomic_exchange:
1102     case AtomicExpr::AO__atomic_exchange_n:
1103     case AtomicExpr::AO__c11_atomic_exchange:
1104     case AtomicExpr::AO__hip_atomic_exchange:
1105     case AtomicExpr::AO__opencl_atomic_exchange:
1106     case AtomicExpr::AO__scoped_atomic_exchange:
1107     case AtomicExpr::AO__scoped_atomic_exchange_n:
1108       LibCallName = "__atomic_exchange";
1109       Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1110                                                   E->getVal1()->getType())),
1111                getContext().VoidPtrTy);
1112       break;
1113     // void __atomic_store(size_t size, void *mem, void *val, int order)
1114     case AtomicExpr::AO__atomic_store:
1115     case AtomicExpr::AO__atomic_store_n:
1116     case AtomicExpr::AO__c11_atomic_store:
1117     case AtomicExpr::AO__hip_atomic_store:
1118     case AtomicExpr::AO__opencl_atomic_store:
1119     case AtomicExpr::AO__scoped_atomic_store:
1120     case AtomicExpr::AO__scoped_atomic_store_n:
1121       LibCallName = "__atomic_store";
1122       RetTy = getContext().VoidTy;
1123       HaveRetTy = true;
1124       Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1125                                                   E->getVal1()->getType())),
1126                getContext().VoidPtrTy);
1127       break;
1128     // void __atomic_load(size_t size, void *mem, void *return, int order)
1129     case AtomicExpr::AO__atomic_load:
1130     case AtomicExpr::AO__atomic_load_n:
1131     case AtomicExpr::AO__c11_atomic_load:
1132     case AtomicExpr::AO__hip_atomic_load:
1133     case AtomicExpr::AO__opencl_atomic_load:
1134     case AtomicExpr::AO__scoped_atomic_load:
1135     case AtomicExpr::AO__scoped_atomic_load_n:
1136       LibCallName = "__atomic_load";
1137       break;
1138     case AtomicExpr::AO__atomic_add_fetch:
1139     case AtomicExpr::AO__scoped_atomic_add_fetch:
1140     case AtomicExpr::AO__atomic_fetch_add:
1141     case AtomicExpr::AO__c11_atomic_fetch_add:
1142     case AtomicExpr::AO__hip_atomic_fetch_add:
1143     case AtomicExpr::AO__opencl_atomic_fetch_add:
1144     case AtomicExpr::AO__scoped_atomic_fetch_add:
1145     case AtomicExpr::AO__atomic_and_fetch:
1146     case AtomicExpr::AO__scoped_atomic_and_fetch:
1147     case AtomicExpr::AO__atomic_fetch_and:
1148     case AtomicExpr::AO__c11_atomic_fetch_and:
1149     case AtomicExpr::AO__hip_atomic_fetch_and:
1150     case AtomicExpr::AO__opencl_atomic_fetch_and:
1151     case AtomicExpr::AO__scoped_atomic_fetch_and:
1152     case AtomicExpr::AO__atomic_or_fetch:
1153     case AtomicExpr::AO__scoped_atomic_or_fetch:
1154     case AtomicExpr::AO__atomic_fetch_or:
1155     case AtomicExpr::AO__c11_atomic_fetch_or:
1156     case AtomicExpr::AO__hip_atomic_fetch_or:
1157     case AtomicExpr::AO__opencl_atomic_fetch_or:
1158     case AtomicExpr::AO__scoped_atomic_fetch_or:
1159     case AtomicExpr::AO__atomic_sub_fetch:
1160     case AtomicExpr::AO__scoped_atomic_sub_fetch:
1161     case AtomicExpr::AO__atomic_fetch_sub:
1162     case AtomicExpr::AO__c11_atomic_fetch_sub:
1163     case AtomicExpr::AO__hip_atomic_fetch_sub:
1164     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1165     case AtomicExpr::AO__scoped_atomic_fetch_sub:
1166     case AtomicExpr::AO__atomic_xor_fetch:
1167     case AtomicExpr::AO__scoped_atomic_xor_fetch:
1168     case AtomicExpr::AO__atomic_fetch_xor:
1169     case AtomicExpr::AO__c11_atomic_fetch_xor:
1170     case AtomicExpr::AO__hip_atomic_fetch_xor:
1171     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1172     case AtomicExpr::AO__scoped_atomic_fetch_xor:
1173     case AtomicExpr::AO__atomic_nand_fetch:
1174     case AtomicExpr::AO__atomic_fetch_nand:
1175     case AtomicExpr::AO__c11_atomic_fetch_nand:
1176     case AtomicExpr::AO__scoped_atomic_fetch_nand:
1177     case AtomicExpr::AO__scoped_atomic_nand_fetch:
1178     case AtomicExpr::AO__atomic_min_fetch:
1179     case AtomicExpr::AO__atomic_fetch_min:
1180     case AtomicExpr::AO__c11_atomic_fetch_min:
1181     case AtomicExpr::AO__hip_atomic_fetch_min:
1182     case AtomicExpr::AO__opencl_atomic_fetch_min:
1183     case AtomicExpr::AO__scoped_atomic_fetch_min:
1184     case AtomicExpr::AO__scoped_atomic_min_fetch:
1185     case AtomicExpr::AO__atomic_max_fetch:
1186     case AtomicExpr::AO__atomic_fetch_max:
1187     case AtomicExpr::AO__c11_atomic_fetch_max:
1188     case AtomicExpr::AO__hip_atomic_fetch_max:
1189     case AtomicExpr::AO__opencl_atomic_fetch_max:
1190     case AtomicExpr::AO__scoped_atomic_fetch_max:
1191     case AtomicExpr::AO__scoped_atomic_max_fetch:
1192       llvm_unreachable("Integral atomic operations always become atomicrmw!");
1193     }
1194 
1195     if (E->isOpenCL()) {
1196       LibCallName =
1197           std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
1198     }
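    // After this renaming, e.g. "__atomic_load" becomes "__opencl_atomic_load".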
1199     // By default, assume we return a value of the atomic type.
1200     if (!HaveRetTy) {
1201       // Value is returned through parameter before the order.
1202       RetTy = getContext().VoidTy;
1203       Args.add(RValue::get(
1204                    CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
1205                getContext().VoidPtrTy);
1206     }
1207     // Order is always the last parameter.
1208     Args.add(RValue::get(Order),
1209              getContext().IntTy);
1210     if (E->isOpenCL())
1211       Args.add(RValue::get(Scope), getContext().IntTy);
1212 
1213     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1214     // The value is returned directly from the libcall.
1215     if (E->isCmpXChg())
1216       return Res;
1217 
1218     if (RValTy->isVoidType())
1219       return RValue::get(nullptr);
1220 
1221     return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1222                                RValTy, E->getExprLoc());
1223   }
1224 
1225   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1226                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1227                  E->getOp() == AtomicExpr::AO__hip_atomic_store ||
1228                  E->getOp() == AtomicExpr::AO__atomic_store ||
1229                  E->getOp() == AtomicExpr::AO__atomic_store_n ||
1230                  E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
1231                  E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
1232   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1233                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1234                 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
1235                 E->getOp() == AtomicExpr::AO__atomic_load ||
1236                 E->getOp() == AtomicExpr::AO__atomic_load_n ||
1237                 E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
1238                 E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
1239 
1240   if (isa<llvm::ConstantInt>(Order)) {
1241     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1242     // We should not ever get to a case where the ordering isn't a valid C ABI
1243     // value, but it's hard to enforce that in general.
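    // For reference, the C ABI ordering values (llvm::AtomicOrderingCABI) are:
    // relaxed=0, consume=1, acquire=2, release=3, acq_rel=4, seq_cst=5.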
1244     if (llvm::isValidAtomicOrderingCABI(ord))
1245       switch ((llvm::AtomicOrderingCABI)ord) {
1246       case llvm::AtomicOrderingCABI::relaxed:
1247         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1248                      llvm::AtomicOrdering::Monotonic, Scope);
1249         break;
1250       case llvm::AtomicOrderingCABI::consume:
1251       case llvm::AtomicOrderingCABI::acquire:
1252         if (IsStore)
1253           break; // Avoid crashing on code with undefined behavior
1254         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1255                      llvm::AtomicOrdering::Acquire, Scope);
1256         break;
1257       case llvm::AtomicOrderingCABI::release:
1258         if (IsLoad)
1259           break; // Avoid crashing on code with undefined behavior
1260         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1261                      llvm::AtomicOrdering::Release, Scope);
1262         break;
1263       case llvm::AtomicOrderingCABI::acq_rel:
1264         if (IsLoad || IsStore)
1265           break; // Avoid crashing on code with undefined behavior
1266         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1267                      llvm::AtomicOrdering::AcquireRelease, Scope);
1268         break;
1269       case llvm::AtomicOrderingCABI::seq_cst:
1270         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1271                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1272         break;
1273       }
1274     if (RValTy->isVoidType())
1275       return RValue::get(nullptr);
1276 
1277     return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1278                                RValTy, E->getExprLoc());
1279   }
1280 
1281   // Long case, when Order isn't obviously constant.
1282 
1283   // Create all the relevant BB's
1284   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1285                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1286                    *SeqCstBB = nullptr;
1287   MonotonicBB = createBasicBlock("monotonic", CurFn);
1288   if (!IsStore)
1289     AcquireBB = createBasicBlock("acquire", CurFn);
1290   if (!IsLoad)
1291     ReleaseBB = createBasicBlock("release", CurFn);
1292   if (!IsLoad && !IsStore)
1293     AcqRelBB = createBasicBlock("acqrel", CurFn);
1294   SeqCstBB = createBasicBlock("seqcst", CurFn);
1295   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1296 
1297   // Create the switch for the split
1298   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1299   // doesn't matter unless someone is crazy enough to use something that
1300   // doesn't fold to a constant for the ordering.
1301   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1302   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
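  // For a full read-modify-write operation the runtime dispatch emitted below
  // looks roughly like this (sketch):
  //   switch i32 %order, label %monotonic [ i32 1, label %acquire
  //                                         i32 2, label %acquire
  //                                         i32 3, label %release
  //                                         i32 4, label %acqrel
  //                                         i32 5, label %seqcst ]
  // with every block branching to %atomic.continue afterwards; loads omit the
  // release/acqrel cases and stores omit the acquire/acqrel cases.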
1303 
1304   // Emit all the different atomics
1305   Builder.SetInsertPoint(MonotonicBB);
1306   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1307                llvm::AtomicOrdering::Monotonic, Scope);
1308   Builder.CreateBr(ContBB);
1309   if (!IsStore) {
1310     Builder.SetInsertPoint(AcquireBB);
1311     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1312                  llvm::AtomicOrdering::Acquire, Scope);
1313     Builder.CreateBr(ContBB);
1314     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1315                 AcquireBB);
1316     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1317                 AcquireBB);
1318   }
1319   if (!IsLoad) {
1320     Builder.SetInsertPoint(ReleaseBB);
1321     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1322                  llvm::AtomicOrdering::Release, Scope);
1323     Builder.CreateBr(ContBB);
1324     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1325                 ReleaseBB);
1326   }
1327   if (!IsLoad && !IsStore) {
1328     Builder.SetInsertPoint(AcqRelBB);
1329     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1330                  llvm::AtomicOrdering::AcquireRelease, Scope);
1331     Builder.CreateBr(ContBB);
1332     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1333                 AcqRelBB);
1334   }
1335   Builder.SetInsertPoint(SeqCstBB);
1336   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1337                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1338   Builder.CreateBr(ContBB);
1339   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1340               SeqCstBB);
1341 
1342   // Cleanup and return
1343   Builder.SetInsertPoint(ContBB);
1344   if (RValTy->isVoidType())
1345     return RValue::get(nullptr);
1346 
1347   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1348   return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1349                              RValTy, E->getExprLoc());
1350 }
1351 
1352 Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
1353   llvm::IntegerType *ty =
1354     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1355   return addr.withElementType(ty);
1356 }
1357 
1358 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1359   llvm::Type *Ty = Addr.getElementType();
1360   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1361   if (SourceSizeInBits != AtomicSizeInBits) {
1362     Address Tmp = CreateTempAlloca();
1363     CGF.Builder.CreateMemCpy(Tmp, Addr,
1364                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1365     Addr = Tmp;
1366   }
1367 
1368   return castToAtomicIntPointer(Addr);
1369 }
1370 
1371 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1372                                              AggValueSlot resultSlot,
1373                                              SourceLocation loc,
1374                                              bool asValue) const {
1375   if (LVal.isSimple()) {
1376     if (EvaluationKind == TEK_Aggregate)
1377       return resultSlot.asRValue();
1378 
1379     // Drill into the padding structure if we have one.
1380     if (hasPadding())
1381       addr = CGF.Builder.CreateStructGEP(addr, 0);
1382 
1383     // Otherwise, just convert the temporary to an r-value using the
1384     // normal conversion routine.
1385     return CGF.convertTempToRValue(addr, getValueType(), loc);
1386   }
1387   if (!asValue)
1388     // Get RValue from temp memory as atomic for non-simple lvalues
1389     return RValue::get(CGF.Builder.CreateLoad(addr));
1390   if (LVal.isBitField())
1391     return CGF.EmitLoadOfBitfieldLValue(
1392         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1393                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1394   if (LVal.isVectorElt())
1395     return CGF.EmitLoadOfLValue(
1396         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1397                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1398   assert(LVal.isExtVectorElt());
1399   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1400       addr, LVal.getExtVectorElts(), LVal.getType(),
1401       LVal.getBaseInfo(), TBAAAccessInfo()));
1402 }
1403 
1404 /// Return true if \param ValTy is a type that should be cast to an integer
1405 /// around the atomic memory operation. If \param CmpXchg is true, the cast
1406 /// of a floating-point type is made because that instruction cannot have
1407 /// floating-point operands.  TODO: Allow compare-and-exchange and FP - see
1408 /// comment in AtomicExpandPass.cpp.
1409 static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
1410   if (ValTy->isFloatingPointTy())
1411     return ValTy->isX86_FP80Ty() || CmpXchg;
1412   return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
1413 }
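// For illustration: float and double atomics stay in FP form for plain
// loads/stores (CmpXchg == false), x86_fp80 is always cast to an integer, and
// any type that is neither integer, pointer, nor FP (e.g. a small struct) is
// accessed through an integer of AtomicSizeInBits.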
1414 
1415 RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
1416                                           AggValueSlot ResultSlot,
1417                                           SourceLocation Loc, bool AsValue,
1418                                           bool CmpXchg) const {
1419   // Try to avoid going through memory in some easy cases.
1420   assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
1421           Val->getType()->isIEEELikeFPTy()) &&
1422          "Expected integer, pointer or floating point value when converting "
1423          "result.");
1424   if (getEvaluationKind() == TEK_Scalar &&
1425       (((!LVal.isBitField() ||
1426          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1427         !hasPadding()) ||
1428        !AsValue)) {
1429     auto *ValTy = AsValue
1430                       ? CGF.ConvertTypeForMem(ValueTy)
1431                       : getAtomicAddress().getElementType();
1432     if (!shouldCastToInt(ValTy, CmpXchg)) {
1433       assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
1434              "Different integer types.");
1435       return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
1436     }
1437     if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
1438       return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
1439   }
1440 
1441   // Create a temporary.  This needs to be big enough to hold the
1442   // atomic integer.
1443   Address Temp = Address::invalid();
1444   bool TempIsVolatile = false;
1445   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1446     assert(!ResultSlot.isIgnored());
1447     Temp = ResultSlot.getAddress();
1448     TempIsVolatile = ResultSlot.isVolatile();
1449   } else {
1450     Temp = CreateTempAlloca();
1451   }
1452 
1453   // Slam the integer into the temporary.
1454   Address CastTemp = castToAtomicIntPointer(Temp);
1455   CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);
1456 
1457   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1458 }
1459 
1460 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1461                                        llvm::AtomicOrdering AO, bool) {
1462   // void __atomic_load(size_t size, void *mem, void *return, int order);
1463   CallArgList Args;
1464   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1465   Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1466   Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
1467   Args.add(
1468       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1469       CGF.getContext().IntTy);
1470   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1471 }
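// Illustrative lowering for a 16-byte atomic object loaded seq_cst:
//   __atomic_load(16, &obj, &tmp, /*seq_cst*/5);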
1472 
1473 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1474                                           bool IsVolatile, bool CmpXchg) {
1475   // Okay, we're doing this natively.
1476   Address Addr = getAtomicAddress();
1477   if (shouldCastToInt(Addr.getElementType(), CmpXchg))
1478     Addr = castToAtomicIntPointer(Addr);
1479   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1480   Load->setAtomic(AO);
1481 
1482   // Other decoration.
1483   if (IsVolatile)
1484     Load->setVolatile(true);
1485   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1486   return Load;
1487 }
1488 
1489 /// An LValue is a candidate for having its loads and stores be made atomic if
1490 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1491 /// such an operation can be performed without a libcall.
1492 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1493   if (!CGM.getLangOpts().MSVolatile) return false;
1494   AtomicInfo AI(*this, LV);
1495   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1496   // An atomic is inline if we don't need to use a libcall.
1497   bool AtomicIsInline = !AI.shouldUseLibcall();
1498   // MSVC doesn't seem to do this for types wider than a pointer.
1499   if (getContext().getTypeSize(LV.getType()) >
1500       getContext().getTypeSize(getContext().getIntPtrType()))
1501     return false;
1502   return IsVolatile && AtomicIsInline;
1503 }
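// Illustration (hypothetical user code compiled with /volatile:ms):
//   volatile long v;
//   v = 1;        // emitted as an atomic release store
//   long x = v;   // emitted as an atomic acquire load
// provided the type is no wider than a pointer and no libcall is needed.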
1504 
1505 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1506                                        AggValueSlot Slot) {
1507   llvm::AtomicOrdering AO;
1508   bool IsVolatile = LV.isVolatileQualified();
1509   if (LV.getType()->isAtomicType()) {
1510     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1511   } else {
1512     AO = llvm::AtomicOrdering::Acquire;
1513     IsVolatile = true;
1514   }
1515   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1516 }
1517 
1518 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1519                                   bool AsValue, llvm::AtomicOrdering AO,
1520                                   bool IsVolatile) {
1521   // Check whether we should use a library call.
1522   if (shouldUseLibcall()) {
1523     Address TempAddr = Address::invalid();
1524     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1525       assert(getEvaluationKind() == TEK_Aggregate);
1526       TempAddr = ResultSlot.getAddress();
1527     } else
1528       TempAddr = CreateTempAlloca();
1529 
1530     EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);
1531 
1532     // Okay, turn that back into the original value or whole atomic (for
1533     // non-simple lvalues) type.
1534     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1535   }
1536 
1537   // Okay, we're doing this natively.
1538   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1539 
1540   // If we're ignoring an aggregate return, don't do anything.
1541   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1542     return RValue::getAggregate(Address::invalid(), false);
1543 
1544   // Okay, turn that back into the original value or atomic (for non-simple
1545   // lvalues) type.
1546   return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1547 }
1548 
1549 /// Emit a load from an l-value of atomic type.  Note that the r-value
1550 /// we produce is an r-value of the atomic *value* type.
1551 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1552                                        llvm::AtomicOrdering AO, bool IsVolatile,
1553                                        AggValueSlot resultSlot) {
1554   AtomicInfo Atomics(*this, src);
1555   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1556                                 IsVolatile);
1557 }
1558 
1559 /// Copy an r-value into memory as part of storing to an atomic type.
1560 /// This needs to create a bit-pattern suitable for atomic operations.
1561 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1562   assert(LVal.isSimple());
1563   // If we have an r-value, the rvalue should be of the atomic type,
1564   // which means that the caller is responsible for having zeroed
1565   // any padding.  Just do an aggregate copy of that type.
1566   if (rvalue.isAggregate()) {
1567     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1568     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1569                                     getAtomicType());
1570     bool IsVolatile = rvalue.isVolatileQualified() ||
1571                       LVal.isVolatileQualified();
1572     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1573                           AggValueSlot::DoesNotOverlap, IsVolatile);
1574     return;
1575   }
1576 
1577   // Okay, otherwise we're copying stuff.
1578 
1579   // Zero out the buffer if necessary.
1580   emitMemSetZeroIfNecessary();
1581 
1582   // Drill past the padding if present.
1583   LValue TempLVal = projectValue();
1584 
1585   // Okay, store the rvalue in.
1586   if (rvalue.isScalar()) {
1587     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1588   } else {
1589     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1590   }
1591 }
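// For instance, an atomic whose value type occupies 3 bytes is typically
// widened to a 4-byte atomic; the memset-zero step above gives the padding
// byte a defined value, which matters when the full atomic width is later
// compared bitwise by a cmpxchg.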
1592 
1593 
1594 /// Materialize an r-value into memory for the purposes of storing it
1595 /// to an atomic type.
1596 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1597   // Aggregate r-values are already in memory, and EmitAtomicStore
1598   // requires them to be values of the atomic type.
1599   if (rvalue.isAggregate())
1600     return rvalue.getAggregateAddress();
1601 
1602   // Otherwise, make a temporary and materialize into it.
1603   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1604   AtomicInfo Atomics(CGF, TempLV);
1605   Atomics.emitCopyIntoMemory(rvalue);
1606   return TempLV.getAddress();
1607 }
1608 
1609 llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
1610   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
1611     return RVal.getScalarVal();
1612   return nullptr;
1613 }
1614 
1615 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
1616   // If we've got a scalar value of the right size, try to avoid going
1617   // through memory. Floats are cast if needed by AtomicExpandPass.
1618   if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
1619     if (!shouldCastToInt(Value->getType(), CmpXchg))
1620       return CGF.EmitToMemory(Value, ValueTy);
1621     else {
1622       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1623           CGF.getLLVMContext(),
1624           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1625       if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1626         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1627     }
1628   }
1629   // Otherwise, we need to go through memory.
1630   // Put the r-value in memory.
1631   Address Addr = materializeRValue(RVal);
1632 
1633   // Cast the temporary to the atomic int type and pull a value out.
1634   Addr = castToAtomicIntPointer(Addr);
1635   return CGF.Builder.CreateLoad(Addr);
1636 }
1637 
1638 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1639     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1640     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1641   // Do the atomic compare-and-exchange.
1642   Address Addr = getAtomicAddressAsAtomicIntPointer();
1643   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
1644                                                Success, Failure);
1645   // Other decoration.
1646   Inst->setVolatile(LVal.isVolatileQualified());
1647   Inst->setWeak(IsWeak);
1648 
1649   // Okay, turn that back into the original value type.
1650   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1651   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1652   return std::make_pair(PreviousVal, SuccessFailureVal);
1653 }
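// The instruction emitted above has this shape (sketch, iN = AtomicSizeInBits):
//   %pair = cmpxchg [weak] [volatile] ptr %addr, iN %expected, iN %desired
//                   <success-order> <failure-order>
//   %old  = extractvalue { iN, i1 } %pair, 0   ; previous value
//   %ok   = extractvalue { iN, i1 } %pair, 1   ; did the exchange happen?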
1654 
1655 llvm::Value *
1656 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1657                                              llvm::Value *DesiredAddr,
1658                                              llvm::AtomicOrdering Success,
1659                                              llvm::AtomicOrdering Failure) {
1660   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1661   // void *desired, int success, int failure);
1662   CallArgList Args;
1663   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1664   Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1665   Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
1666   Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
1667   Args.add(RValue::get(
1668                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1669            CGF.getContext().IntTy);
1670   Args.add(RValue::get(
1671                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1672            CGF.getContext().IntTy);
1673   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1674                                               CGF.getContext().BoolTy, Args);
1675 
1676   return SuccessFailureRVal.getScalarVal();
1677 }
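// Illustrative call for a 16-byte atomic, seq_cst success / acquire failure:
//   bool ok = __atomic_compare_exchange(16, &obj, &expected, &desired, 5, 2);
// On failure the libcall writes the value it observed back into *expected.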
1678 
1679 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1680     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1681     llvm::AtomicOrdering Failure, bool IsWeak) {
1682   // Check whether we should use a library call.
1683   if (shouldUseLibcall()) {
1684     // Produce a source address.
1685     Address ExpectedAddr = materializeRValue(Expected);
1686     llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1687     llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
1688     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
1689                                                  Success, Failure);
1690     return std::make_pair(
1691         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1692                                   SourceLocation(), /*AsValue=*/false),
1693         Res);
1694   }
1695 
1696   // If we've got a scalar value of the right size, try to avoid going
1697   // through memory.
1698   auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
1699   auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
1700   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1701                                          Failure, IsWeak);
1702   return std::make_pair(
1703       ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1704                              SourceLocation(), /*AsValue=*/false,
1705                              /*CmpXchg=*/true),
1706       Res.second);
1707 }
1708 
1709 static void
1710 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1711                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1712                       Address DesiredAddr) {
1713   RValue UpRVal;
1714   LValue AtomicLVal = Atomics.getAtomicLValue();
1715   LValue DesiredLVal;
1716   if (AtomicLVal.isSimple()) {
1717     UpRVal = OldRVal;
1718     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1719   } else {
1720     // Build new lvalue for temp address.
1721     Address Ptr = Atomics.materializeRValue(OldRVal);
1722     LValue UpdateLVal;
1723     if (AtomicLVal.isBitField()) {
1724       UpdateLVal =
1725           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1726                                AtomicLVal.getType(),
1727                                AtomicLVal.getBaseInfo(),
1728                                AtomicLVal.getTBAAInfo());
1729       DesiredLVal =
1730           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1731                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1732                                AtomicLVal.getTBAAInfo());
1733     } else if (AtomicLVal.isVectorElt()) {
1734       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1735                                          AtomicLVal.getType(),
1736                                          AtomicLVal.getBaseInfo(),
1737                                          AtomicLVal.getTBAAInfo());
1738       DesiredLVal = LValue::MakeVectorElt(
1739           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1740           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1741     } else {
1742       assert(AtomicLVal.isExtVectorElt());
1743       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1744                                             AtomicLVal.getType(),
1745                                             AtomicLVal.getBaseInfo(),
1746                                             AtomicLVal.getTBAAInfo());
1747       DesiredLVal = LValue::MakeExtVectorElt(
1748           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1749           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1750     }
1751     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1752   }
1753   // Store new value in the corresponding memory area.
1754   RValue NewRVal = UpdateOp(UpRVal);
1755   if (NewRVal.isScalar()) {
1756     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1757   } else {
1758     assert(NewRVal.isComplex());
1759     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1760                            /*isInit=*/false);
1761   }
1762 }
1763 
1764 void AtomicInfo::EmitAtomicUpdateLibcall(
1765     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1766     bool IsVolatile) {
1767   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1768 
1769   Address ExpectedAddr = CreateTempAlloca();
1770 
1771   EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1772   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1773   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1774   CGF.EmitBlock(ContBB);
1775   Address DesiredAddr = CreateTempAlloca();
1776   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1777       requiresMemSetZero(getAtomicAddress().getElementType())) {
1778     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1779     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1780   }
1781   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1782                                            AggValueSlot::ignored(),
1783                                            SourceLocation(), /*AsValue=*/false);
1784   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1785   llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1786   llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1787   auto *Res =
1788       EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1789   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1790   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1791 }
1792 
1793 void AtomicInfo::EmitAtomicUpdateOp(
1794     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1795     bool IsVolatile) {
1796   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1797 
1798   // Do the atomic load.
1799   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1800   // For non-simple lvalues perform compare-and-swap procedure.
1801   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1802   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1803   auto *CurBB = CGF.Builder.GetInsertBlock();
1804   CGF.EmitBlock(ContBB);
1805   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1806                                              /*NumReservedValues=*/2);
1807   PHI->addIncoming(OldVal, CurBB);
1808   Address NewAtomicAddr = CreateTempAlloca();
1809   Address NewAtomicIntAddr =
1810       shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
1811           ? castToAtomicIntPointer(NewAtomicAddr)
1812           : NewAtomicAddr;
1813 
1814   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1815       requiresMemSetZero(getAtomicAddress().getElementType())) {
1816     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1817   }
1818   auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
1819                                         SourceLocation(), /*AsValue=*/false,
1820                                         /*CmpXchg=*/true);
1821   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1822   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1823   // Try to write new value using cmpxchg operation.
1824   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1825   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1826   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1827   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1828 }
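// The loop built above is, schematically:
//   %old = atomic load (at the cmpxchg failure ordering)
//   atomic_cont:
//     %cur = phi [ %old, entry ], [ %prev, atomic_cont ]
//     ... apply UpdateOp to %cur, producing the desired value ...
//     { %prev, %ok } = cmpxchg %cur -> desired
//     br i1 %ok, label %atomic_exit, label %atomic_cont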
1829 
1830 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1831                                   RValue UpdateRVal, Address DesiredAddr) {
1832   LValue AtomicLVal = Atomics.getAtomicLValue();
1833   LValue DesiredLVal;
1834   // Build new lvalue for temp address.
1835   if (AtomicLVal.isBitField()) {
1836     DesiredLVal =
1837         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1838                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1839                              AtomicLVal.getTBAAInfo());
1840   } else if (AtomicLVal.isVectorElt()) {
1841     DesiredLVal =
1842         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1843                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1844                               AtomicLVal.getTBAAInfo());
1845   } else {
1846     assert(AtomicLVal.isExtVectorElt());
1847     DesiredLVal = LValue::MakeExtVectorElt(
1848         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1849         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1850   }
1851   // Store new value in the corresponding memory area.
1852   assert(UpdateRVal.isScalar());
1853   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1854 }
1855 
1856 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1857                                          RValue UpdateRVal, bool IsVolatile) {
1858   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1859 
1860   Address ExpectedAddr = CreateTempAlloca();
1861 
1862   EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1863   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1864   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1865   CGF.EmitBlock(ContBB);
1866   Address DesiredAddr = CreateTempAlloca();
1867   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1868       requiresMemSetZero(getAtomicAddress().getElementType())) {
1869     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1870     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1871   }
1872   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1873   llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1874   llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1875   auto *Res =
1876       EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1877   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1878   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1879 }
1880 
1881 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1882                                     bool IsVolatile) {
1883   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1884 
1885   // Do the atomic load.
1886   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1887   // For non-simple lvalues perform compare-and-swap procedure.
1888   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1889   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1890   auto *CurBB = CGF.Builder.GetInsertBlock();
1891   CGF.EmitBlock(ContBB);
1892   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1893                                              /*NumReservedValues=*/2);
1894   PHI->addIncoming(OldVal, CurBB);
1895   Address NewAtomicAddr = CreateTempAlloca();
1896   Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
1897   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1898       requiresMemSetZero(getAtomicAddress().getElementType())) {
1899     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1900   }
1901   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1902   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1903   // Try to write new value using cmpxchg operation.
1904   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1905   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1906   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1907   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1908 }
1909 
1910 void AtomicInfo::EmitAtomicUpdate(
1911     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1912     bool IsVolatile) {
1913   if (shouldUseLibcall()) {
1914     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1915   } else {
1916     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1917   }
1918 }
1919 
1920 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1921                                   bool IsVolatile) {
1922   if (shouldUseLibcall()) {
1923     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1924   } else {
1925     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1926   }
1927 }
1928 
1929 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1930                                       bool isInit) {
1931   bool IsVolatile = lvalue.isVolatileQualified();
1932   llvm::AtomicOrdering AO;
1933   if (lvalue.getType()->isAtomicType()) {
1934     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1935   } else {
1936     AO = llvm::AtomicOrdering::Release;
1937     IsVolatile = true;
1938   }
1939   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1940 }
1941 
1942 /// Emit a store to an l-value of atomic type.
1943 ///
1944 /// Note that the r-value is expected to be an r-value *of the atomic
1945 /// type*; this means that for aggregate r-values, it should include
1946 /// storage for any padding that was necessary.
1947 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1948                                       llvm::AtomicOrdering AO, bool IsVolatile,
1949                                       bool isInit) {
1950   // If this is an aggregate r-value, it should agree in type except
1951   // maybe for address-space qualification.
1952   assert(!rvalue.isAggregate() ||
1953          rvalue.getAggregateAddress().getElementType() ==
1954              dest.getAddress().getElementType());
1955 
1956   AtomicInfo atomics(*this, dest);
1957   LValue LVal = atomics.getAtomicLValue();
1958 
1959   // If this is an initialization, just put the value there normally.
1960   if (LVal.isSimple()) {
1961     if (isInit) {
1962       atomics.emitCopyIntoMemory(rvalue);
1963       return;
1964     }
1965 
1966     // Check whether we should use a library call.
1967     if (atomics.shouldUseLibcall()) {
1968       // Produce a source address.
1969       Address srcAddr = atomics.materializeRValue(rvalue);
1970 
1971       // void __atomic_store(size_t size, void *mem, void *val, int order)
1972       CallArgList args;
1973       args.add(RValue::get(atomics.getAtomicSizeValue()),
1974                getContext().getSizeType());
1975       args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
1976       args.add(RValue::get(srcAddr.emitRawPointer(*this)),
1977                getContext().VoidPtrTy);
1978       args.add(
1979           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1980           getContext().IntTy);
1981       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1982       return;
1983     }
1984 
1985     // Okay, we're doing this natively.
1986     llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);
1987 
1988     // Do the atomic store.
1989     Address Addr = atomics.getAtomicAddress();
1990     if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
1991       if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
1992         Addr = atomics.castToAtomicIntPointer(Addr);
1993         ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
1994                                            /*isSigned=*/false);
1995       }
1996     llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);
1997 
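    // An atomic store may not be acquire or acq_rel; if the caller asked for
    // one of those orderings (undefined behavior), degrade to the closest
    // legal ordering instead of emitting invalid IR.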
1998     if (AO == llvm::AtomicOrdering::Acquire)
1999       AO = llvm::AtomicOrdering::Monotonic;
2000     else if (AO == llvm::AtomicOrdering::AcquireRelease)
2001       AO = llvm::AtomicOrdering::Release;
2002     // Initializations don't need to be atomic.
2003     if (!isInit)
2004       store->setAtomic(AO);
2005 
2006     // Other decoration.
2007     if (IsVolatile)
2008       store->setVolatile(true);
2009     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2010     return;
2011   }
2012 
2013   // Emit simple atomic update operation.
2014   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2015 }
2016 
2017 /// Emit a compare-and-exchange op for atomic type.
2018 ///
2019 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2020     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2021     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2022     AggValueSlot Slot) {
2023   // If this is an aggregate r-value, it should agree in type except
2024   // maybe for address-space qualification.
2025   assert(!Expected.isAggregate() ||
2026          Expected.getAggregateAddress().getElementType() ==
2027              Obj.getAddress().getElementType());
2028   assert(!Desired.isAggregate() ||
2029          Desired.getAggregateAddress().getElementType() ==
2030              Obj.getAddress().getElementType());
2031   AtomicInfo Atomics(*this, Obj);
2032 
2033   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2034                                            IsWeak);
2035 }
2036 
2037 void CodeGenFunction::EmitAtomicUpdate(
2038     LValue LVal, llvm::AtomicOrdering AO,
2039     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2040   AtomicInfo Atomics(*this, LVal);
2041   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2042 }
2043 
2044 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2045   AtomicInfo atomics(*this, dest);
2046 
2047   switch (atomics.getEvaluationKind()) {
2048   case TEK_Scalar: {
2049     llvm::Value *value = EmitScalarExpr(init);
2050     atomics.emitCopyIntoMemory(RValue::get(value));
2051     return;
2052   }
2053 
2054   case TEK_Complex: {
2055     ComplexPairTy value = EmitComplexExpr(init);
2056     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2057     return;
2058   }
2059 
2060   case TEK_Aggregate: {
2061     // Fix up the destination if the initializer isn't an expression
2062     // of atomic type.
2063     bool Zeroed = false;
2064     if (!init->getType()->isAtomicType()) {
2065       Zeroed = atomics.emitMemSetZeroIfNecessary();
2066       dest = atomics.projectValue();
2067     }
2068 
2069     // Evaluate the expression directly into the destination.
2070     AggValueSlot slot = AggValueSlot::forLValue(
2071         dest, AggValueSlot::IsNotDestructed,
2072         AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2073         AggValueSlot::DoesNotOverlap,
2074         Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2075 
2076     EmitAggExpr(init, slot);
2077     return;
2078   }
2079   }
2080   llvm_unreachable("bad evaluation kind");
2081 }
2082