1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ABIInfoImpl.h"
14 #include "CGCUDARuntime.h"
15 #include "CGCXXABI.h"
16 #include "CGCall.h"
17 #include "CGCleanup.h"
18 #include "CGDebugInfo.h"
19 #include "CGObjCRuntime.h"
20 #include "CGOpenMPRuntime.h"
21 #include "CGRecordLayout.h"
22 #include "CodeGenFunction.h"
23 #include "CodeGenModule.h"
24 #include "CodeGenPGO.h"
25 #include "ConstantEmitter.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/ASTContext.h"
28 #include "clang/AST/ASTLambda.h"
29 #include "clang/AST/Attr.h"
30 #include "clang/AST/DeclObjC.h"
31 #include "clang/AST/NSAPI.h"
32 #include "clang/AST/StmtVisitor.h"
33 #include "clang/Basic/Builtins.h"
34 #include "clang/Basic/CodeGenOptions.h"
35 #include "clang/Basic/Module.h"
36 #include "clang/Basic/SourceManager.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/ScopeExit.h"
39 #include "llvm/ADT/StringExtras.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/Intrinsics.h"
42 #include "llvm/IR/LLVMContext.h"
43 #include "llvm/IR/MDBuilder.h"
44 #include "llvm/IR/MatrixBuilder.h"
45 #include "llvm/Support/ConvertUTF.h"
46 #include "llvm/Support/Endian.h"
47 #include "llvm/Support/MathExtras.h"
48 #include "llvm/Support/Path.h"
49 #include "llvm/Support/xxhash.h"
50 #include "llvm/Transforms/Utils/SanitizerStats.h"
51 
52 #include <numeric>
53 #include <optional>
54 #include <string>
55 
56 using namespace clang;
57 using namespace CodeGen;
58 
59 namespace clang {
60 // TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
61 //       by -fsanitize-skip-hot-cutoff
62 llvm::cl::opt<bool> ClSanitizeGuardChecks(
63     "ubsan-guard-checks", llvm::cl::Optional,
64     llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
65 
66 } // namespace clang
67 
68 //===--------------------------------------------------------------------===//
69 //                        Defines for metadata
70 //===--------------------------------------------------------------------===//
71 
72 // These values must be kept identical to the ones in the ubsan runtime library.
73 enum VariableTypeDescriptorKind : uint16_t {
74   /// An integer type.
75   TK_Integer = 0x0000,
76   /// A floating-point type.
77   TK_Float = 0x0001,
78   /// A _BitInt(N) type.
79   TK_BitInt = 0x0002,
80   /// Any other type. The value representation is unspecified.
81   TK_Unknown = 0xffff
82 };
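// (These are expected to match the TypeDescriptor kind values used by the
// UBSan runtime in compiler-rt, e.g. in ubsan_value.h; keep both in sync when
// adding new kinds.)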
83 
84 //===--------------------------------------------------------------------===//
85 //                        Miscellaneous Helper Methods
86 //===--------------------------------------------------------------------===//
87 
88 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
89 /// block.
90 RawAddress
91 CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
92                                              const Twine &Name,
93                                              llvm::Value *ArraySize) {
94   auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
95   Alloca->setAlignment(Align.getAsAlign());
96   return RawAddress(Alloca, Ty, Align, KnownNonNull);
97 }
98 
99 RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
100                                              CharUnits Align, const Twine &Name,
101                                              llvm::Value *ArraySize,
102                                              RawAddress *AllocaAddr) {
103   RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
104   if (AllocaAddr)
105     *AllocaAddr = Alloca;
106   llvm::Value *V = Alloca.getPointer();
107   // Alloca always returns a pointer in the alloca address space, which may
108   // be different from the address space expected by the language. For
109   // example, in C++ automatic variables live in the default address space.
110   // Therefore, cast the alloca to the default address space when necessary.
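  // (For instance, on targets such as AMDGPU, allocas commonly live in a
  // private address space, e.g. addrspace(5), while language-level pointers
  // use the generic address space; the exact numbering is target-dependent.)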
111 
112   unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
113   if (DestAddrSpace != Alloca.getAddressSpace()) {
114     llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
115     // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
116     // otherwise alloca is inserted at the current insertion point of the
117     // builder.
118     if (!ArraySize)
119       Builder.SetInsertPoint(getPostAllocaInsertPoint());
120     V = getTargetHooks().performAddrSpaceCast(
121         *this, V, getASTAllocaAddressSpace(), Builder.getPtrTy(DestAddrSpace),
122         /*IsNonNull=*/true);
123   }
124 
125   return RawAddress(V, Ty, Align, KnownNonNull);
126 }
127 
128 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
129 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
130 /// insertion point of the builder.
131 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
132                                                     const Twine &Name,
133                                                     llvm::Value *ArraySize) {
134   llvm::AllocaInst *Alloca;
135   if (ArraySize)
136     Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
137   else
138     Alloca =
139         new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
140                              ArraySize, Name, AllocaInsertPt->getIterator());
141   if (SanOpts.Mask & SanitizerKind::Address) {
142     Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
143   }
144   if (Allocas) {
145     Allocas->Add(Alloca);
146   }
147   return Alloca;
148 }
149 
150 /// CreateDefaultAlignTempAlloca - This creates an alloca with the
151 /// default alignment of the corresponding LLVM type, which is *not*
152 /// guaranteed to be related in any way to the expected alignment of
153 /// an AST type that might have been lowered to Ty.
154 RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
155                                                          const Twine &Name) {
156   CharUnits Align =
157       CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
158   return CreateTempAlloca(Ty, Align, Name);
159 }
160 
161 RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
162   CharUnits Align = getContext().getTypeAlignInChars(Ty);
163   return CreateTempAlloca(ConvertType(Ty), Align, Name);
164 }
165 
166 RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
167                                           RawAddress *Alloca) {
168   // FIXME: Should we prefer the preferred type alignment here?
169   return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
170 }
171 
172 RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
173                                           const Twine &Name,
174                                           RawAddress *Alloca) {
175   RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
176                                        /*ArraySize=*/nullptr, Alloca);
177 
178   if (Ty->isConstantMatrixType()) {
179     auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
180     auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
181                                                 ArrayTy->getNumElements());
182 
183     Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
184                      KnownNonNull);
185   }
186   return Result;
187 }
188 
189 RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
190                                                      CharUnits Align,
191                                                      const Twine &Name) {
192   return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
193 }
194 
195 RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
196                                                      const Twine &Name) {
197   return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
198                                   Name);
199 }
200 
201 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
202 /// expression and compare the result against zero, returning an Int1Ty value.
203 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
204   PGO->setCurrentStmt(E);
205   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
206     llvm::Value *MemPtr = EmitScalarExpr(E);
207     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
208   }
209 
210   QualType BoolTy = getContext().BoolTy;
211   SourceLocation Loc = E->getExprLoc();
212   CGFPOptionsRAII FPOptsRAII(*this, E);
213   if (!E->getType()->isAnyComplexType())
214     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
215 
216   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
217                                        Loc);
218 }
219 
220 /// EmitIgnoredExpr - Emit code to compute the specified expression,
221 /// ignoring the result.
222 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
223   if (E->isPRValue())
224     return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
225 
226   // If this is a conditional operator that yields a bit-field, special-case
227   // its emission. The normal 'EmitLValue' path is particularly difficult to
228   // codegen for here, since creating a single "LValue" for two differently
229   // sized bit-field operands is not really doable.
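  //
  // A minimal C++ example that reaches this path (both arms are bit-field
  // lvalues and the result of the conditional is discarded):
  //
  //   struct S { unsigned a : 3; unsigned b : 7; } s;
  //   cond ? s.a : s.b;   // 'cond' is some bool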
230   if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
231           E->IgnoreParenNoopCasts(getContext()))) {
232     if (CondOp->getObjectKind() == OK_BitField)
233       return EmitIgnoredConditionalOperator(CondOp);
234   }
235 
236   // Just emit it as an l-value and drop the result.
237   EmitLValue(E);
238 }
239 
240 /// EmitAnyExpr - Emit code to compute the specified expression which
241 /// can have any type.  The result is returned as an RValue struct.
242 /// If this is an aggregate expression, AggSlot indicates where the
243 /// result should be returned.
244 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
245                                     AggValueSlot aggSlot,
246                                     bool ignoreResult) {
247   switch (getEvaluationKind(E->getType())) {
248   case TEK_Scalar:
249     return RValue::get(EmitScalarExpr(E, ignoreResult));
250   case TEK_Complex:
251     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
252   case TEK_Aggregate:
253     if (!ignoreResult && aggSlot.isIgnored())
254       aggSlot = CreateAggTemp(E->getType(), "agg-temp");
255     EmitAggExpr(E, aggSlot);
256     return aggSlot.asRValue();
257   }
258   llvm_unreachable("bad evaluation kind");
259 }
260 
261 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
262 /// always be accessible even if no aggregate location is provided.
263 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
264   AggValueSlot AggSlot = AggValueSlot::ignored();
265 
266   if (hasAggregateEvaluationKind(E->getType()))
267     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
268   return EmitAnyExpr(E, AggSlot);
269 }
270 
271 /// EmitAnyExprToMem - Evaluate an expression into a given memory
272 /// location.
273 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
274                                        Address Location,
275                                        Qualifiers Quals,
276                                        bool IsInit) {
277   // FIXME: This function should take an LValue as an argument.
278   switch (getEvaluationKind(E->getType())) {
279   case TEK_Complex:
280     EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
281                               /*isInit*/ false);
282     return;
283 
284   case TEK_Aggregate: {
285     EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
286                                          AggValueSlot::IsDestructed_t(IsInit),
287                                          AggValueSlot::DoesNotNeedGCBarriers,
288                                          AggValueSlot::IsAliased_t(!IsInit),
289                                          AggValueSlot::MayOverlap));
290     return;
291   }
292 
293   case TEK_Scalar: {
294     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
295     LValue LV = MakeAddrLValue(Location, E->getType());
296     EmitStoreThroughLValue(RV, LV);
297     return;
298   }
299   }
300   llvm_unreachable("bad evaluation kind");
301 }
302 
303 void CodeGenFunction::EmitInitializationToLValue(
304     const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
305   QualType Type = LV.getType();
306   switch (getEvaluationKind(Type)) {
307   case TEK_Complex:
308     EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
309     return;
310   case TEK_Aggregate:
311     EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
312                                            AggValueSlot::DoesNotNeedGCBarriers,
313                                            AggValueSlot::IsNotAliased,
314                                            AggValueSlot::MayOverlap, IsZeroed));
315     return;
316   case TEK_Scalar:
317     if (LV.isSimple())
318       EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
319     else
320       EmitStoreThroughLValue(RValue::get(EmitScalarExpr(E)), LV);
321     return;
322   }
323   llvm_unreachable("bad evaluation kind");
324 }
325 
326 static void
327 pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
328                      const Expr *E, Address ReferenceTemporary) {
329   // Objective-C++ ARC:
330   //   If we are binding a reference to a temporary that has ownership, we
331   //   need to perform retain/release operations on the temporary.
332   //
333   // FIXME: This should be looking at E, not M.
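  //
  // A sketch of the Objective-C++ ARC case this handles (getObject is
  // hypothetical):
  //
  //   const __strong id &ref = getObject();
  //
  // where the retained temporary must be released when the reference's
  // lifetime ends.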
334   if (auto Lifetime = M->getType().getObjCLifetime()) {
335     switch (Lifetime) {
336     case Qualifiers::OCL_None:
337     case Qualifiers::OCL_ExplicitNone:
338       // Carry on to normal cleanup handling.
339       break;
340 
341     case Qualifiers::OCL_Autoreleasing:
342       // Nothing to do; cleaned up by an autorelease pool.
343       return;
344 
345     case Qualifiers::OCL_Strong:
346     case Qualifiers::OCL_Weak:
347       switch (StorageDuration Duration = M->getStorageDuration()) {
348       case SD_Static:
349         // Note: we intentionally do not register a cleanup to release
350         // the object on program termination.
351         return;
352 
353       case SD_Thread:
354         // FIXME: We should probably register a cleanup in this case.
355         return;
356 
357       case SD_Automatic:
358       case SD_FullExpression:
359         CodeGenFunction::Destroyer *Destroy;
360         CleanupKind CleanupKind;
361         if (Lifetime == Qualifiers::OCL_Strong) {
362           const ValueDecl *VD = M->getExtendingDecl();
363           bool Precise = isa_and_nonnull<VarDecl>(VD) &&
364                          VD->hasAttr<ObjCPreciseLifetimeAttr>();
365           CleanupKind = CGF.getARCCleanupKind();
366           Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
367                             : &CodeGenFunction::destroyARCStrongImprecise;
368         } else {
369           // __weak objects always get EH cleanups; otherwise, exceptions
370           // could cause really nasty crashes instead of mere leaks.
371           CleanupKind = NormalAndEHCleanup;
372           Destroy = &CodeGenFunction::destroyARCWeak;
373         }
374         if (Duration == SD_FullExpression)
375           CGF.pushDestroy(CleanupKind, ReferenceTemporary,
376                           M->getType(), *Destroy,
377                           CleanupKind & EHCleanup);
378         else
379           CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
380                                           M->getType(),
381                                           *Destroy, CleanupKind & EHCleanup);
382         return;
383 
384       case SD_Dynamic:
385         llvm_unreachable("temporary cannot have dynamic storage duration");
386       }
387       llvm_unreachable("unknown storage duration");
388     }
389   }
390 
391   QualType::DestructionKind DK = E->getType().isDestructedType();
392   if (DK != QualType::DK_none) {
393     switch (M->getStorageDuration()) {
394     case SD_Static:
395     case SD_Thread: {
396       CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
397       if (const RecordType *RT =
398               E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
399         // Get the destructor for the reference temporary.
400         if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl());
401             ClassDecl && !ClassDecl->hasTrivialDestructor())
402           ReferenceTemporaryDtor = ClassDecl->getDestructor();
403       }
404 
405       if (!ReferenceTemporaryDtor)
406         return;
407 
408       llvm::FunctionCallee CleanupFn;
409       llvm::Constant *CleanupArg;
410       if (E->getType()->isArrayType()) {
411         CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
412             ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
413             CGF.getLangOpts().Exceptions,
414             dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
415         CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
416       } else {
417         CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
418             GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
419         CleanupArg =
420             cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
421       }
422       CGF.CGM.getCXXABI().registerGlobalDtor(
423           CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
424     } break;
425     case SD_FullExpression:
426       CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
427       break;
428     case SD_Automatic:
429       CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
430       break;
431     case SD_Dynamic:
432       llvm_unreachable("temporary cannot have dynamic storage duration");
433     }
434   }
435 }
436 
437 static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
438                                            const MaterializeTemporaryExpr *M,
439                                            const Expr *Inner,
440                                            RawAddress *Alloca = nullptr) {
441   auto &TCG = CGF.getTargetHooks();
442   switch (M->getStorageDuration()) {
443   case SD_FullExpression:
444   case SD_Automatic: {
445     // If we have a constant temporary array or record, try to promote it into
446     // a constant global under the same rules by which a normal constant would
447     // have been promoted. This is easier on the optimizer and generally emits
448     // fewer instructions.
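    //
    // For example, with -fmerge-all-constants, a binding such as
    //
    //   struct P { int x, y; };
    //   const P &p = P{1, 2};
    //
    // can end up referring to a private constant global rather than a stack
    // temporary.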
449     QualType Ty = Inner->getType();
450     if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
451         (Ty->isArrayType() || Ty->isRecordType()) &&
452         Ty.isConstantStorage(CGF.getContext(), true, false))
453       if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
454         auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
455         auto *GV = new llvm::GlobalVariable(
456             CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
457             llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
458             llvm::GlobalValue::NotThreadLocal,
459             CGF.getContext().getTargetAddressSpace(AS));
460         CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
461         GV->setAlignment(alignment.getAsAlign());
462         llvm::Constant *C = GV;
463         if (AS != LangAS::Default)
464           C = TCG.performAddrSpaceCast(
465               CGF.CGM, GV, AS,
466               llvm::PointerType::get(
467                   CGF.getLLVMContext(),
468                   CGF.getContext().getTargetAddressSpace(LangAS::Default)));
469         // FIXME: Should we put the new global into a COMDAT?
470         return RawAddress(C, GV->getValueType(), alignment);
471       }
472     return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
473   }
474   case SD_Thread:
475   case SD_Static:
476     return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
477 
478   case SD_Dynamic:
479     llvm_unreachable("temporary can't have dynamic storage duration");
480   }
481   llvm_unreachable("unknown storage duration");
482 }
483 
484 /// Helper method to check if the underlying ABI is AAPCS
485 static bool isAAPCS(const TargetInfo &TargetInfo) {
486   return TargetInfo.getABI().starts_with("aapcs");
487 }
488 
489 LValue CodeGenFunction::
490 EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
491   const Expr *E = M->getSubExpr();
492 
493   assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
494           !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
495          "Reference should never be pseudo-strong!");
496 
497   // FIXME: Ideally this would use EmitAnyExprToMem; however, we cannot do so,
498   // as that would cause the lifetime adjustment to be lost for ARC.
499   auto ownership = M->getType().getObjCLifetime();
500   if (ownership != Qualifiers::OCL_None &&
501       ownership != Qualifiers::OCL_ExplicitNone) {
502     RawAddress Object = createReferenceTemporary(*this, M, E);
503     if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
504       llvm::Type *Ty = ConvertTypeForMem(E->getType());
505       Object = Object.withElementType(Ty);
506 
507       // createReferenceTemporary will promote the temporary to a global with a
508       // constant initializer if it can.  It can only do this to a value of
509       // ARC-manageable type if the value is global and therefore "immune" to
510       // ref-counting operations.  Therefore we have no need to emit either a
511       // dynamic initialization or a cleanup and we can just return the address
512       // of the temporary.
513       if (Var->hasInitializer())
514         return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
515 
516       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
517     }
518     LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
519                                        AlignmentSource::Decl);
520 
521     switch (getEvaluationKind(E->getType())) {
522     default: llvm_unreachable("expected scalar or aggregate expression");
523     case TEK_Scalar:
524       EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
525       break;
526     case TEK_Aggregate: {
527       EmitAggExpr(E, AggValueSlot::forAddr(Object,
528                                            E->getType().getQualifiers(),
529                                            AggValueSlot::IsDestructed,
530                                            AggValueSlot::DoesNotNeedGCBarriers,
531                                            AggValueSlot::IsNotAliased,
532                                            AggValueSlot::DoesNotOverlap));
533       break;
534     }
535     }
536 
537     pushTemporaryCleanup(*this, M, E, Object);
538     return RefTempDst;
539   }
540 
541   SmallVector<const Expr *, 2> CommaLHSs;
542   SmallVector<SubobjectAdjustment, 2> Adjustments;
543   E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
544 
545   for (const auto &Ignored : CommaLHSs)
546     EmitIgnoredExpr(Ignored);
547 
548   if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
549     if (opaque->getType()->isRecordType()) {
550       assert(Adjustments.empty());
551       return EmitOpaqueValueLValue(opaque);
552     }
553   }
554 
555   // Create and initialize the reference temporary.
556   RawAddress Alloca = Address::invalid();
557   RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
558   if (auto *Var = dyn_cast<llvm::GlobalVariable>(
559           Object.getPointer()->stripPointerCasts())) {
560     llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
561     Object = Object.withElementType(TemporaryType);
562     // If the temporary is a global and has a constant initializer or is a
563     // constant temporary that we promoted to a global, we may have already
564     // initialized it.
565     if (!Var->hasInitializer()) {
566       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
567       QualType RefType = M->getType().withoutLocalFastQualifiers();
568       if (RefType.getPointerAuth()) {
569         // Use the qualifier of the reference temporary to sign the pointer.
570         LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
571                                       Object.getAlignment());
572         EmitScalarInit(E, M->getExtendingDecl(), LV, false);
573       } else {
574         EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
575       }
576     }
577   } else {
578     switch (M->getStorageDuration()) {
579     case SD_Automatic:
580       if (auto *Size = EmitLifetimeStart(
581               CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
582               Alloca.getPointer())) {
583         pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
584                                                   Alloca, Size);
585       }
586       break;
587 
588     case SD_FullExpression: {
589       if (!ShouldEmitLifetimeMarkers)
590         break;
591 
592       // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
593       // marker. Instead, start the lifetime of a conditional temporary earlier
594       // so that it's unconditional. Don't do this with sanitizers, which need
595       // more precise lifetime marks. However, when inside an "await.suspend"
596       // block, we should always avoid a conditional cleanup, because it creates
597       // a boolean marker that lives across await_suspend, which can destroy the
598       // coroutine frame.
599       ConditionalEvaluation *OldConditional = nullptr;
600       CGBuilderTy::InsertPoint OldIP;
601       if (isInConditionalBranch() && !E->getType().isDestructedType() &&
602           ((!SanOpts.has(SanitizerKind::HWAddress) &&
603             !SanOpts.has(SanitizerKind::Memory) &&
604             !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
605            inSuspendBlock())) {
606         OldConditional = OutermostConditional;
607         OutermostConditional = nullptr;
608 
609         OldIP = Builder.saveIP();
610         llvm::BasicBlock *Block = OldConditional->getStartingBlock();
611         Builder.restoreIP(CGBuilderTy::InsertPoint(
612             Block, llvm::BasicBlock::iterator(Block->back())));
613       }
614 
615       if (auto *Size = EmitLifetimeStart(
616               CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
617               Alloca.getPointer())) {
618         pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
619                                              Size);
620       }
621 
622       if (OldConditional) {
623         OutermostConditional = OldConditional;
624         Builder.restoreIP(OldIP);
625       }
626       break;
627     }
628 
629     default:
630       break;
631     }
632     EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
633   }
634   pushTemporaryCleanup(*this, M, E, Object);
635 
636   // Perform derived-to-base casts and/or field accesses, to get from the
637   // temporary object we created (and, potentially, for which we extended
638   // the lifetime) to the subobject we're binding the reference to.
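  //
  // For example, both kinds of adjustment arise in ordinary code:
  //
  //   struct Base { int i; };
  //   struct Derived : Base {};
  //   const Base &b = Derived();    // derived-to-base adjustment
  //   const int  &r = Derived().i;  // field adjustment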
639   for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
640     switch (Adjustment.Kind) {
641     case SubobjectAdjustment::DerivedToBaseAdjustment:
642       Object =
643           GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
644                                 Adjustment.DerivedToBase.BasePath->path_begin(),
645                                 Adjustment.DerivedToBase.BasePath->path_end(),
646                                 /*NullCheckValue=*/ false, E->getExprLoc());
647       break;
648 
649     case SubobjectAdjustment::FieldAdjustment: {
650       LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
651       LV = EmitLValueForField(LV, Adjustment.Field);
652       assert(LV.isSimple() &&
653              "materialized temporary field is not a simple lvalue");
654       Object = LV.getAddress();
655       break;
656     }
657 
658     case SubobjectAdjustment::MemberPointerAdjustment: {
659       llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
660       Object = EmitCXXMemberDataPointerAddress(
661           E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
662       break;
663     }
664     }
665   }
666 
667   return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
668 }
669 
670 RValue
671 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
672   // Emit the expression as an lvalue.
673   LValue LV = EmitLValue(E);
674   assert(LV.isSimple());
675   llvm::Value *Value = LV.getPointer(*this);
676 
677   if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
678     // C++11 [dcl.ref]p5 (as amended by core issue 453):
679     //   If a glvalue to which a reference is directly bound designates neither
680     //   an existing object or function of an appropriate type nor a region of
681     //   storage of suitable size and alignment to contain an object of the
682     //   reference's type, the behavior is undefined.
683     QualType Ty = E->getType();
684     EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
685   }
686 
687   return RValue::get(Value);
688 }
689 
690 
691 /// getAccessedFieldNo - Given an encoded value and a result number, return the
692 /// input field number being accessed.
693 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
694                                              const llvm::Constant *Elts) {
695   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
696       ->getZExtValue();
697 }
698 
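// Mix a pointer-derived value into an accumulated hash using a multiply and
// xor-shift step (a splitmix64-style finalizer round). Used by the UBSan vptr
// type-cache lookup below.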
699 static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
700                                 llvm::Value *Ptr) {
701   llvm::Value *A0 =
702       Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
703   llvm::Value *A1 =
704       Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
705   return Builder.CreateXor(Acc, A1);
706 }
707 
708 bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
709   return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
710          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
711 }
712 
713 bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
714   CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
715   return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
716          (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
717           TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
718           TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
719 }
720 
721 bool CodeGenFunction::sanitizePerformTypeCheck() const {
722   return SanOpts.has(SanitizerKind::Null) ||
723          SanOpts.has(SanitizerKind::Alignment) ||
724          SanOpts.has(SanitizerKind::ObjectSize) ||
725          SanOpts.has(SanitizerKind::Vptr);
726 }
727 
728 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
729                                     llvm::Value *Ptr, QualType Ty,
730                                     CharUnits Alignment,
731                                     SanitizerSet SkippedChecks,
732                                     llvm::Value *ArraySize) {
733   if (!sanitizePerformTypeCheck())
734     return;
735 
736   // Don't check pointers outside the default address space. The null check
737   // isn't correct, the object-size check isn't supported by LLVM, and we can't
738   // communicate the addresses to the runtime handler for the vptr check.
739   if (Ptr->getType()->getPointerAddressSpace())
740     return;
741 
742   // Don't check pointers to volatile data. The behavior here is implementation-
743   // defined.
744   if (Ty.isVolatileQualified())
745     return;
746 
747   // Quickly determine whether we have a pointer to an alloca. It's possible
748   // to skip null checks, and some alignment checks, for these pointers. This
749   // can reduce compile-time significantly.
750   auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
751 
752   llvm::Value *IsNonNull = nullptr;
753   bool IsGuaranteedNonNull =
754       SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
755 
756   llvm::BasicBlock *Done = nullptr;
757   bool DoneViaNullSanitize = false;
758 
759   {
760     auto CheckHandler = SanitizerHandler::TypeMismatch;
761     SanitizerDebugLocation SanScope(this,
762                                     {SanitizerKind::SO_Null,
763                                      SanitizerKind::SO_ObjectSize,
764                                      SanitizerKind::SO_Alignment},
765                                     CheckHandler);
766 
767     SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 3>
768         Checks;
769 
770     llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
771     bool AllowNullPointers = isNullPointerAllowed(TCK);
772     if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
773         !IsGuaranteedNonNull) {
774       // The glvalue must not be an empty glvalue.
775       IsNonNull = Builder.CreateIsNotNull(Ptr);
776 
777       // The IR builder can constant-fold the null check if the pointer points
778       // to a constant.
779       IsGuaranteedNonNull = IsNonNull == True;
780 
781       // Skip the null check if the pointer is known to be non-null.
782       if (!IsGuaranteedNonNull) {
783         if (AllowNullPointers) {
784           // When performing pointer casts, it's OK if the value is null.
785           // Skip the remaining checks in that case.
786           Done = createBasicBlock("null");
787           DoneViaNullSanitize = true;
788           llvm::BasicBlock *Rest = createBasicBlock("not.null");
789           Builder.CreateCondBr(IsNonNull, Rest, Done);
790           EmitBlock(Rest);
791         } else {
792           Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
793         }
794       }
795     }
796 
797     if (SanOpts.has(SanitizerKind::ObjectSize) &&
798         !SkippedChecks.has(SanitizerKind::ObjectSize) &&
799         !Ty->isIncompleteType()) {
800       uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
801       llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
802       if (ArraySize)
803         Size = Builder.CreateMul(Size, ArraySize);
804 
805       // Degenerate case: new X[0] does not need an objectsize check.
806       llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
807       if (!ConstantSize || !ConstantSize->isNullValue()) {
808         // The glvalue must refer to a large enough storage region.
809         // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
810         //        to check this.
812         // FIXME: Get object address space
813         llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
814         llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
815         llvm::Value *Min = Builder.getFalse();
816         llvm::Value *NullIsUnknown = Builder.getFalse();
817         llvm::Value *Dynamic = Builder.getFalse();
818         llvm::Value *LargeEnough = Builder.CreateICmpUGE(
819             Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
820         Checks.push_back(
821             std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
822       }
823     }
824 
825     llvm::MaybeAlign AlignVal;
826     llvm::Value *PtrAsInt = nullptr;
827 
828     if (SanOpts.has(SanitizerKind::Alignment) &&
829         !SkippedChecks.has(SanitizerKind::Alignment)) {
830       AlignVal = Alignment.getAsMaybeAlign();
831       if (!Ty->isIncompleteType() && !AlignVal)
832         AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
833                                                /*ForPointeeType=*/true)
834                        .getAsMaybeAlign();
835 
836       // The glvalue must be suitably aligned.
837       if (AlignVal && *AlignVal > llvm::Align(1) &&
838           (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
839         PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
840         llvm::Value *Align = Builder.CreateAnd(
841             PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
842         llvm::Value *Aligned =
843             Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
844         if (Aligned != True)
845           Checks.push_back(
846               std::make_pair(Aligned, SanitizerKind::SO_Alignment));
847       }
848     }
849 
850     if (Checks.size() > 0) {
851       llvm::Constant *StaticData[] = {
852           EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
853           llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
854           llvm::ConstantInt::get(Int8Ty, TCK)};
855       EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
856     }
857   }
858 
859   // If possible, check that the vptr indicates that there is a subobject of
860   // type Ty at offset zero within this object.
861   //
862   // C++11 [basic.life]p5,6:
863   //   [For storage which does not refer to an object within its lifetime]
864   //   The program has undefined behavior if:
865   //    -- the [pointer or glvalue] is used to access a non-static data member
866   //       or call a non-static member function
867   if (SanOpts.has(SanitizerKind::Vptr) &&
868       !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
869     SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
870                                     SanitizerHandler::DynamicTypeCacheMiss);
871 
872     // Ensure that the pointer is non-null before loading it. If there is no
873     // compile-time guarantee, reuse the run-time null check or emit a new one.
874     if (!IsGuaranteedNonNull) {
875       if (!IsNonNull)
876         IsNonNull = Builder.CreateIsNotNull(Ptr);
877       if (!Done)
878         Done = createBasicBlock("vptr.null");
879       llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
880       Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
881       EmitBlock(VptrNotNull);
882     }
883 
884     // Compute a deterministic hash of the mangled name of the type.
885     SmallString<64> MangledName;
886     llvm::raw_svector_ostream Out(MangledName);
887     CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
888                                                      Out);
889 
890     // Skip the check if the type, matched by its mangled name, is on the
891     // NoSanitizeList.
891     if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
892                                                            Out.str())) {
893       // Load the vptr, and mix it with TypeHash.
894       llvm::Value *TypeHash =
895           llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
896 
897       llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
898       Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
899       llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
900                                           Ty->getAsCXXRecordDecl(),
901                                           VTableAuthMode::UnsafeUbsanStrip);
902       VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
903 
904       llvm::Value *Hash =
905           emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
906       Hash = Builder.CreateTrunc(Hash, IntPtrTy);
907 
908       // Look the hash up in our cache.
909       const int CacheSize = 128;
910       llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
911       llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
912                                                      "__ubsan_vptr_type_cache");
913       llvm::Value *Slot = Builder.CreateAnd(Hash,
914                                             llvm::ConstantInt::get(IntPtrTy,
915                                                                    CacheSize-1));
916       llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
917       llvm::Value *CacheVal = Builder.CreateAlignedLoad(
918           IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
919           getPointerAlign());
920 
921       // If the hash isn't in the cache, call a runtime handler to perform the
922       // hard work of checking whether the vptr is for an object of the right
923       // type. This will either fill in the cache and return, or produce a
924       // diagnostic.
925       llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
926       llvm::Constant *StaticData[] = {
927         EmitCheckSourceLocation(Loc),
928         EmitCheckTypeDescriptor(Ty),
929         CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
930         llvm::ConstantInt::get(Int8Ty, TCK)
931       };
932       llvm::Value *DynamicData[] = { Ptr, Hash };
933       EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
934                 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
935                 DynamicData);
936     }
937   }
938 
939   if (Done) {
940     SanitizerDebugLocation SanScope(
941         this,
942         {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
943         DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
944                             : SanitizerHandler::DynamicTypeCacheMiss);
945     Builder.CreateBr(Done);
946     EmitBlock(Done);
947   }
948 }
949 
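/// If \p E is a reference to a parameter that was declared with the
/// pass_object_size attribute, return the number of elements of type \p EltTy
/// that fit in the size passed for it; otherwise return nullptr. A sketch of
/// the source form this recognizes (names are illustrative only):
///
///   void fill(char *buf __attribute__((pass_object_size(0))), char v);
///
/// where callers implicitly pass __builtin_object_size(arg, 0) alongside buf.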
950 llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
951                                                    QualType EltTy) {
952   ASTContext &C = getContext();
953   uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
954   if (!EltSize)
955     return nullptr;
956 
957   auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
958   if (!ArrayDeclRef)
959     return nullptr;
960 
961   auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
962   if (!ParamDecl)
963     return nullptr;
964 
965   auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
966   if (!POSAttr)
967     return nullptr;
968 
969   // Don't load the size if it's a lower bound.
970   int POSType = POSAttr->getType();
971   if (POSType != 0 && POSType != 1)
972     return nullptr;
973 
974   // Find the implicit size parameter.
975   auto PassedSizeIt = SizeArguments.find(ParamDecl);
976   if (PassedSizeIt == SizeArguments.end())
977     return nullptr;
978 
979   const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
980   assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
981   Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
982   llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
983                                               C.getSizeType(), E->getExprLoc());
984   llvm::Value *SizeOfElement =
985       llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
986   return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
987 }
988 
989 /// If Base is known to point to the start of an array, return the length of
990 /// that array. Return nullptr if the length cannot be determined.
991 static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
992                                           const Expr *Base,
993                                           QualType &IndexedType,
994                                           LangOptions::StrictFlexArraysLevelKind
995                                           StrictFlexArraysLevel) {
996   // For the vector indexing extension, the bound is the number of elements.
997   if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
998     IndexedType = Base->getType();
999     return CGF.Builder.getInt32(VT->getNumElements());
1000   }
1001 
1002   Base = Base->IgnoreParens();
1003 
1004   if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1005     if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1006         !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
1007                                                      StrictFlexArraysLevel)) {
1008       CodeGenFunction::SanitizerScope SanScope(&CGF);
1009 
1010       IndexedType = CE->getSubExpr()->getType();
1011       const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1012       if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
1013         return CGF.Builder.getInt(CAT->getSize());
1014 
1015       if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
1016         return CGF.getVLASize(VAT).NumElts;
1017       // Ignore pass_object_size here. It's not applicable on decayed pointers.
1018     }
1019   }
1020 
1021   CodeGenFunction::SanitizerScope SanScope(&CGF);
1022 
1023   QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1024   if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
1025     IndexedType = Base->getType();
1026     return POS;
1027   }
1028 
1029   return nullptr;
1030 }
1031 
1032 namespace {
1033 
1034 /// \p StructAccessBase returns the base \p Expr of a field access. It returns
1035 /// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1036 ///
1037 ///     p in p->a.b.c
1038 ///
1039 /// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1040 /// looking for:
1041 ///
1042 ///     struct s {
1043 ///       struct s *ptr;
1044 ///       int count;
1045 ///       char array[] __attribute__((counted_by(count)));
1046 ///     };
1047 ///
1048 /// If we have an expression like \p p->ptr->array[index], we want the
1049 /// \p MemberExpr for \p p->ptr instead of \p p.
1050 class StructAccessBase
1051     : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1052   const RecordDecl *ExpectedRD;
1053 
1054   bool IsExpectedRecordDecl(const Expr *E) const {
1055     QualType Ty = E->getType();
1056     if (Ty->isPointerType())
1057       Ty = Ty->getPointeeType();
1058     return ExpectedRD == Ty->getAsRecordDecl();
1059   }
1060 
1061 public:
1062   StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1063 
1064   //===--------------------------------------------------------------------===//
1065   //                            Visitor Methods
1066   //===--------------------------------------------------------------------===//
1067 
1068   // NOTE: If we build C++ support for counted_by, then we'll have to handle
1069   // horrors like this:
1070   //
1071   //     struct S {
1072   //       int x, y;
1073   //       int blah[] __attribute__((counted_by(x)));
1074   //     } s;
1075   //
1076   //     int foo(int index, int val) {
1077   //       int (S::*IHatePMDs)[] = &S::blah;
1078   //       (s.*IHatePMDs)[index] = val;
1079   //     }
1080 
1081   const Expr *Visit(const Expr *E) {
1082     return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1083   }
1084 
1085   const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1086 
1087   // These are the types we expect to return (in order of most to least
1088   // likely):
1089   //
1090   //   1. DeclRefExpr - This is the expression for the base of the structure.
1091   //      It's exactly what we want to build an access to the \p counted_by
1092   //      field.
1093   //   2. MemberExpr - This is the expression that has the same \p RecordDecl
1094   //      as the flexible array member's lexically enclosing \p RecordDecl. This
1095   //      allows us to catch things like: "p->p->array"
1096   //   3. CompoundLiteralExpr - This is for people who create something
1097   //      heretical like (struct foo has a flexible array member):
1098   //
1099   //        (struct foo){ 1, 2 }.blah[idx];
1100   const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1101     return IsExpectedRecordDecl(E) ? E : nullptr;
1102   }
1103   const Expr *VisitMemberExpr(const MemberExpr *E) {
1104     if (IsExpectedRecordDecl(E) && E->isArrow())
1105       return E;
1106     const Expr *Res = Visit(E->getBase());
1107     return !Res && IsExpectedRecordDecl(E) ? E : Res;
1108   }
1109   const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1110     return IsExpectedRecordDecl(E) ? E : nullptr;
1111   }
1112   const Expr *VisitCallExpr(const CallExpr *E) {
1113     return IsExpectedRecordDecl(E) ? E : nullptr;
1114   }
1115 
1116   const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1117     if (IsExpectedRecordDecl(E))
1118       return E;
1119     return Visit(E->getBase());
1120   }
1121   const Expr *VisitCastExpr(const CastExpr *E) {
1122     if (E->getCastKind() == CK_LValueToRValue)
1123       return IsExpectedRecordDecl(E) ? E : nullptr;
1124     return Visit(E->getSubExpr());
1125   }
1126   const Expr *VisitParenExpr(const ParenExpr *E) {
1127     return Visit(E->getSubExpr());
1128   }
1129   const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1130     return Visit(E->getSubExpr());
1131   }
1132   const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1133     return Visit(E->getSubExpr());
1134   }
1135 };
1136 
1137 } // end anonymous namespace
1138 
1139 using RecIndicesTy = SmallVector<llvm::Value *, 8>;
1140 
1141 static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
1142                                  const FieldDecl *Field,
1143                                  RecIndicesTy &Indices) {
1144   const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1145   int64_t FieldNo = -1;
1146   for (const FieldDecl *FD : RD->fields()) {
1147     if (!Layout.containsFieldDecl(FD))
1148       // This could happen if the field has a struct type that's empty. I don't
1149       // know why either.
1150       continue;
1151 
1152     FieldNo = Layout.getLLVMFieldNo(FD);
1153     if (FD == Field) {
1154       Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1155       return true;
1156     }
1157 
1158     QualType Ty = FD->getType();
1159     if (Ty->isRecordType()) {
1160       if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1161         if (RD->isUnion())
1162           FieldNo = 0;
1163         Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1164         return true;
1165       }
1166     }
1167   }
1168 
1169   return false;
1170 }
1171 
1172 llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
1173     const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1174   const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1175 
1176   // Find the base struct expr (i.e. p in p->a.b.c.d).
1177   const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1178   if (!StructBase || StructBase->HasSideEffects(getContext()))
1179     return nullptr;
1180 
1181   llvm::Value *Res = nullptr;
1182   if (StructBase->getType()->isPointerType()) {
1183     LValueBaseInfo BaseInfo;
1184     TBAAAccessInfo TBAAInfo;
1185     Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1186     Res = Addr.emitRawPointer(*this);
1187   } else if (StructBase->isLValue()) {
1188     LValue LV = EmitLValue(StructBase);
1189     Address Addr = LV.getAddress();
1190     Res = Addr.emitRawPointer(*this);
1191   } else {
1192     return nullptr;
1193   }
1194 
1195   RecIndicesTy Indices;
1196   getGEPIndicesToField(*this, RD, CountDecl, Indices);
1197   if (Indices.empty())
1198     return nullptr;
1199 
1200   Indices.push_back(Builder.getInt32(0));
1201   return Builder.CreateInBoundsGEP(
1202       ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
1203       RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
1204 }
1205 
1206 /// This method is typically called in contexts where we can't generate
1207 /// side-effects, like in __builtin_dynamic_object_size. When finding
1208 /// expressions, only choose those that have either already been emitted or can
1209 /// be loaded without side-effects.
1210 ///
1211 /// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1212 ///   within the top-level struct.
1213 /// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1214 llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
1215     const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1216   if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1217     return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1218                                      getIntAlign(), "counted_by.load");
1219   return nullptr;
1220 }
1221 
1222 void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1223                                       llvm::Value *Index, QualType IndexType,
1224                                       bool Accessed) {
1225   assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1226          "should not be called unless adding bounds checks");
1227   const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1228       getLangOpts().getStrictFlexArraysLevel();
1229   QualType IndexedType;
1230   llvm::Value *Bound =
1231       getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1232 
1233   EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1234 }
1235 
1236 void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1237                                           llvm::Value *Index,
1238                                           QualType IndexType,
1239                                           QualType IndexedType, bool Accessed) {
1240   if (!Bound)
1241     return;
1242 
1243   auto CheckKind = SanitizerKind::SO_ArrayBounds;
1244   auto CheckHandler = SanitizerHandler::OutOfBounds;
1245   SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1246 
1247   bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1248   llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1249   llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1250 
1251   llvm::Constant *StaticData[] = {
1252     EmitCheckSourceLocation(E->getExprLoc()),
1253     EmitCheckTypeDescriptor(IndexedType),
1254     EmitCheckTypeDescriptor(IndexType)
1255   };
1256   llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1257                                 : Builder.CreateICmpULE(IndexVal, BoundVal);
1258   EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
1259 }
1260 
1261 CodeGenFunction::ComplexPairTy CodeGenFunction::
1262 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1263                          bool isInc, bool isPre) {
1264   ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1265 
1266   llvm::Value *NextVal;
1267   if (isa<llvm::IntegerType>(InVal.first->getType())) {
1268     uint64_t AmountVal = isInc ? 1 : -1;
1269     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1270 
1271     // Add the inc/dec to the real part.
1272     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1273   } else {
1274     QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1275     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1276     if (!isInc)
1277       FVal.changeSign();
1278     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1279 
1280     // Add the inc/dec to the real part.
1281     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1282   }
1283 
1284   ComplexPairTy IncVal(NextVal, InVal.second);
1285 
1286   // Store the updated result through the lvalue.
1287   EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1288   if (getLangOpts().OpenMP)
1289     CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1290                                                               E->getSubExpr());
1291 
1292   // If this is a postinc, return the value read from memory, otherwise use the
1293   // updated value.
1294   return isPre ? IncVal : InVal;
1295 }
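
// Source forms handled here, for reference:
//
//   _Complex double z = ...;
//   z++;   // post-increment: add 1.0 to the real part, return the old value
//   --z;   // pre-decrement:  add -1.0 to the real part, return the new value
//
// In both cases the imaginary component is carried through unchanged.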
1296 
1297 void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1298                                              CodeGenFunction *CGF) {
1299   // Bind VLAs in the cast type.
1300   if (CGF && E->getType()->isVariablyModifiedType())
1301     CGF->EmitVariablyModifiedType(E->getType());
1302 
1303   if (CGDebugInfo *DI = getModuleDebugInfo())
1304     DI->EmitExplicitCastType(E->getType());
1305 }
1306 
1307 //===----------------------------------------------------------------------===//
1308 //                         LValue Expression Emission
1309 //===----------------------------------------------------------------------===//
1310 
1311 static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
1312                                         TBAAAccessInfo *TBAAInfo,
1313                                         KnownNonNull_t IsKnownNonNull,
1314                                         CodeGenFunction &CGF) {
1315   // We allow this with ObjC object pointers because of fragile ABIs.
1316   assert(E->getType()->isPointerType() ||
1317          E->getType()->isObjCObjectPointerType());
1318   E = E->IgnoreParens();
1319 
1320   // Casts:
1321   if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1322     if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1323       CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1324 
1325     switch (CE->getCastKind()) {
1326     // Non-converting casts (but not C's implicit conversion from void*).
1327     case CK_BitCast:
1328     case CK_NoOp:
1329     case CK_AddressSpaceConversion:
1330       if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1331         if (PtrTy->getPointeeType()->isVoidType())
1332           break;
1333 
1334         LValueBaseInfo InnerBaseInfo;
1335         TBAAAccessInfo InnerTBAAInfo;
1336         Address Addr = CGF.EmitPointerWithAlignment(
1337             CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1338         if (BaseInfo) *BaseInfo = InnerBaseInfo;
1339         if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1340 
1341         if (isa<ExplicitCastExpr>(CE)) {
1342           LValueBaseInfo TargetTypeBaseInfo;
1343           TBAAAccessInfo TargetTypeTBAAInfo;
1344           CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
1345               E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1346           if (TBAAInfo)
1347             *TBAAInfo =
1348                 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1349           // If the source l-value is opaque, honor the alignment of the
1350           // casted-to type.
1351           if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1352             if (BaseInfo)
1353               BaseInfo->mergeForCast(TargetTypeBaseInfo);
1354             Addr.setAlignment(Align);
1355           }
1356         }
1357 
1358         if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1359             CE->getCastKind() == CK_BitCast) {
1360           if (auto PT = E->getType()->getAs<PointerType>())
1361             CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1362                                           /*MayBeNull=*/true,
1363                                           CodeGenFunction::CFITCK_UnrelatedCast,
1364                                           CE->getBeginLoc());
1365         }
1366 
1367         llvm::Type *ElemTy =
1368             CGF.ConvertTypeForMem(E->getType()->getPointeeType());
1369         Addr = Addr.withElementType(ElemTy);
1370         if (CE->getCastKind() == CK_AddressSpaceConversion)
1371           Addr = CGF.Builder.CreateAddrSpaceCast(
1372               Addr, CGF.ConvertType(E->getType()), ElemTy);
1373         return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1374                                             CE->getType());
1375       }
1376       break;
1377 
1378     // Array-to-pointer decay.
1379     case CK_ArrayToPointerDecay:
1380       return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1381 
1382     // Derived-to-base conversions.
1383     case CK_UncheckedDerivedToBase:
1384     case CK_DerivedToBase: {
1385       // TODO: Support accesses to members of base classes in TBAA. For now, we
1386       // conservatively pretend that the complete object is of the base class
1387       // type.
1388       if (TBAAInfo)
1389         *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1390       Address Addr = CGF.EmitPointerWithAlignment(
1391           CE->getSubExpr(), BaseInfo, nullptr,
1392           (KnownNonNull_t)(IsKnownNonNull ||
1393                            CE->getCastKind() == CK_UncheckedDerivedToBase));
1394       auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1395       return CGF.GetAddressOfBaseClass(
1396           Addr, Derived, CE->path_begin(), CE->path_end(),
1397           CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1398     }
1399 
1400     // TODO: Is there any reason to treat base-to-derived conversions
1401     // specially?
1402     default:
1403       break;
1404     }
1405   }
1406 
1407   // Unary &.
1408   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1409     if (UO->getOpcode() == UO_AddrOf) {
1410       LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1411       if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1412       if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1413       return LV.getAddress();
1414     }
1415   }
1416 
1417   // std::addressof and variants.
1418   if (auto *Call = dyn_cast<CallExpr>(E)) {
1419     switch (Call->getBuiltinCallee()) {
1420     default:
1421       break;
1422     case Builtin::BIaddressof:
1423     case Builtin::BI__addressof:
1424     case Builtin::BI__builtin_addressof: {
1425       LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1426       if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1427       if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1428       return LV.getAddress();
1429     }
1430     }
1431   }
1432 
1433   // TODO: conditional operators, comma.
1434 
1435   // Otherwise, use the alignment of the type.
1436   return CGF.makeNaturalAddressForPointer(
1437       CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
1438       /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1439 }
1440 
1441 /// EmitPointerWithAlignment - Given an expression of pointer type, try to
1442 /// derive a more accurate bound on the alignment of the pointer.
1443 Address CodeGenFunction::EmitPointerWithAlignment(
1444     const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1445     KnownNonNull_t IsKnownNonNull) {
1446   Address Addr =
1447       ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1448   if (IsKnownNonNull && !Addr.isKnownNonNull())
1449     Addr.setKnownNonNull();
1450   return Addr;
1451 }
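
// A hedged illustration of the alignment refinement performed above: in
//
//   int load_it(char *p) { return *(int *)p; }
//
// the operand only guarantees the alignment of 'char', but because the cast is
// explicit and the source l-value is not backed by a declaration, the access is
// emitted with the natural alignment of 'int' (typically 4).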
1452 
1453 llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1454   llvm::Value *V = RV.getScalarVal();
1455   if (auto MPT = T->getAs<MemberPointerType>())
1456     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1457   return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1458 }
1459 
1460 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1461   if (Ty->isVoidType())
1462     return RValue::get(nullptr);
1463 
1464   switch (getEvaluationKind(Ty)) {
1465   case TEK_Complex: {
1466     llvm::Type *EltTy =
1467       ConvertType(Ty->castAs<ComplexType>()->getElementType());
1468     llvm::Value *U = llvm::UndefValue::get(EltTy);
1469     return RValue::getComplex(std::make_pair(U, U));
1470   }
1471 
1472   // If this is a use of an undefined aggregate type, the aggregate must have an
1473   // identifiable address.  Just because the contents of the value are undefined
1474   // doesn't mean that the address can't be taken and compared.
1475   case TEK_Aggregate: {
1476     Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1477     return RValue::getAggregate(DestPtr);
1478   }
1479 
1480   case TEK_Scalar:
1481     return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1482   }
1483   llvm_unreachable("bad evaluation kind");
1484 }
1485 
1486 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1487                                               const char *Name) {
1488   ErrorUnsupported(E, Name);
1489   return GetUndefRValue(E->getType());
1490 }
1491 
1492 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1493                                               const char *Name) {
1494   ErrorUnsupported(E, Name);
1495   llvm::Type *ElTy = ConvertType(E->getType());
1496   llvm::Type *Ty = UnqualPtrTy;
1497   return MakeAddrLValue(
1498       Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1499 }
1500 
1501 bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1502   const Expr *Base = Obj;
1503   while (!isa<CXXThisExpr>(Base)) {
1504     // The result of a dynamic_cast can be null.
1505     if (isa<CXXDynamicCastExpr>(Base))
1506       return false;
1507 
1508     if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1509       Base = CE->getSubExpr();
1510     } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1511       Base = PE->getSubExpr();
1512     } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1513       if (UO->getOpcode() == UO_Extension)
1514         Base = UO->getSubExpr();
1515       else
1516         return false;
1517     } else {
1518       return false;
1519     }
1520   }
1521   return true;
1522 }
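
// Examples of bases this accepts and rejects (illustrative only):
//
//   this->field                          // plain CXXThisExpr
//   (__extension__ (this))->field        // parens and __extension__ are peeled
//   ((Base *)this)->field                // casts of 'this' are peeled
//   dynamic_cast<Derived *>(this)->field // rejected: the result may be null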
1523 
1524 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1525   LValue LV;
1526   if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1527     LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1528   else
1529     LV = EmitLValue(E);
1530   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1531     SanitizerSet SkippedChecks;
1532     if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1533       bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1534       if (IsBaseCXXThis)
1535         SkippedChecks.set(SanitizerKind::Alignment, true);
1536       if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1537         SkippedChecks.set(SanitizerKind::Null, true);
1538     }
1539     EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1540   }
1541   return LV;
1542 }
1543 
1544 /// EmitLValue - Emit code to compute a designator that specifies the location
1545 /// of the expression.
1546 ///
1547 /// This can return one of two things: a simple address or a bitfield reference.
1548 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1549 /// an LLVM pointer type.
1550 ///
1551 /// If this returns a bitfield reference, nothing about the pointee type of the
1552 /// LLVM value is known: For example, it may not be a pointer to an integer.
1553 ///
1554 /// If this returns a normal address, and if the lvalue's C type is fixed size,
1555 /// this method guarantees that the returned pointer type will point to an LLVM
1556 /// type of the same size as the lvalue's type.  If the lvalue has a
1557 /// variable-length type, this is not possible.
1558 ///
1559 LValue CodeGenFunction::EmitLValue(const Expr *E,
1560                                    KnownNonNull_t IsKnownNonNull) {
1561   // Run with sufficient stack space so that deeply nested expressions do not
1562   // cause a stack overflow.
1563   LValue LV;
1564   CGM.runWithSufficientStackSpace(
1565       E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1566 
1567   if (IsKnownNonNull && !LV.isKnownNonNull())
1568     LV.setKnownNonNull();
1569   return LV;
1570 }
1571 
1572 static QualType getConstantExprReferredType(const FullExpr *E,
1573                                             const ASTContext &Ctx) {
1574   const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1575   if (isa<OpaqueValueExpr>(SE))
1576     return SE->getType();
1577   return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1578 }
1579 
1580 LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1581                                          KnownNonNull_t IsKnownNonNull) {
1582   ApplyDebugLocation DL(*this, E);
1583   switch (E->getStmtClass()) {
1584   default: return EmitUnsupportedLValue(E, "l-value expression");
1585 
1586   case Expr::ObjCPropertyRefExprClass:
1587     llvm_unreachable("cannot emit a property reference directly");
1588 
1589   case Expr::ObjCSelectorExprClass:
1590     return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1591   case Expr::ObjCIsaExprClass:
1592     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1593   case Expr::BinaryOperatorClass:
1594     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1595   case Expr::CompoundAssignOperatorClass: {
1596     QualType Ty = E->getType();
1597     if (const AtomicType *AT = Ty->getAs<AtomicType>())
1598       Ty = AT->getValueType();
1599     if (!Ty->isAnyComplexType())
1600       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1601     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1602   }
1603   case Expr::CallExprClass:
1604   case Expr::CXXMemberCallExprClass:
1605   case Expr::CXXOperatorCallExprClass:
1606   case Expr::UserDefinedLiteralClass:
1607     return EmitCallExprLValue(cast<CallExpr>(E));
1608   case Expr::CXXRewrittenBinaryOperatorClass:
1609     return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1610                       IsKnownNonNull);
1611   case Expr::VAArgExprClass:
1612     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1613   case Expr::DeclRefExprClass:
1614     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1615   case Expr::ConstantExprClass: {
1616     const ConstantExpr *CE = cast<ConstantExpr>(E);
1617     if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1618       QualType RetType = getConstantExprReferredType(CE, getContext());
1619       return MakeNaturalAlignAddrLValue(Result, RetType);
1620     }
1621     return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1622   }
1623   case Expr::ParenExprClass:
1624     return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1625   case Expr::GenericSelectionExprClass:
1626     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1627                       IsKnownNonNull);
1628   case Expr::PredefinedExprClass:
1629     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1630   case Expr::StringLiteralClass:
1631     return EmitStringLiteralLValue(cast<StringLiteral>(E));
1632   case Expr::ObjCEncodeExprClass:
1633     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1634   case Expr::PseudoObjectExprClass:
1635     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1636   case Expr::InitListExprClass:
1637     return EmitInitListLValue(cast<InitListExpr>(E));
1638   case Expr::CXXTemporaryObjectExprClass:
1639   case Expr::CXXConstructExprClass:
1640     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1641   case Expr::CXXBindTemporaryExprClass:
1642     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1643   case Expr::CXXUuidofExprClass:
1644     return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1645   case Expr::LambdaExprClass:
1646     return EmitAggExprToLValue(E);
1647 
1648   case Expr::ExprWithCleanupsClass: {
1649     const auto *cleanups = cast<ExprWithCleanups>(E);
1650     RunCleanupsScope Scope(*this);
1651     LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1652     if (LV.isSimple()) {
1653       // Defend against branches out of GNU statement expressions surrounded by
1654       // cleanups.
1655       Address Addr = LV.getAddress();
1656       llvm::Value *V = Addr.getBasePointer();
1657       Scope.ForceCleanup({&V});
1658       Addr.replaceBasePointer(V);
1659       return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1660                               LV.getBaseInfo(), LV.getTBAAInfo());
1661     }
1662     // FIXME: Is it possible to create an ExprWithCleanups that produces a
1663     // bitfield lvalue or some other non-simple lvalue?
1664     return LV;
1665   }
1666 
1667   case Expr::CXXDefaultArgExprClass: {
1668     auto *DAE = cast<CXXDefaultArgExpr>(E);
1669     CXXDefaultArgExprScope Scope(*this, DAE);
1670     return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1671   }
1672   case Expr::CXXDefaultInitExprClass: {
1673     auto *DIE = cast<CXXDefaultInitExpr>(E);
1674     CXXDefaultInitExprScope Scope(*this, DIE);
1675     return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1676   }
1677   case Expr::CXXTypeidExprClass:
1678     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1679 
1680   case Expr::ObjCMessageExprClass:
1681     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1682   case Expr::ObjCIvarRefExprClass:
1683     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1684   case Expr::StmtExprClass:
1685     return EmitStmtExprLValue(cast<StmtExpr>(E));
1686   case Expr::UnaryOperatorClass:
1687     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1688   case Expr::ArraySubscriptExprClass:
1689     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1690   case Expr::MatrixSubscriptExprClass:
1691     return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1692   case Expr::ArraySectionExprClass:
1693     return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1694   case Expr::ExtVectorElementExprClass:
1695     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1696   case Expr::CXXThisExprClass:
1697     return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
1698   case Expr::MemberExprClass:
1699     return EmitMemberExpr(cast<MemberExpr>(E));
1700   case Expr::CompoundLiteralExprClass:
1701     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1702   case Expr::ConditionalOperatorClass:
1703     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1704   case Expr::BinaryConditionalOperatorClass:
1705     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1706   case Expr::ChooseExprClass:
1707     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1708   case Expr::OpaqueValueExprClass:
1709     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1710   case Expr::SubstNonTypeTemplateParmExprClass:
1711     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1712                       IsKnownNonNull);
1713   case Expr::ImplicitCastExprClass:
1714   case Expr::CStyleCastExprClass:
1715   case Expr::CXXFunctionalCastExprClass:
1716   case Expr::CXXStaticCastExprClass:
1717   case Expr::CXXDynamicCastExprClass:
1718   case Expr::CXXReinterpretCastExprClass:
1719   case Expr::CXXConstCastExprClass:
1720   case Expr::CXXAddrspaceCastExprClass:
1721   case Expr::ObjCBridgedCastExprClass:
1722     return EmitCastLValue(cast<CastExpr>(E));
1723 
1724   case Expr::MaterializeTemporaryExprClass:
1725     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1726 
1727   case Expr::CoawaitExprClass:
1728     return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1729   case Expr::CoyieldExprClass:
1730     return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1731   case Expr::PackIndexingExprClass:
1732     return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1733   case Expr::HLSLOutArgExprClass:
1734     llvm_unreachable("cannot emit a HLSL out argument directly");
1735   }
1736 }
1737 
1738 /// Given an object of the given canonical type, can we safely copy a
1739 /// value out of it based on its initializer?
1740 static bool isConstantEmittableObjectType(QualType type) {
1741   assert(type.isCanonical());
1742   assert(!type->isReferenceType());
1743 
1744   // Must be const-qualified but non-volatile.
1745   Qualifiers qs = type.getLocalQualifiers();
1746   if (!qs.hasConst() || qs.hasVolatile()) return false;
1747 
1748   // Otherwise, all object types satisfy this except C++ classes with
1749   // mutable subobjects or non-trivial copy/destroy behavior.
1750   if (const auto *RT = dyn_cast<RecordType>(type))
1751     if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1752       if (RD->hasMutableFields() || !RD->isTrivial())
1753         return false;
1754 
1755   return true;
1756 }
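
// For example (a sketch of the intent): 'const int' and 'const float' qualify,
// but the class below does not, because a mutable member may change even
// through a const-qualified object:
//
//   struct Counter { mutable int hits; int limit; };
//   const Counter c = {0, 10};   // loads of c.hits must not be folded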
1757 
1758 /// Can we constant-emit a load of a reference to a variable of the
1759 /// given type?  This is different from predicates like
1760 /// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1761 /// in situations that don't necessarily satisfy the language's rules
1762 /// for this (e.g. C++'s ODR-use rules).  For example, we want to able
1763 /// to do this with const float variables even if those variables
1764 /// aren't marked 'constexpr'.
1765 enum ConstantEmissionKind {
1766   CEK_None,
1767   CEK_AsReferenceOnly,
1768   CEK_AsValueOrReference,
1769   CEK_AsValueOnly
1770 };
1771 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1772   type = type.getCanonicalType();
1773   if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1774     if (isConstantEmittableObjectType(ref->getPointeeType()))
1775       return CEK_AsValueOrReference;
1776     return CEK_AsReferenceOnly;
1777   }
1778   if (isConstantEmittableObjectType(type))
1779     return CEK_AsValueOnly;
1780   return CEK_None;
1781 }
1782 
1783 /// Try to emit a reference to the given value without producing it as
1784 /// an l-value.  This is just an optimization, but it avoids us needing
1785 /// to emit global copies of variables if they're named without triggering
1786 /// a formal use in a context where we can't emit a direct reference to them,
1787 /// for instance if a block or lambda or a member of a local class uses a
1788 /// const int variable or constexpr variable from an enclosing function.
1789 CodeGenFunction::ConstantEmission
1790 CodeGenFunction::tryEmitAsConstant(const DeclRefExpr *RefExpr) {
1791   const ValueDecl *Value = RefExpr->getDecl();
1792 
1793   // The value needs to be an enum constant or a constant variable.
1794   ConstantEmissionKind CEK;
1795   if (isa<ParmVarDecl>(Value)) {
1796     CEK = CEK_None;
1797   } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
1798     CEK = checkVarTypeForConstantEmission(var->getType());
1799   } else if (isa<EnumConstantDecl>(Value)) {
1800     CEK = CEK_AsValueOnly;
1801   } else {
1802     CEK = CEK_None;
1803   }
1804   if (CEK == CEK_None) return ConstantEmission();
1805 
1806   Expr::EvalResult result;
1807   bool resultIsReference;
1808   QualType resultType;
1809 
1810   // It's best to evaluate all the way as an r-value if that's permitted.
1811   if (CEK != CEK_AsReferenceOnly &&
1812       RefExpr->EvaluateAsRValue(result, getContext())) {
1813     resultIsReference = false;
1814     resultType = RefExpr->getType().getUnqualifiedType();
1815 
1816   // Otherwise, try to evaluate as an l-value.
1817   } else if (CEK != CEK_AsValueOnly &&
1818              RefExpr->EvaluateAsLValue(result, getContext())) {
1819     resultIsReference = true;
1820     resultType = Value->getType();
1821 
1822   // Failure.
1823   } else {
1824     return ConstantEmission();
1825   }
1826 
1827   // In any case, if the initializer has side-effects, abandon ship.
1828   if (result.HasSideEffects)
1829     return ConstantEmission();
1830 
1831   // In CUDA/HIP device compilation, a lambda may capture a reference variable
1832   // referencing a global host variable by copy. In this case the lambda should
1833   // make a copy of the value of the global host variable. The DRE of the
1834   // captured reference variable cannot be emitted as a load from the host
1835   // global variable as a compile-time constant, since the host variable is not
1836   // accessible on device. The DRE of the captured reference variable has to be
1837   // loaded from captures.
1838   if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1839       RefExpr->refersToEnclosingVariableOrCapture()) {
1840     auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1841     if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1842       const APValue::LValueBase &base = result.Val.getLValueBase();
1843       if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1844         if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1845           if (!VD->hasAttr<CUDADeviceAttr>()) {
1846             return ConstantEmission();
1847           }
1848         }
1849       }
1850     }
1851   }
1852 
1853   // Emit as a constant.
1854   llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
1855       RefExpr->getLocation(), result.Val, resultType);
1856 
1857   // Make sure we emit a debug reference to the global variable.
1858   // This should probably fire even for
1859   if (isa<VarDecl>(Value)) {
1860     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
1861       EmitDeclRefExprDbgValue(RefExpr, result.Val);
1862   } else {
1863     assert(isa<EnumConstantDecl>(Value));
1864     EmitDeclRefExprDbgValue(RefExpr, result.Val);
1865   }
1866 
1867   // If we emitted a reference constant, we need to dereference that.
1868   if (resultIsReference)
1869     return ConstantEmission::forReference(C);
1870 
1871   return ConstantEmission::forValue(C);
1872 }
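
// A minimal sketch of the motivating case described above: a lambda naming a
// constant from the enclosing function without capturing it.
//
//   void g() {
//     const int limit = 42;
//     auto f = [] { return limit + 1; };   // 'limit' folds to 42 here
//   }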
1873 
1874 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1875                                                         const MemberExpr *ME) {
1876   if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1877     // Try to emit static variable member expressions as DREs.
1878     return DeclRefExpr::Create(
1879         CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1880         /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1881         ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1882   }
1883   return nullptr;
1884 }
1885 
1886 CodeGenFunction::ConstantEmission
1887 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1888   if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1889     return tryEmitAsConstant(DRE);
1890   return ConstantEmission();
1891 }
1892 
1893 llvm::Value *CodeGenFunction::emitScalarConstant(
1894     const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1895   assert(Constant && "not a constant");
1896   if (Constant.isReference())
1897     return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1898                             E->getExprLoc())
1899         .getScalarVal();
1900   return Constant.getValue();
1901 }
1902 
1903 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1904                                                SourceLocation Loc) {
1905   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1906                           lvalue.getType(), Loc, lvalue.getBaseInfo(),
1907                           lvalue.getTBAAInfo(), lvalue.isNontemporal());
1908 }
1909 
1910 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1911                             llvm::APInt &Min, llvm::APInt &End,
1912                             bool StrictEnums, bool IsBool) {
1913   const EnumType *ET = Ty->getAs<EnumType>();
1914   bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1915                                 ET && !ET->getDecl()->isFixed();
1916   if (!IsBool && !IsRegularCPlusPlusEnum)
1917     return false;
1918 
1919   if (IsBool) {
1920     Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1921     End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1922   } else {
1923     const EnumDecl *ED = ET->getDecl();
1924     ED->getValueRange(End, Min);
1925   }
1926   return true;
1927 }
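
// For example (hedged sketch): a bool load always gets the range [0, 2). With
// -fstrict-enums in C++, an enum with an unfixed underlying type such as
//
//   enum Color { Red, Green, Blue };
//
// gets the range [0, 4), covering every value representable in the bits its
// enumerators require.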
1928 
1929 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1930   llvm::APInt Min, End;
1931   if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1932                        Ty->hasBooleanRepresentation() && !Ty->isVectorType()))
1933     return nullptr;
1934 
1935   llvm::MDBuilder MDHelper(getLLVMContext());
1936   return MDHelper.createRange(Min, End);
1937 }
1938 
1939 void CodeGenFunction::maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
1940                                               SourceLocation Loc) {
1941   if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1942     // In order to prevent the optimizer from throwing away the check, don't
1943     // attach range metadata to the load.
1944   } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
1945     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
1946       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1947       Load->setMetadata(llvm::LLVMContext::MD_noundef,
1948                         llvm::MDNode::get(CGM.getLLVMContext(), {}));
1949     }
1950   }
1951 }
1952 
1953 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1954                                            SourceLocation Loc) {
1955   bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1956   bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1957   if (!HasBoolCheck && !HasEnumCheck)
1958     return false;
1959 
1960   bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
1961                 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1962   bool NeedsBoolCheck = HasBoolCheck && IsBool;
1963   bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1964   if (!NeedsBoolCheck && !NeedsEnumCheck)
1965     return false;
1966 
1967   // Single-bit booleans don't need to be checked. Special-case this to avoid
1968   // a bit width mismatch when handling bitfield values. This is handled by
1969   // EmitFromMemory for the non-bitfield case.
1970   if (IsBool &&
1971       cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1972     return false;
1973 
1974   if (NeedsEnumCheck &&
1975       getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
1976     return false;
1977 
1978   llvm::APInt Min, End;
1979   if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1980     return true;
1981 
1982   SanitizerKind::SanitizerOrdinal Kind =
1983       NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
1984 
1985   auto &Ctx = getLLVMContext();
1986   auto CheckHandler = SanitizerHandler::LoadInvalidValue;
1987   SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
1988   llvm::Value *Check;
1989   --End;
1990   if (!Min) {
1991     Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1992   } else {
1993     llvm::Value *Upper =
1994         Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1995     llvm::Value *Lower =
1996         Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1997     Check = Builder.CreateAnd(Upper, Lower);
1998   }
1999   llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
2000                                   EmitCheckTypeDescriptor(Ty)};
2001   EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
2002   return true;
2003 }
2004 
2005 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
2006                                                QualType Ty,
2007                                                SourceLocation Loc,
2008                                                LValueBaseInfo BaseInfo,
2009                                                TBAAAccessInfo TBAAInfo,
2010                                                bool isNontemporal) {
2011   if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2012     if (GV->isThreadLocal())
2013       Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2014                               NotKnownNonNull);
2015 
2016   if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2017     // Boolean vectors use `iN` as storage type.
2018     if (ClangVecTy->isPackedVectorBoolType(getContext())) {
2019       llvm::Type *ValTy = ConvertType(Ty);
2020       unsigned ValNumElems =
2021           cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2022       // Load the `iP` storage object (P is the padded vector size).
2023       auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
2024       const auto *RawIntTy = RawIntV->getType();
2025       assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
2026       // Bitcast iP --> <P x i1>.
2027       auto *PaddedVecTy = llvm::FixedVectorType::get(
2028           Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2029       llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2030       // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2031       V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2032 
2033       return EmitFromMemory(V, Ty);
2034     }
2035 
2036     // Handle vectors whose size is likely to be expanded to a larger size in
2037     // memory to improve performance.
2038     auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2039     auto *NewVecTy =
2040         CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());
2041 
2042     if (VTy != NewVecTy) {
2043       Address Cast = Addr.withElementType(NewVecTy);
2044       llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2045       unsigned OldNumElements = VTy->getNumElements();
2046       SmallVector<int, 16> Mask(OldNumElements);
2047       std::iota(Mask.begin(), Mask.end(), 0);
2048       V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2049       return EmitFromMemory(V, Ty);
2050     }
2051   }
2052 
2053   // Atomic operations have to be done on integral types.
2054   LValue AtomicLValue =
2055       LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2056   if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2057     return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2058   }
2059 
2060   Addr =
2061       Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2062 
2063   llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2064   if (isNontemporal) {
2065     llvm::MDNode *Node = llvm::MDNode::get(
2066         Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2067     Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2068   }
2069 
2070   CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2071 
2072   maybeAttachRangeForLoad(Load, Ty, Loc);
2073 
2074   return EmitFromMemory(Load, Ty);
2075 }
2076 
2077 /// Converts a scalar value from its primary IR type (as returned
2078 /// by ConvertType) to its load/store type (as returned by
2079 /// convertTypeForLoadStore).
2080 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2081   if (auto *AtomicTy = Ty->getAs<AtomicType>())
2082     Ty = AtomicTy->getValueType();
2083 
2084   if (Ty->isExtVectorBoolType()) {
2085     llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2086     if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
2087                                      Value->getType()->getScalarSizeInBits())
2088       return Builder.CreateZExt(Value, StoreTy);
2089 
2090     // Expand to the memory bit width.
2091     unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2092     // <N x i1> --> <P x i1>.
2093     Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2094     // <P x i1> --> iP.
2095     Value = Builder.CreateBitCast(Value, StoreTy);
2096   }
2097 
2098   if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
2099     llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2100     bool Signed = Ty->isSignedIntegerOrEnumerationType();
2101     return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2102   }
2103 
2104   return Value;
2105 }
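
// For instance (a sketch of the usual lowering): a C/C++ 'bool' is 'i1' as a
// primary IR value but is stored as 'i8' in memory, so a store goes through
//
//   %storedv = zext i1 %v to i8
//   store i8 %storedv, ptr %addr
//
// and EmitFromMemory below emits the matching 'trunc ... to i1' ("loadedv").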
2106 
2107 /// Converts a scalar value from its load/store type (as returned
2108 /// by convertTypeForLoadStore) to its primary IR type (as returned
2109 /// by ConvertType).
2110 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2111   if (auto *AtomicTy = Ty->getAs<AtomicType>())
2112     Ty = AtomicTy->getValueType();
2113 
2114   if (Ty->isPackedVectorBoolType(getContext())) {
2115     const auto *RawIntTy = Value->getType();
2116 
2117     // Bitcast iP --> <P x i1>.
2118     auto *PaddedVecTy = llvm::FixedVectorType::get(
2119         Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2120     auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2121     // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2122     llvm::Type *ValTy = ConvertType(Ty);
2123     unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2124     return emitBoolVecConversion(V, ValNumElems, "extractvec");
2125   }
2126 
2127   llvm::Type *ResTy = ConvertType(Ty);
2128   if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() ||
2129       Ty->isExtVectorBoolType())
2130     return Builder.CreateTrunc(Value, ResTy, "loadedv");
2131 
2132   return Value;
2133 }
2134 
2135 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
2136 // MatrixType), if it points to an array (the memory type of MatrixType).
2137 static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
2138                                             CodeGenFunction &CGF,
2139                                             bool IsVector = true) {
2140   auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2141   if (ArrayTy && IsVector) {
2142     auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2143                                                 ArrayTy->getNumElements());
2144 
2145     return Addr.withElementType(VectorTy);
2146   }
2147   auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2148   if (VectorTy && !IsVector) {
2149     auto *ArrayTy = llvm::ArrayType::get(
2150         VectorTy->getElementType(),
2151         cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2152 
2153     return Addr.withElementType(ArrayTy);
2154   }
2155 
2156   return Addr;
2157 }
2158 
2159 // Emit a store of a matrix LValue. This may require casting the original
2160 // pointer from the memory type (ArrayType) to a pointer to the value type
2161 // (VectorType).
2162 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2163                                     bool isInit, CodeGenFunction &CGF) {
2164   Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2165                                            value->getType()->isVectorTy());
2166   CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2167                         lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2168                         lvalue.isNontemporal());
2169 }
2170 
2171 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2172                                         bool Volatile, QualType Ty,
2173                                         LValueBaseInfo BaseInfo,
2174                                         TBAAAccessInfo TBAAInfo,
2175                                         bool isInit, bool isNontemporal) {
2176   if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2177     if (GV->isThreadLocal())
2178       Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2179                               NotKnownNonNull);
2180 
2181   // Handle vectors whose size is likely to be expanded to a larger size in
2182   // memory to improve performance.
2183   llvm::Type *SrcTy = Value->getType();
2184   if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2185     if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2186       auto *NewVecTy =
2187           CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
2188       if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
2189           VecTy != NewVecTy) {
2190         SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
2191         std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2192         Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
2193         SrcTy = NewVecTy;
2194       }
2195       if (Addr.getElementType() != SrcTy)
2196         Addr = Addr.withElementType(SrcTy);
2197     }
2198   }
2199 
2200   Value = EmitToMemory(Value, Ty);
2201 
2202   LValue AtomicLValue =
2203       LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2204   if (Ty->isAtomicType() ||
2205       (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2206     EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2207     return;
2208   }
2209 
2210   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2211   addInstToCurrentSourceAtom(Store, Value);
2212 
2213   if (isNontemporal) {
2214     llvm::MDNode *Node =
2215         llvm::MDNode::get(Store->getContext(),
2216                           llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2217     Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2218   }
2219 
2220   CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2221 }
2222 
2223 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2224                                         bool isInit) {
2225   if (lvalue.getType()->isConstantMatrixType()) {
2226     EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2227     return;
2228   }
2229 
2230   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2231                     lvalue.getType(), lvalue.getBaseInfo(),
2232                     lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2233 }
2234 
2235 // Emit a load of an LValue of matrix type. This may require casting the pointer
2236 // from the memory type (ArrayType) to a pointer to the value type (VectorType).
2237 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
2238                                      CodeGenFunction &CGF) {
2239   assert(LV.getType()->isConstantMatrixType());
2240   Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
2241   LV.setAddress(Addr);
2242   return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2243 }
2244 
2245 RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
2246                                            SourceLocation Loc) {
2247   QualType Ty = LV.getType();
2248   switch (getEvaluationKind(Ty)) {
2249   case TEK_Scalar:
2250     return EmitLoadOfLValue(LV, Loc);
2251   case TEK_Complex:
2252     return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2253   case TEK_Aggregate:
2254     EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2255     return Slot.asRValue();
2256   }
2257   llvm_unreachable("bad evaluation kind");
2258 }
2259 
2260 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2261 /// method emits the address of the lvalue, then loads the result as an rvalue,
2262 /// returning the rvalue.
2263 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
2264   // Load from __ptrauth.
2265   if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
2266     LV.getQuals().removePointerAuth();
2267     llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
2268     return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
2269                                                 LV.getAddress(),
2270                                                 /*known nonnull*/ false));
2271   }
2272 
2273   if (LV.isObjCWeak()) {
2274     // load of a __weak object.
2275     Address AddrWeakObj = LV.getAddress();
2276     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2277                                                              AddrWeakObj));
2278   }
2279   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
2280     // In MRC mode, we do a load+autorelease.
2281     if (!getLangOpts().ObjCAutoRefCount) {
2282       return RValue::get(EmitARCLoadWeak(LV.getAddress()));
2283     }
2284 
2285     // In ARC mode, we load retained and then consume the value.
2286     llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2287     Object = EmitObjCConsumeObject(LV.getType(), Object);
2288     return RValue::get(Object);
2289   }
2290 
2291   if (LV.isSimple()) {
2292     assert(!LV.getType()->isFunctionType());
2293 
2294     if (LV.getType()->isConstantMatrixType())
2295       return EmitLoadOfMatrixLValue(LV, Loc, *this);
2296 
2297     // Everything needs a load.
2298     return RValue::get(EmitLoadOfScalar(LV, Loc));
2299   }
2300 
2301   if (LV.isVectorElt()) {
2302     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2303                                               LV.isVolatileQualified());
2304     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2305                                                     "vecext"));
2306   }
2307 
2308   // If this is a reference to a subset of the elements of a vector, either
2309   // shuffle the input or extract/insert them as appropriate.
2310   if (LV.isExtVectorElt()) {
2311     return EmitLoadOfExtVectorElementLValue(LV);
2312   }
2313 
2314   // Global Register variables always invoke intrinsics
2315   if (LV.isGlobalReg())
2316     return EmitLoadOfGlobalRegLValue(LV);
2317 
2318   if (LV.isMatrixElt()) {
2319     llvm::Value *Idx = LV.getMatrixIdx();
2320     if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2321       const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2322       llvm::MatrixBuilder MB(Builder);
2323       MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2324     }
2325     llvm::LoadInst *Load =
2326         Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2327     return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2328   }
2329 
2330   assert(LV.isBitField() && "Unknown LValue type!");
2331   return EmitLoadOfBitfieldLValue(LV, Loc);
2332 }
2333 
2334 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2335                                                  SourceLocation Loc) {
2336   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2337 
2338   // Get the output type.
2339   llvm::Type *ResLTy = ConvertType(LV.getType());
2340 
2341   Address Ptr = LV.getBitFieldAddress();
2342   llvm::Value *Val =
2343       Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2344 
2345   bool UseVolatile = LV.isVolatileQualified() &&
2346                      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2347   const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2348   const unsigned StorageSize =
2349       UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2350   if (Info.IsSigned) {
2351     assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2352     unsigned HighBits = StorageSize - Offset - Info.Size;
2353     if (HighBits)
2354       Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2355     if (Offset + HighBits)
2356       Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2357   } else {
2358     if (Offset)
2359       Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2360     if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2361       Val = Builder.CreateAnd(
2362           Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2363   }
2364   Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2365   EmitScalarRangeCheck(Val, LV.getType(), Loc);
2366   return RValue::get(Val);
2367 }
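
// A worked example of the shifting above: for a signed bit-field with
// Offset = 3 and Size = 5 in a 32-bit storage unit (values assumed purely for
// illustration), HighBits = 32 - 3 - 5 = 24, so the field is extracted as
//
//   %bf.shl  = shl  i32 %bf.load, 24   ; drop the bits above the field
//   %bf.ashr = ashr i32 %bf.shl, 27    ; 24 + 3, sign-extending the field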
2368 
2369 // If this is a reference to a subset of the elements of a vector, create an
2370 // appropriate shufflevector.
2371 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2372   llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2373                                         LV.isVolatileQualified());
2374 
2375   // HLSL allows treating scalars as one-element vectors. Converting the scalar
2376   // IR value to a vector here allows the rest of codegen to behave as normal.
2377   if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2378     llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2379     llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2380     Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2381   }
2382 
2383   const llvm::Constant *Elts = LV.getExtVectorElts();
2384 
2385   // If the result of the expression is a non-vector type, we must be extracting
2386   // a single element.  Just codegen as an extractelement.
2387   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2388   if (!ExprVT) {
2389     unsigned InIdx = getAccessedFieldNo(0, Elts);
2390     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2391 
2392     llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);
2393 
2394     llvm::Type *LVTy = ConvertType(LV.getType());
2395     if (Element->getType()->getPrimitiveSizeInBits() >
2396         LVTy->getPrimitiveSizeInBits())
2397       Element = Builder.CreateTrunc(Element, LVTy);
2398 
2399     return RValue::get(Element);
2400   }
2401 
2402   // Always use shuffle vector to try to retain the original program structure
2403   unsigned NumResultElts = ExprVT->getNumElements();
2404 
2405   SmallVector<int, 4> Mask;
2406   for (unsigned i = 0; i != NumResultElts; ++i)
2407     Mask.push_back(getAccessedFieldNo(i, Elts));
2408 
2409   Vec = Builder.CreateShuffleVector(Vec, Mask);
2410 
2411   if (LV.getType()->isExtVectorBoolType())
2412     Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");
2413 
2414   return RValue::get(Vec);
2415 }
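
// For example (sketch, using the ext_vector_type swizzle syntax):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   float4 v;
//   float2 xy = v.xy;   // vector result: shufflevector with mask <0, 1>
//   float  x  = v.x;    // scalar result: extractelement at index 0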
2416 
2417 /// Generates lvalue for partial ext_vector access.
2418 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2419   Address VectorAddress = LV.getExtVectorAddress();
2420   QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2421   llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2422 
2423   Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2424 
2425   const llvm::Constant *Elts = LV.getExtVectorElts();
2426   unsigned ix = getAccessedFieldNo(0, Elts);
2427 
2428   Address VectorBasePtrPlusIx =
2429     Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2430                                    "vector.elt");
2431 
2432   return VectorBasePtrPlusIx;
2433 }
2434 
2435 /// Loads of global named registers are always calls to intrinsics.
2436 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2437   assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2438          "Bad type for register variable");
2439   llvm::MDNode *RegName = cast<llvm::MDNode>(
2440       cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2441 
2442   // We accept integer and pointer types only
2443   llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2444   llvm::Type *Ty = OrigTy;
2445   if (OrigTy->isPointerTy())
2446     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2447   llvm::Type *Types[] = { Ty };
2448 
2449   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2450   llvm::Value *Call = Builder.CreateCall(
2451       F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2452   if (OrigTy->isPointerTy())
2453     Call = Builder.CreateIntToPtr(Call, OrigTy);
2454   return RValue::get(Call);
2455 }
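
// A sketch of the GNU global named register extension handled here:
//
//   register unsigned long current_sp asm("sp");
//   unsigned long read_sp(void) { return current_sp; }
//
// The read becomes a call to the llvm.read_register intrinsic with the
// register name passed as metadata; pointer-typed registers additionally go
// through the inttoptr emitted above.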
2456 
2457 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2458 /// lvalue, where both are guaranteed to have the same type, and that type
2459 /// is 'Ty'.
2460 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2461                                              bool isInit) {
2462   if (!Dst.isSimple()) {
2463     if (Dst.isVectorElt()) {
2464       // Read/modify/write the vector, inserting the new element.
2465       llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2466                                             Dst.isVolatileQualified());
2467       llvm::Type *VecTy = Vec->getType();
2468       llvm::Value *SrcVal = Src.getScalarVal();
2469 
2470       if (SrcVal->getType()->getPrimitiveSizeInBits() <
2471           VecTy->getScalarSizeInBits())
2472         SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType());
2473 
2474       auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2475       if (IRStoreTy) {
2476         auto *IRVecTy = llvm::FixedVectorType::get(
2477             Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2478         Vec = Builder.CreateBitCast(Vec, IRVecTy);
2479         // iN --> <N x i1>.
2480       }
2481 
2482       // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2483       // types which are mapped to vector LLVM IR types (e.g. for implementing
2484       // an ABI).
2485       if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2486           EltTy && EltTy->getNumElements() == 1)
2487         SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2488 
2489       Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2490                                         "vecins");
2491       if (IRStoreTy) {
2492         // <N x i1> --> <iN>.
2493         Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2494       }
2495 
2496       auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(),
2497                                     Dst.isVolatileQualified());
2498       addInstToCurrentSourceAtom(I, Vec);
2499       return;
2500     }
2501 
2502     // If this is an update of extended vector elements, insert them as
2503     // appropriate.
2504     if (Dst.isExtVectorElt())
2505       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2506 
2507     if (Dst.isGlobalReg())
2508       return EmitStoreThroughGlobalRegLValue(Src, Dst);
2509 
2510     if (Dst.isMatrixElt()) {
2511       llvm::Value *Idx = Dst.getMatrixIdx();
2512       if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2513         const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2514         llvm::MatrixBuilder MB(Builder);
2515         MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2516       }
2517       llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2518       llvm::Value *Vec =
2519           Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2520       auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2521                                     Dst.isVolatileQualified());
2522       addInstToCurrentSourceAtom(I, Vec);
2523       return;
2524     }
2525 
2526     assert(Dst.isBitField() && "Unknown LValue type");
2527     return EmitStoreThroughBitfieldLValue(Src, Dst);
2528   }
2529 
2530   // Handle __ptrauth qualification by re-signing the value.
2531   if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2532     Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(),
2533                                              Dst.getType(), Dst.getAddress(),
2534                                              /*known nonnull*/ false));
2535   }
2536 
2537   // There's special magic for assigning into an ARC-qualified l-value.
2538   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2539     switch (Lifetime) {
2540     case Qualifiers::OCL_None:
2541       llvm_unreachable("present but none");
2542 
2543     case Qualifiers::OCL_ExplicitNone:
2544       // nothing special
2545       break;
2546 
2547     case Qualifiers::OCL_Strong:
2548       if (isInit) {
2549         Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2550         break;
2551       }
2552       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2553       return;
2554 
2555     case Qualifiers::OCL_Weak:
2556       if (isInit)
2557         // Initialize and then skip the primitive store.
2558         EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
2559       else
2560         EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
2561                          /*ignore*/ true);
2562       return;
2563 
2564     case Qualifiers::OCL_Autoreleasing:
2565       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2566                                                      Src.getScalarVal()));
2567       // fall into the normal path
2568       break;
2569     }
2570   }
2571 
2572   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2573     // Store into a __weak object.
2574     Address LvalueDst = Dst.getAddress();
2575     llvm::Value *src = Src.getScalarVal();
2576     CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2577     return;
2578   }
2579 
2580   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2581     // Store into a __strong object.
2582     Address LvalueDst = Dst.getAddress();
2583     llvm::Value *src = Src.getScalarVal();
2584     if (Dst.isObjCIvar()) {
2585       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2586       llvm::Type *ResultType = IntPtrTy;
2587       Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2588       llvm::Value *RHS = dst.emitRawPointer(*this);
2589       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2590       llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2591                                                 ResultType, "sub.ptr.lhs.cast");
2592       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2593       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2594     } else if (Dst.isGlobalObjCRef()) {
2595       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2596                                                 Dst.isThreadLocalRef());
2597     } else
2599       CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2600     return;
2601   }
2602 
2603   assert(Src.isScalar() && "Can't emit an agg store with this method");
2604   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2605 }
2606 
2607 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2608                                                      llvm::Value **Result) {
2609   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2610   llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2611   Address Ptr = Dst.getBitFieldAddress();
2612 
2613   // Get the source value, truncated to the width of the bit-field.
2614   llvm::Value *SrcVal = Src.getScalarVal();
2615 
2616   // Cast the source to the storage type and shift it into place.
2617   SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2618                                  /*isSigned=*/false);
2619   llvm::Value *MaskedVal = SrcVal;
2620 
2621   const bool UseVolatile =
2622       CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2623       Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2624   const unsigned StorageSize =
2625       UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2626   const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2627   // See if there are other bits in the bitfield's storage we'll need to load
2628   // and mask together with source before storing.
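       // Worked example (illustrative): storing into `unsigned f : 3` at bit
       // offset 2 of a 32-bit container masks the source with 0b111, shifts it
       // left by 2, clears bits [2, 5) of the loaded container value, and ORs
       // the two together before the store below.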
2629   if (StorageSize != Info.Size) {
2630     assert(StorageSize > Info.Size && "Invalid bitfield size.");
2631     llvm::Value *Val =
2632         Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2633 
2634     // Mask the source value as needed.
2635     if (!Dst.getType()->hasBooleanRepresentation())
2636       SrcVal = Builder.CreateAnd(
2637           SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2638           "bf.value");
2639     MaskedVal = SrcVal;
2640     if (Offset)
2641       SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2642 
2643     // Mask out the original value.
2644     Val = Builder.CreateAnd(
2645         Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2646         "bf.clear");
2647 
2648     // Or together the unchanged values and the source value.
2649     SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2650   } else {
2651     assert(Offset == 0);
2652     // According to the AAPCS:
2653     // When a volatile bit-field is written, and its container does not overlap
2654     // with any non-bit-field member, its container must be read exactly once
2655     // and written exactly once using the access width appropriate to the type
2656     // of the container. The two accesses are not atomic.
2657     if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2658         CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2659       Builder.CreateLoad(Ptr, true, "bf.load");
2660   }
2661 
2662   // Write the new value back out.
2663   auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2664   addInstToCurrentSourceAtom(I, SrcVal);
2665 
2666   // Return the new value of the bit-field, if requested.
2667   if (Result) {
2668     llvm::Value *ResultVal = MaskedVal;
2669 
2670     // Sign extend the value if needed.
2671     if (Info.IsSigned) {
2672       assert(Info.Size <= StorageSize);
2673       unsigned HighBits = StorageSize - Info.Size;
2674       if (HighBits) {
2675         ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2676         ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2677       }
2678     }
2679 
2680     ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2681                                       "bf.result.cast");
2682     *Result = EmitFromMemory(ResultVal, Dst.getType());
2683   }
2684 }
2685 
2686 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2687                                                                LValue Dst) {
2688   llvm::Value *SrcVal = Src.getScalarVal();
2689   Address DstAddr = Dst.getExtVectorAddress();
2690   if (DstAddr.getElementType()->getScalarSizeInBits() >
2691       SrcVal->getType()->getScalarSizeInBits())
2692     SrcVal = Builder.CreateZExt(
2693         SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));
2694 
2695   // HLSL allows storing to scalar values through ExtVector component LValues.
2696   // To support this we need to handle the case where the destination address is
2697   // a scalar.
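       // Illustrative (assumed HLSL snippet): `float f; f.x = 1.0;` makes the
       // destination element type a scalar float, so a plain store suffices.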
2698   if (!DstAddr.getElementType()->isVectorTy()) {
2699     assert(!Dst.getType()->isVectorType() &&
2700            "this should only occur for non-vector l-values");
2701     Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
2702     return;
2703   }
2704 
2705   // This access turns into a read/modify/write of the vector.  Load the input
2706   // value now.
2707   llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2708   llvm::Type *VecTy = Vec->getType();
2709   const llvm::Constant *Elts = Dst.getExtVectorElts();
2710 
2711   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2712     unsigned NumSrcElts = VTy->getNumElements();
2713     unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
2714     if (NumDstElts == NumSrcElts) {
2715       // Use a shuffle vector if the src and destination have the same number
2716       // of elements, inverting the access mask so each source element lands
2717       // in the lane where it will be stored.
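           // Illustration: for `v.wzyx = src` on a 4-element ext_vector, the
           // accessed fields are {3,2,1,0}, so Mask becomes <3,2,1,0> and the
           // shuffle places src[0] in lane 3, src[1] in lane 2, and so on.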
2718       SmallVector<int, 4> Mask(NumDstElts);
2719       for (unsigned i = 0; i != NumSrcElts; ++i)
2720         Mask[getAccessedFieldNo(i, Elts)] = i;
2721 
2722       Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2723     } else if (NumDstElts > NumSrcElts) {
2724       // Extend the source vector to the destination's length, then shuffle it
2725       // into the destination.
2726       // FIXME: since we're shuffling with undef, can we just use the indices
2727       //        into that?  This could be simpler.
2728       SmallVector<int, 4> ExtMask;
2729       for (unsigned i = 0; i != NumSrcElts; ++i)
2730         ExtMask.push_back(i);
2731       ExtMask.resize(NumDstElts, -1);
2732       llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2733       // Build an identity mask for the destination vector.
2734       SmallVector<int, 4> Mask;
2735       for (unsigned i = 0; i != NumDstElts; ++i)
2736         Mask.push_back(i);
2737 
2738       // When the vector size is odd and .odd or .hi is used, the last element
2739       // of the Elts constant array will be one past the size of the vector.
2740       // Ignore the last element here, if it is greater than the mask size.
2741       if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2742         NumSrcElts--;
2743 
2744       // Overwrite the accessed lanes with elements shuffled in from the source.
2745       for (unsigned i = 0; i != NumSrcElts; ++i)
2746         Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2747       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2748     } else {
2749       // We should never shorten the vector
2750       llvm_unreachable("unexpected shorten vector length");
2751     }
2752   } else {
2753     // If the Src is a scalar (not a vector) and the target is a vector, it
2754     // must be updating exactly one element.
2755     unsigned InIdx = getAccessedFieldNo(0, Elts);
2756     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2757 
2758     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2759   }
2760 
2761   Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2762                       Dst.isVolatileQualified());
2763 }
2764 
2765 /// Stores to global named registers are always calls to intrinsics.
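     /// Illustrative example (not from a test case): for
     ///   register unsigned long current_sp __asm__("sp");
     /// an assignment to current_sp lowers to a call of the llvm.write_register
     /// intrinsic, passing the register-name metadata (!{!"sp"}) and the stored
     /// value.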
2766 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2767   assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2768          "Bad type for register variable");
2769   llvm::MDNode *RegName = cast<llvm::MDNode>(
2770       cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2771   assert(RegName && "Register LValue is not metadata");
2772 
2773   // We accept integer and pointer types only
2774   llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2775   llvm::Type *Ty = OrigTy;
2776   if (OrigTy->isPointerTy())
2777     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2778   llvm::Type *Types[] = { Ty };
2779 
2780   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2781   llvm::Value *Value = Src.getScalarVal();
2782   if (OrigTy->isPointerTy())
2783     Value = Builder.CreatePtrToInt(Value, Ty);
2784   Builder.CreateCall(
2785       F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2786 }
2787 
2788 // setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
2789 // generating the write-barrier API. It is currently a global, an ivar,
2790 // or neither.
2791 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2792                                  LValue &LV,
2793                                  bool IsMemberAccess=false) {
2794   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2795     return;
2796 
2797   if (isa<ObjCIvarRefExpr>(E)) {
2798     QualType ExpTy = E->getType();
2799     if (IsMemberAccess && ExpTy->isPointerType()) {
2800       // If the ivar is a structure pointer, assigning to a field of
2801       // this struct follows gcc's behavior and conservatively makes it a
2802       // non-ivar write-barrier.
2803       ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2804       if (ExpTy->isRecordType()) {
2805         LV.setObjCIvar(false);
2806         return;
2807       }
2808     }
2809     LV.setObjCIvar(true);
2810     auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2811     LV.setBaseIvarExp(Exp->getBase());
2812     LV.setObjCArray(E->getType()->isArrayType());
2813     return;
2814   }
2815 
2816   if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2817     if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2818       if (VD->hasGlobalStorage()) {
2819         LV.setGlobalObjCRef(true);
2820         LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2821       }
2822     }
2823     LV.setObjCArray(E->getType()->isArrayType());
2824     return;
2825   }
2826 
2827   if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2828     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2829     return;
2830   }
2831 
2832   if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2833     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2834     if (LV.isObjCIvar()) {
2835       // If the cast is to a structure pointer, follow gcc's behavior and
2836       // make it a non-ivar write-barrier.
2837       QualType ExpTy = E->getType();
2838       if (ExpTy->isPointerType())
2839         ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2840       if (ExpTy->isRecordType())
2841         LV.setObjCIvar(false);
2842     }
2843     return;
2844   }
2845 
2846   if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2847     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2848     return;
2849   }
2850 
2851   if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2852     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2853     return;
2854   }
2855 
2856   if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2857     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2858     return;
2859   }
2860 
2861   if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2862     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2863     return;
2864   }
2865 
2866   if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2867     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2868     if (LV.isObjCIvar() && !LV.isObjCArray())
2869       // Using array syntax to assign to what an ivar points to is not the
2870       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2871       LV.setObjCIvar(false);
2872     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2873       // Using array syntax to assign to what a global points to is not the
2874       // same as assigning to the global itself. {id *G;} G[i] = 0;
2875       LV.setGlobalObjCRef(false);
2876     return;
2877   }
2878 
2879   if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2880     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2881     // We don't know if the member is an 'ivar', but this flag is only looked
2882     // at in the context of LV.isObjCIvar().
2883     LV.setObjCArray(E->getType()->isArrayType());
2884     return;
2885   }
2886 }
2887 
2888 static LValue EmitThreadPrivateVarDeclLValue(
2889     CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2890     llvm::Type *RealVarTy, SourceLocation Loc) {
2891   if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2892     Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2893         CGF, VD, Addr, Loc);
2894   else
2895     Addr =
2896         CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2897 
2898   Addr = Addr.withElementType(RealVarTy);
2899   return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2900 }
2901 
2902 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2903                                            const VarDecl *VD, QualType T) {
2904   std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2905       OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2906   // Return an invalid address if variable is MT_To (or MT_Enter starting with
2907   // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2908   // and MT_To (or MT_Enter) with unified memory, return a valid address.
2909   if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2910                 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2911                !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2912     return Address::invalid();
2913   assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2914           ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2915             *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2916            CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2917          "Expected link clause OR to clause with unified memory enabled.");
2918   QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2919   Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2920   return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2921 }
2922 
2923 Address
2924 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2925                                      LValueBaseInfo *PointeeBaseInfo,
2926                                      TBAAAccessInfo *PointeeTBAAInfo) {
2927   llvm::LoadInst *Load =
2928       Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2929   CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2930   QualType PTy = RefLVal.getType()->getPointeeType();
2931   CharUnits Align = CGM.getNaturalTypeAlignment(
2932       PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
2933   if (!PTy->isIncompleteType()) {
2934     llvm::LLVMContext &Ctx = getLLVMContext();
2935     llvm::MDBuilder MDB(Ctx);
2936     // Emit !nonnull metadata
2937     if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
2938         !CGM.getCodeGenOpts().NullPointerIsValid)
2939       Load->setMetadata(llvm::LLVMContext::MD_nonnull,
2940                         llvm::MDNode::get(Ctx, {}));
2941     // Emit !align metadata
2942     if (PTy->isObjectType()) {
2943       auto AlignVal = Align.getQuantity();
2944       if (AlignVal > 1) {
2945         Load->setMetadata(
2946             llvm::LLVMContext::MD_align,
2947             llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
2948                                        Builder.getInt64Ty(), AlignVal))));
2949       }
2950     }
2951   }
2952   return makeNaturalAddressForPointer(Load, PTy, Align,
2953                                       /*ForPointeeType=*/true, PointeeBaseInfo,
2954                                       PointeeTBAAInfo);
2955 }
2956 
2957 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2958   LValueBaseInfo PointeeBaseInfo;
2959   TBAAAccessInfo PointeeTBAAInfo;
2960   Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2961                                             &PointeeTBAAInfo);
2962   return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2963                         PointeeBaseInfo, PointeeTBAAInfo);
2964 }
2965 
2966 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2967                                            const PointerType *PtrTy,
2968                                            LValueBaseInfo *BaseInfo,
2969                                            TBAAAccessInfo *TBAAInfo) {
2970   llvm::Value *Addr = Builder.CreateLoad(Ptr);
2971   return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2972                                       CharUnits(), /*ForPointeeType=*/true,
2973                                       BaseInfo, TBAAInfo);
2974 }
2975 
2976 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2977                                                 const PointerType *PtrTy) {
2978   LValueBaseInfo BaseInfo;
2979   TBAAAccessInfo TBAAInfo;
2980   Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2981   return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2982 }
2983 
2984 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2985                                       const Expr *E, const VarDecl *VD) {
2986   QualType T = E->getType();
2987 
2988   // If it's thread_local, emit a call to its wrapper function instead.
2989   if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2990       CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2991     return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2992   // Check if the variable is marked as declare target with link clause in
2993   // device codegen.
2994   if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2995     Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2996     if (Addr.isValid())
2997       return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2998   }
2999 
3000   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
3001 
3002   if (VD->getTLSKind() != VarDecl::TLS_None)
3003     V = CGF.Builder.CreateThreadLocalAddress(V);
3004 
3005   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
3006   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
3007   Address Addr(V, RealVarTy, Alignment);
3008   // Emit reference to the private copy of the variable if it is an OpenMP
3009   // threadprivate variable.
3010   if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3011       VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3012     return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3013                                           E->getExprLoc());
3014   }
3015   LValue LV = VD->getType()->isReferenceType() ?
3016       CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
3017                                     AlignmentSource::Decl) :
3018       CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3019   setObjCGCLValueClass(CGF.getContext(), E, LV);
3020   return LV;
3021 }
3022 
3023 llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
3024                                                      llvm::Type *Ty) {
3025   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3026   if (FD->hasAttr<WeakRefAttr>()) {
3027     ConstantAddress aliasee = GetWeakRefReference(FD);
3028     return aliasee.getPointer();
3029   }
3030 
3031   llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3032   return V;
3033 }
3034 
3035 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3036                                      GlobalDecl GD) {
3037   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3038   llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3039   QualType ETy = E->getType();
3040   if (ETy->isCFIUncheckedCalleeFunctionType()) {
3041     if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
3042       V = llvm::NoCFIValue::get(GV);
3043   }
3044   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
3045   return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
3046 }
3047 
3048 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
3049                                       llvm::Value *ThisValue) {
3050 
3051   return CGF.EmitLValueForLambdaField(FD, ThisValue);
3052 }
3053 
3054 /// Named Registers are named metadata pointing to the register name
3055 /// which will be read from/written to as an argument to the intrinsic
3056 /// @llvm.read/write_register.
3057 /// So far, only the name is being passed down, but other options such as
3058 /// register type, allocation type or even optimization options could be
3059 /// passed down via the metadata node.
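     /// For a variable declared with __asm__("sp"), for example, this produces
     /// module-level metadata along the lines of (illustrative):
     ///   !llvm.named.register.sp = !{!0}
     ///   !0 = !{!"sp"}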
3060 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3061   SmallString<64> Name("llvm.named.register.");
3062   AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3063   assert(Asm->getLabel().size() < 64-Name.size() &&
3064       "Register name too big");
3065   Name.append(Asm->getLabel());
3066   llvm::NamedMDNode *M =
3067     CGM.getModule().getOrInsertNamedMetadata(Name);
3068   if (M->getNumOperands() == 0) {
3069     llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
3070                                               Asm->getLabel());
3071     llvm::Metadata *Ops[] = {Str};
3072     M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
3073   }
3074 
3075   CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
3076 
3077   llvm::Value *Ptr =
3078     llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
3079   return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
3080 }
3081 
3082 /// Determine whether we can emit a reference to \p VD from the current
3083 /// context, despite not necessarily having seen an odr-use of the variable in
3084 /// this context.
3085 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
3086                                                const DeclRefExpr *E,
3087                                                const VarDecl *VD) {
3088   // For a variable declared in an enclosing scope, do not emit a spurious
3089   // reference even if we have a capture, as that will emit an unwarranted
3090   // reference to our capture state, and will likely generate worse code than
3091   // emitting a local copy.
3092   if (E->refersToEnclosingVariableOrCapture())
3093     return false;
3094 
3095   // For a local declaration declared in this function, we can always reference
3096   // it even if we don't have an odr-use.
3097   if (VD->hasLocalStorage()) {
3098     return VD->getDeclContext() ==
3099            dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
3100   }
3101 
3102   // For a global declaration, we can emit a reference to it if we know
3103   // for sure that we are able to emit a definition of it.
3104   VD = VD->getDefinition(CGF.getContext());
3105   if (!VD)
3106     return false;
3107 
3108   // Don't emit a spurious reference if it might be to a variable that only
3109   // exists on a different device / target.
3110   // FIXME: This is unnecessarily broad. Check whether this would actually be a
3111   // cross-target reference.
3112   if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3113       CGF.getLangOpts().OpenCL) {
3114     return false;
3115   }
3116 
3117   // We can emit a spurious reference only if the linkage implies that we'll
3118   // be emitting a non-interposable symbol that will be retained until link
3119   // time.
3120   switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3121   case llvm::GlobalValue::ExternalLinkage:
3122   case llvm::GlobalValue::LinkOnceODRLinkage:
3123   case llvm::GlobalValue::WeakODRLinkage:
3124   case llvm::GlobalValue::InternalLinkage:
3125   case llvm::GlobalValue::PrivateLinkage:
3126     return true;
3127   default:
3128     return false;
3129   }
3130 }
3131 
3132 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
3133   const NamedDecl *ND = E->getDecl();
3134   QualType T = E->getType();
3135 
3136   assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3137          "should not emit an unevaluated operand");
3138 
3139   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3140     // Global named registers are accessed via intrinsics only.
3141     if (VD->getStorageClass() == SC_Register &&
3142         VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3143       return EmitGlobalNamedRegister(VD, CGM);
3144 
3145     // If this DeclRefExpr does not constitute an odr-use of the variable,
3146     // we're not permitted to emit a reference to it in general, and it might
3147     // not be captured if capture would be necessary for a use. Emit the
3148     // constant value directly instead.
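         // For instance (illustrative), a lambda body that reads a constexpr
         // local of the enclosing function without capturing it can take this
         // path and have the variable's value emitted as a constant.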
3149     if (E->isNonOdrUse() == NOUR_Constant &&
3150         (VD->getType()->isReferenceType() ||
3151          !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3152       VD->getAnyInitializer(VD);
3153       llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3154           E->getLocation(), *VD->evaluateValue(), VD->getType());
3155       assert(Val && "failed to emit constant expression");
3156 
3157       Address Addr = Address::invalid();
3158       if (!VD->getType()->isReferenceType()) {
3159         // Spill the constant value to a global.
3160         Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3161                                            getContext().getDeclAlign(VD));
3162         llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3163         auto *PTy = llvm::PointerType::get(
3164             getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
3165         Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3166       } else {
3167         // Should we be using the alignment of the constant pointer we emitted?
3168         CharUnits Alignment =
3169             CGM.getNaturalTypeAlignment(E->getType(),
3170                                         /* BaseInfo= */ nullptr,
3171                                         /* TBAAInfo= */ nullptr,
3172                                         /* forPointeeType= */ true);
3173         Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3174       }
3175       return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3176     }
3177 
3178     // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3179 
3180     // Check for captured variables.
3181     if (E->refersToEnclosingVariableOrCapture()) {
3182       VD = VD->getCanonicalDecl();
3183       if (auto *FD = LambdaCaptureFields.lookup(VD))
3184         return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3185       if (CapturedStmtInfo) {
3186         auto I = LocalDeclMap.find(VD);
3187         if (I != LocalDeclMap.end()) {
3188           LValue CapLVal;
3189           if (VD->getType()->isReferenceType())
3190             CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3191                                                 AlignmentSource::Decl);
3192           else
3193             CapLVal = MakeAddrLValue(I->second, T);
3194           // Mark lvalue as nontemporal if the variable is marked as nontemporal
3195           // in simd context.
3196           if (getLangOpts().OpenMP &&
3197               CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3198             CapLVal.setNontemporal(/*Value=*/true);
3199           return CapLVal;
3200         }
3201         LValue CapLVal =
3202             EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3203                                     CapturedStmtInfo->getContextValue());
3204         Address LValueAddress = CapLVal.getAddress();
3205         CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3206                                          LValueAddress.getElementType(),
3207                                          getContext().getDeclAlign(VD)),
3208                                  CapLVal.getType(),
3209                                  LValueBaseInfo(AlignmentSource::Decl),
3210                                  CapLVal.getTBAAInfo());
3211         // Mark lvalue as nontemporal if the variable is marked as nontemporal
3212         // in simd context.
3213         if (getLangOpts().OpenMP &&
3214             CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3215           CapLVal.setNontemporal(/*Value=*/true);
3216         return CapLVal;
3217       }
3218 
3219       assert(isa<BlockDecl>(CurCodeDecl));
3220       Address addr = GetAddrOfBlockDecl(VD);
3221       return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3222     }
3223   }
3224 
3225   // FIXME: We should be able to assert this for FunctionDecls as well!
3226   // FIXME: We should be able to assert this for all DeclRefExprs, not just
3227   // those with a valid source location.
3228   assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3229           !E->getLocation().isValid()) &&
3230          "Should not use decl without marking it used!");
3231 
3232   if (ND->hasAttr<WeakRefAttr>()) {
3233     const auto *VD = cast<ValueDecl>(ND);
3234     ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3235     return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3236   }
3237 
3238   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3239     // Check if this is a global variable.
3240     if (VD->hasLinkage() || VD->isStaticDataMember())
3241       return EmitGlobalVarDeclLValue(*this, E, VD);
3242 
3243     Address addr = Address::invalid();
3244 
3245     // The variable should generally be present in the local decl map.
3246     auto iter = LocalDeclMap.find(VD);
3247     if (iter != LocalDeclMap.end()) {
3248       addr = iter->second;
3249 
3250     // Otherwise, it might be a static local we haven't emitted yet for
3251     // some reason; most likely because it's in an outer function.
3252     } else if (VD->isStaticLocal()) {
3253       llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3254           *VD, CGM.getLLVMLinkageVarDefinition(VD));
3255       addr = Address(
3256           var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3257 
3258     // No other cases for now.
3259     } else {
3260       llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3261     }
3262 
3263     // Handle threadlocal function locals.
3264     if (VD->getTLSKind() != VarDecl::TLS_None)
3265       addr = addr.withPointer(
3266           Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3267           NotKnownNonNull);
3268 
3269     // Check for OpenMP threadprivate variables.
3270     if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3271         VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3272       return EmitThreadPrivateVarDeclLValue(
3273           *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3274           E->getExprLoc());
3275     }
3276 
3277     // Drill into block byref variables.
3278     bool isBlockByref = VD->isEscapingByref();
3279     if (isBlockByref) {
3280       addr = emitBlockByrefAddress(addr, VD);
3281     }
3282 
3283     // Drill into reference types.
3284     LValue LV = VD->getType()->isReferenceType() ?
3285         EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3286         MakeAddrLValue(addr, T, AlignmentSource::Decl);
3287 
3288     bool isLocalStorage = VD->hasLocalStorage();
3289 
3290     bool NonGCable = isLocalStorage &&
3291                      !VD->getType()->isReferenceType() &&
3292                      !isBlockByref;
3293     if (NonGCable) {
3294       LV.getQuals().removeObjCGCAttr();
3295       LV.setNonGC(true);
3296     }
3297 
3298     bool isImpreciseLifetime =
3299       (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3300     if (isImpreciseLifetime)
3301       LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3302     setObjCGCLValueClass(getContext(), E, LV);
3303     return LV;
3304   }
3305 
3306   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3307     return EmitFunctionDeclLValue(*this, E, FD);
3308 
3309   // FIXME: While we're emitting a binding from an enclosing scope, all other
3310   // DeclRefExprs we see should be implicitly treated as if they also refer to
3311   // an enclosing scope.
3312   if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3313     if (E->refersToEnclosingVariableOrCapture()) {
3314       auto *FD = LambdaCaptureFields.lookup(BD);
3315       return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3316     }
3317     // Suppress debug location updates when visiting the binding, since the
3318     // binding may emit instructions that would otherwise be associated with the
3319     // binding itself, rather than with the expression referencing the binding.
3320     // (This leads to jumpy debug-stepping behavior where the debugger jumps
3321     // back to the binding declaration, then back to the expression referencing
3322     // the binding.)
3323     DisableDebugLocationUpdates D(*this);
3324     return EmitLValue(BD->getBinding(), NotKnownNonNull);
3325   }
3326 
3327   // We can form DeclRefExprs naming GUID declarations when reconstituting
3328   // non-type template parameters into expressions.
3329   if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3330     return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3331                           AlignmentSource::Decl);
3332 
3333   if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3334     auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3335     auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3336 
3337     if (AS != T.getAddressSpace()) {
3338       auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3339       auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3340       auto ASC = getTargetHooks().performAddrSpaceCast(CGM, ATPO.getPointer(),
3341                                                        AS, PtrTy);
3342       ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3343     }
3344 
3345     return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3346   }
3347 
3348   llvm_unreachable("Unhandled DeclRefExpr");
3349 }
3350 
3351 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3352   // __extension__ doesn't affect lvalue-ness.
3353   if (E->getOpcode() == UO_Extension)
3354     return EmitLValue(E->getSubExpr());
3355 
3356   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3357   switch (E->getOpcode()) {
3358   default: llvm_unreachable("Unknown unary operator lvalue!");
3359   case UO_Deref: {
3360     QualType T = E->getSubExpr()->getType()->getPointeeType();
3361     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3362 
3363     LValueBaseInfo BaseInfo;
3364     TBAAAccessInfo TBAAInfo;
3365     Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3366                                             &TBAAInfo);
3367     LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3368     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3369 
3370     // We should not generate a __weak write barrier on an indirect reference
3371     // through a pointer to object, as in: void foo (__weak id *param); *param = 0;
3372     // But we continue to generate a __strong write barrier on an indirect write
3373     // into a pointer to object.
3374     if (getLangOpts().ObjC &&
3375         getLangOpts().getGC() != LangOptions::NonGC &&
3376         LV.isObjCWeak())
3377       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3378     return LV;
3379   }
3380   case UO_Real:
3381   case UO_Imag: {
3382     LValue LV = EmitLValue(E->getSubExpr());
3383     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3384 
3385     // __real is valid on scalars.  This is a faster way of testing that.
3386     // __imag can only produce an rvalue on scalars.
3387     if (E->getOpcode() == UO_Real &&
3388         !LV.getAddress().getElementType()->isStructTy()) {
3389       assert(E->getSubExpr()->getType()->isArithmeticType());
3390       return LV;
3391     }
3392 
3393     QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3394 
3395     Address Component =
3396         (E->getOpcode() == UO_Real
3397              ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
3398              : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3399     LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3400                                    CGM.getTBAAInfoForSubobject(LV, T));
3401     ElemLV.getQuals().addQualifiers(LV.getQuals());
3402     return ElemLV;
3403   }
3404   case UO_PreInc:
3405   case UO_PreDec: {
3406     LValue LV = EmitLValue(E->getSubExpr());
3407     bool isInc = E->getOpcode() == UO_PreInc;
3408 
3409     if (E->getType()->isAnyComplexType())
3410       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3411     else
3412       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3413     return LV;
3414   }
3415   }
3416 }
3417 
3418 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3419   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3420                         E->getType(), AlignmentSource::Decl);
3421 }
3422 
3423 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3424   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3425                         E->getType(), AlignmentSource::Decl);
3426 }
3427 
3428 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3429   auto SL = E->getFunctionName();
3430   assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3431   StringRef FnName = CurFn->getName();
3432   FnName.consume_front("\01");
3433   StringRef NameItems[] = {
3434       PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3435   std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3436   if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3437     std::string Name = std::string(SL->getString());
3438     if (!Name.empty()) {
3439       unsigned Discriminator =
3440           CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3441       if (Discriminator)
3442         Name += "_" + Twine(Discriminator + 1).str();
3443       auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3444       return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3445     } else {
3446       auto C =
3447           CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3448       return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3449     }
3450   }
3451   auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3452   return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3453 }
3454 
3455 /// Emit a type description suitable for use by a runtime sanitizer library. The
3456 /// format of a type descriptor is
3457 ///
3458 /// \code
3459 ///   { i16 TypeKind, i16 TypeInfo }
3460 /// \endcode
3461 ///
3462 /// followed by an array of i8 containing the type name with extra information
3463 /// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3464 /// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3465 /// anything else.
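     /// As an illustration, on a target with 32-bit int the descriptor emitted
     /// below for 'int' is roughly { i16 0, i16 11, [6 x i8] c"'int'\00" }:
     /// TK_Integer, then (log2(32) << 1) | 1 for a signed type, then the quoted
     /// type name.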
3466 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3467   // Only emit each type's descriptor once.
3468   if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3469     return C;
3470 
3471   uint16_t TypeKind = TK_Unknown;
3472   uint16_t TypeInfo = 0;
3473   bool IsBitInt = false;
3474 
3475   if (T->isIntegerType()) {
3476     TypeKind = TK_Integer;
3477     TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3478                (T->isSignedIntegerType() ? 1 : 0);
3479     // Follow the suggestion from the discussion of issue 64100: write the
3480     // exact number of bits in TypeName after '\0', making it
3481     // <diagnostic-like type name>.'\0'.<32-bit width>.
3482     if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3483       // Do sanity checks, as we are using a 32-bit type to store the bit length.
3484       assert(getContext().getTypeSize(T) > 0 &&
3485              " non positive amount of bits in __BitInt type");
3486       assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3487              " too many bits in __BitInt type");
3488 
3489       // Redefine TypeKind with the actual __BitInt type if we have signed
3490       // BitInt.
3491       TypeKind = TK_BitInt;
3492       IsBitInt = true;
3493     }
3494   } else if (T->isFloatingType()) {
3495     TypeKind = TK_Float;
3496     TypeInfo = getContext().getTypeSize(T);
3497   }
3498 
3499   // Format the type name as if for a diagnostic, including quotes and
3500   // optionally an 'aka'.
3501   SmallString<32> Buffer;
3502   CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3503                                     (intptr_t)T.getAsOpaquePtr(), StringRef(),
3504                                     StringRef(), {}, Buffer, {});
3505 
3506   if (IsBitInt) {
3507     // The structure is: a zero byte to end the string, a 32-bit unsigned
3508     // integer in target endianness, then another zero byte.
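         // e.g. for _BitInt(37) on a little-endian target the appended bytes
         // are 00 25 00 00 00 00 (illustrative).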
3509     char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3510     const auto *EIT = T->castAs<BitIntType>();
3511     uint32_t Bits = EIT->getNumBits();
3512     llvm::support::endian::write32(S + 1, Bits,
3513                                    getTarget().isBigEndian()
3514                                        ? llvm::endianness::big
3515                                        : llvm::endianness::little);
3516     StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3517     Buffer.append(Str);
3518   }
3519 
3520   llvm::Constant *Components[] = {
3521     Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3522     llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3523   };
3524   llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3525 
3526   auto *GV = new llvm::GlobalVariable(
3527       CGM.getModule(), Descriptor->getType(),
3528       /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3529   GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3530   CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3531 
3532   // Remember the descriptor for this type.
3533   CGM.setTypeDescriptorInMap(T, GV);
3534 
3535   return GV;
3536 }
3537 
3538 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3539   llvm::Type *TargetTy = IntPtrTy;
3540 
3541   if (V->getType() == TargetTy)
3542     return V;
3543 
3544   // Floating-point types which fit into intptr_t are bitcast to integers
3545   // and then passed directly (after zero-extension, if necessary).
3546   if (V->getType()->isFloatingPointTy()) {
3547     unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3548     if (Bits <= TargetTy->getIntegerBitWidth())
3549       V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3550                                                          Bits));
3551   }
3552 
3553   // Integers which fit in intptr_t are zero-extended and passed directly.
3554   if (V->getType()->isIntegerTy() &&
3555       V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3556     return Builder.CreateZExt(V, TargetTy);
3557 
3558   // Pointers are passed directly, everything else is passed by address.
3559   if (!V->getType()->isPointerTy()) {
3560     RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3561     Builder.CreateStore(V, Ptr);
3562     V = Ptr.getPointer();
3563   }
3564   return Builder.CreatePtrToInt(V, TargetTy);
3565 }
3566 
3567 /// Emit a representation of a SourceLocation for passing to a handler
3568 /// in a sanitizer runtime library. The format for this data is:
3569 /// \code
3570 ///   struct SourceLocation {
3571 ///     const char *Filename;
3572 ///     int32_t Line, Column;
3573 ///   };
3574 /// \endcode
3575 /// For an invalid SourceLocation, the Filename pointer is null.
3576 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3577   llvm::Constant *Filename;
3578   int Line, Column;
3579 
3580   PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3581   if (PLoc.isValid()) {
3582     StringRef FilenameString = PLoc.getFilename();
3583 
3584     int PathComponentsToStrip =
3585         CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
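         // A negative value means keep only the last N path components of the
         // file name; a positive value strips the first N components instead.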
3586     if (PathComponentsToStrip < 0) {
3587       assert(PathComponentsToStrip != INT_MIN);
3588       int PathComponentsToKeep = -PathComponentsToStrip;
3589       auto I = llvm::sys::path::rbegin(FilenameString);
3590       auto E = llvm::sys::path::rend(FilenameString);
3591       while (I != E && --PathComponentsToKeep)
3592         ++I;
3593 
3594       FilenameString = FilenameString.substr(I - E);
3595     } else if (PathComponentsToStrip > 0) {
3596       auto I = llvm::sys::path::begin(FilenameString);
3597       auto E = llvm::sys::path::end(FilenameString);
3598       while (I != E && PathComponentsToStrip--)
3599         ++I;
3600 
3601       if (I != E)
3602         FilenameString =
3603             FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3604       else
3605         FilenameString = llvm::sys::path::filename(FilenameString);
3606     }
3607 
3608     auto FilenameGV =
3609         CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3610     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3611         cast<llvm::GlobalVariable>(
3612             FilenameGV.getPointer()->stripPointerCasts()));
3613     Filename = FilenameGV.getPointer();
3614     Line = PLoc.getLine();
3615     Column = PLoc.getColumn();
3616   } else {
3617     Filename = llvm::Constant::getNullValue(Int8PtrTy);
3618     Line = Column = 0;
3619   }
3620 
3621   llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3622                             Builder.getInt32(Column)};
3623 
3624   return llvm::ConstantStruct::getAnon(Data);
3625 }
3626 
3627 namespace {
3628 /// Specify under what conditions this check can be recovered from.
3629 enum class CheckRecoverableKind {
3630   /// Always terminate program execution if this check fails.
3631   Unrecoverable,
3632   /// Check supports recovering, runtime has both fatal (noreturn) and
3633   /// non-fatal handlers for this check.
3634   Recoverable,
3635   /// Runtime conditionally aborts, always need to support recovery.
3636   AlwaysRecoverable
3637 };
3638 }
3639 
3640 static CheckRecoverableKind
3641 getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {
3642   if (Ordinal == SanitizerKind::SO_Vptr)
3643     return CheckRecoverableKind::AlwaysRecoverable;
3644   else if (Ordinal == SanitizerKind::SO_Return ||
3645            Ordinal == SanitizerKind::SO_Unreachable)
3646     return CheckRecoverableKind::Unrecoverable;
3647   else
3648     return CheckRecoverableKind::Recoverable;
3649 }
3650 
3651 namespace {
3652 struct SanitizerHandlerInfo {
3653   char const *const Name;
3654   unsigned Version;
3655 };
3656 }
3657 
3658 const SanitizerHandlerInfo SanitizerHandlers[] = {
3659 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3660     LIST_SANITIZER_CHECKS
3661 #undef SANITIZER_CHECK
3662 };
3663 
3664 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3665                                  llvm::FunctionType *FnType,
3666                                  ArrayRef<llvm::Value *> FnArgs,
3667                                  SanitizerHandler CheckHandler,
3668                                  CheckRecoverableKind RecoverKind, bool IsFatal,
3669                                  llvm::BasicBlock *ContBB, bool NoMerge) {
3670   assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3671   std::optional<ApplyDebugLocation> DL;
3672   if (!CGF.Builder.getCurrentDebugLocation()) {
3673     // Ensure that the call has at least an artificial debug location.
3674     DL.emplace(CGF, SourceLocation());
3675   }
3676   bool NeedsAbortSuffix =
3677       IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3678   bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3679   const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3680   const StringRef CheckName = CheckInfo.Name;
3681   std::string FnName = "__ubsan_handle_" + CheckName.str();
3682   if (CheckInfo.Version && !MinimalRuntime)
3683     FnName += "_v" + llvm::utostr(CheckInfo.Version);
3684   if (MinimalRuntime)
3685     FnName += "_minimal";
3686   if (NeedsAbortSuffix)
3687     FnName += "_abort";
3688   bool MayReturn =
3689       !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3690 
3691   llvm::AttrBuilder B(CGF.getLLVMContext());
3692   if (!MayReturn) {
3693     B.addAttribute(llvm::Attribute::NoReturn)
3694         .addAttribute(llvm::Attribute::NoUnwind);
3695   }
3696   B.addUWTableAttr(llvm::UWTableKind::Default);
3697 
3698   llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3699       FnType, FnName,
3700       llvm::AttributeList::get(CGF.getLLVMContext(),
3701                                llvm::AttributeList::FunctionIndex, B),
3702       /*Local=*/true);
3703   llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3704   NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3705             (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3706   if (NoMerge)
3707     HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3708   if (!MayReturn) {
3709     HandlerCall->setDoesNotReturn();
3710     CGF.Builder.CreateUnreachable();
3711   } else {
3712     CGF.Builder.CreateBr(ContBB);
3713   }
3714 }
3715 
3716 void CodeGenFunction::EmitCheck(
3717     ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3718     SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3719     ArrayRef<llvm::Value *> DynamicArgs) {
3720   assert(IsSanitizerScope);
3721   assert(Checked.size() > 0);
3722   assert(CheckHandler >= 0 &&
3723          size_t(CheckHandler) < std::size(SanitizerHandlers));
3724   const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3725 
3726   llvm::Value *FatalCond = nullptr;
3727   llvm::Value *RecoverableCond = nullptr;
3728   llvm::Value *TrapCond = nullptr;
3729   bool NoMerge = false;
3730   // Expand checks into:
3731   //   (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
3732   // We need separate allow_ubsan_check intrinsics because they have separately
3733   // specified cutoffs.
3734   // This expression looks expensive but will be simplified after
3735   // LowerAllowCheckPass.
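       // Sketch of the guard emitted for each check (illustrative IR):
       //   %allow   = call i1 @llvm.allow.ubsan.check(i8 <ordinal>)
       //   %noallow = xor i1 %allow, true
       //   %guarded = or i1 %check, %noallow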
3736   for (auto &[Check, Ord] : Checked) {
3737     llvm::Value *GuardedCheck = Check;
3738     if (ClSanitizeGuardChecks ||
3739         (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
3740       llvm::Value *Allow = Builder.CreateCall(
3741           CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3742           llvm::ConstantInt::get(CGM.Int8Ty, Ord));
3743       GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
3744     }
3745 
3746     // -fsanitize-trap= overrides -fsanitize-recover=.
3747     llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
3748                          : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
3749                              ? RecoverableCond
3750                              : FatalCond;
3751     Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
3752 
3753     if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
3754       NoMerge = true;
3755   }
3756 
3757   if (TrapCond)
3758     EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
3759   if (!FatalCond && !RecoverableCond)
3760     return;
3761 
3762   llvm::Value *JointCond;
3763   if (FatalCond && RecoverableCond)
3764     JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3765   else
3766     JointCond = FatalCond ? FatalCond : RecoverableCond;
3767   assert(JointCond);
3768 
3769   CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3770   assert(SanOpts.has(Checked[0].second));
3771 #ifndef NDEBUG
3772   for (int i = 1, n = Checked.size(); i < n; ++i) {
3773     assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3774            "All recoverable kinds in a single check must be same!");
3775     assert(SanOpts.has(Checked[i].second));
3776   }
3777 #endif
3778 
3779   llvm::BasicBlock *Cont = createBasicBlock("cont");
3780   llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3781   llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3782   // Give a hint that we very much don't expect to execute the handler.
3783   llvm::MDBuilder MDHelper(getLLVMContext());
3784   llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3785   Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3786   EmitBlock(Handlers);
3787 
3788   // Handler functions take an i8* pointing to the (handler-specific) static
3789   // information block, followed by a sequence of intptr_t arguments
3790   // representing operand values.
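       // For instance (illustrative), a recoverable signed-add overflow check
       // ends up calling something like
       //   __ubsan_handle_add_overflow(&static_data, lhs, rhs)
       // with the operand values widened to intptr_t by EmitCheckValue.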
3791   SmallVector<llvm::Value *, 4> Args;
3792   SmallVector<llvm::Type *, 4> ArgTypes;
3793   if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3794     Args.reserve(DynamicArgs.size() + 1);
3795     ArgTypes.reserve(DynamicArgs.size() + 1);
3796 
3797     // Emit handler arguments and create handler function type.
3798     if (!StaticArgs.empty()) {
3799       llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3800       auto *InfoPtr = new llvm::GlobalVariable(
3801           CGM.getModule(), Info->getType(), false,
3802           llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3803           llvm::GlobalVariable::NotThreadLocal,
3804           CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3805       InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3806       CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3807       Args.push_back(InfoPtr);
3808       ArgTypes.push_back(Args.back()->getType());
3809     }
3810 
3811     for (llvm::Value *DynamicArg : DynamicArgs) {
3812       Args.push_back(EmitCheckValue(DynamicArg));
3813       ArgTypes.push_back(IntPtrTy);
3814     }
3815   }
3816 
3817   llvm::FunctionType *FnType =
3818     llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3819 
3820   if (!FatalCond || !RecoverableCond) {
3821     // Simple case: we need to generate a single handler call, either
3822     // fatal, or non-fatal.
3823     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3824                          (FatalCond != nullptr), Cont, NoMerge);
3825   } else {
3826     // Emit two handler calls: the first for the set of unrecoverable
3827     // checks, the second for the recoverable ones.
3828     llvm::BasicBlock *NonFatalHandlerBB =
3829         createBasicBlock("non_fatal." + CheckName);
3830     llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3831     Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3832     EmitBlock(FatalHandlerBB);
3833     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3834                          NonFatalHandlerBB, NoMerge);
3835     EmitBlock(NonFatalHandlerBB);
3836     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3837                          Cont, NoMerge);
3838   }
3839 
3840   EmitBlock(Cont);
3841 }
3842 
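// Emit a call into the cross-DSO CFI runtime slow path for when the inline
// check fails: __cfi_slowpath_diag(TypeId, Ptr, DiagData) if diagnostics are
// enabled for this check, or __cfi_slowpath(TypeId, Ptr) if it traps.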
3843 void CodeGenFunction::EmitCfiSlowPathCheck(
3844     SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
3845     llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3846     ArrayRef<llvm::Constant *> StaticArgs) {
3847   llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3848 
3849   llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3850   llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3851 
3852   llvm::MDBuilder MDHelper(getLLVMContext());
3853   llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3854   BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3855 
3856   EmitBlock(CheckBB);
3857 
3858   bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
3859 
3860   llvm::CallInst *CheckCall;
3861   llvm::FunctionCallee SlowPathFn;
3862   if (WithDiag) {
3863     llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3864     auto *InfoPtr =
3865         new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3866                                  llvm::GlobalVariable::PrivateLinkage, Info);
3867     InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3868     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3869 
3870     SlowPathFn = CGM.getModule().getOrInsertFunction(
3871         "__cfi_slowpath_diag",
3872         llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3873                                 false));
3874     CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3875   } else {
3876     SlowPathFn = CGM.getModule().getOrInsertFunction(
3877         "__cfi_slowpath",
3878         llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3879     CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3880   }
3881 
3882   CGM.setDSOLocal(
3883       cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3884   CheckCall->setDoesNotThrow();
3885 
3886   EmitBlock(Cont);
3887 }
3888 
3889 // Emit a stub for __cfi_check function so that the linker knows about this
3890 // symbol in LTO mode.
3891 void CodeGenFunction::EmitCfiCheckStub() {
3892   llvm::Module *M = &CGM.getModule();
3893   ASTContext &C = getContext();
3894   QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3895 
3896   FunctionArgList FnArgs;
3897   ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3898   ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3899   ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3900                                         ImplicitParamKind::Other);
3901   FnArgs.push_back(&ArgCallsiteTypeId);
3902   FnArgs.push_back(&ArgAddr);
3903   FnArgs.push_back(&ArgCFICheckFailData);
3904   const CGFunctionInfo &FI =
3905       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
3906 
3907   llvm::Function *F = llvm::Function::Create(
3908       llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3909       llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3910   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3911   CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3912   F->setAlignment(llvm::Align(4096));
3913   CGM.setDSOLocal(F);
3914 
3915   llvm::LLVMContext &Ctx = M->getContext();
3916   llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3917   // The CrossDSOCFI pass is not executed if there is no executable code.
3918   SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3919   llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3920   llvm::ReturnInst::Create(Ctx, nullptr, BB);
3921 }
3922 
3923 // This function is basically a switch over the CFI failure kind, which is
3924 // extracted from CFICheckFailData (the first function argument). Each case is
3925 // either llvm.trap or a call to one of the two runtime handlers, based on the
3926 // -fsanitize-trap and -fsanitize-recover settings. The default case (invalid
3927 // failure kind) traps, but this should really never happen. CFICheckFailData
3928 // can be nullptr if the calling module has -fsanitize-trap behavior for this
3929 // check kind; in this case __cfi_check_fail traps as well.
3930 void CodeGenFunction::EmitCfiCheckFail() {
3931   auto CheckHandler = SanitizerHandler::CFICheckFail;
3932   // TODO: the SanitizerKind is not yet determined for this check (and might
3933   // not even be available, if Data == nullptr). However, we still want to
3934   // annotate the instrumentation. We approximate this by using all the CFI
3935   // kinds.
3936   SanitizerDebugLocation SanScope(
3937       this,
3938       {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
3939        SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
3940        SanitizerKind::SO_CFIICall},
3941       CheckHandler);
3942   FunctionArgList Args;
3943   ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3944                             ImplicitParamKind::Other);
3945   ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3946                             ImplicitParamKind::Other);
3947   Args.push_back(&ArgData);
3948   Args.push_back(&ArgAddr);
3949 
3950   const CGFunctionInfo &FI =
3951     CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3952 
3953   llvm::Function *F = llvm::Function::Create(
3954       llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3955       llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3956 
3957   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3958   CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3959   F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3960 
3961   StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3962                 SourceLocation());
3963 
3964   // This function is not affected by NoSanitizeList. This function does
3965   // not have a source location, but "src:*" would still apply. Revert any
3966   // changes to SanOpts made in StartFunction.
3967   SanOpts = CGM.getLangOpts().Sanitize;
3968 
3969   llvm::Value *Data =
3970       EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3971                        CGM.getContext().VoidPtrTy, ArgData.getLocation());
3972   llvm::Value *Addr =
3973       EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3974                        CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3975 
3976   // Data == nullptr means the calling module has trap behavior for this check.
3977   llvm::Value *DataIsNotNullPtr =
3978       Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3979   // TODO: since there is no data, we don't know the CheckKind, and therefore
3980   // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
3981   // NoMerge = false. Users can disable merging by disabling optimization.
3982   EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
3983                 /*NoMerge=*/false);
3984 
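  // Reconstruct the layout of the data block passed by the caller: an i8
  // check kind, a source location (a pointer plus two i32s), and a trailing
  // pointer. Only the check kind is inspected directly here; the whole block
  // is forwarded to the handlers below.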
3985   llvm::StructType *SourceLocationTy =
3986       llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3987   llvm::StructType *CfiCheckFailDataTy =
3988       llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3989 
3990   llvm::Value *V = Builder.CreateConstGEP2_32(
3991       CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
3992 
3993   Address CheckKindAddr(V, Int8Ty, getIntAlign());
3994   llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3995 
3996   llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3997       CGM.getLLVMContext(),
3998       llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3999   llvm::Value *ValidVtable = Builder.CreateZExt(
4000       Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
4001                          {Addr, AllVtables}),
4002       IntPtrTy);
4003 
4004   const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4005       {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4006       {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4007       {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4008       {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4009       {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4010 
4011   for (auto CheckKindOrdinalPair : CheckKinds) {
4012     int Kind = CheckKindOrdinalPair.first;
4013     SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4014 
4015     // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4016     //       relying on the SanitizerScope with all CFI ordinals
4017 
4018     llvm::Value *Cond =
4019         Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
4020     if (CGM.getLangOpts().Sanitize.has(Ordinal))
4021       EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
4022                 {}, {Data, Addr, ValidVtable});
4023     else
4024       // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4025       // Although the compiler allows SanitizeMergeHandlers to be set
4026       // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4027       // requires that SanitizeMergeHandlers is a subset of Sanitize.
4028       EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
4029   }
4030 
4031   FinishFunction();
4032   // The only reference to this function will be created during LTO link.
4033   // Make sure it survives until then.
4034   CGM.addUsedGlobal(F);
4035 }
4036 
4037 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
4038   if (SanOpts.has(SanitizerKind::Unreachable)) {
4039     auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4040     auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4041     SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
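    // The check condition is constant false, so if this point is reached at
    // runtime the handler is always invoked before the 'unreachable' below.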
4042     EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
4043                              CheckOrdinal),
4044               CheckHandler, EmitCheckSourceLocation(Loc), {});
4045   }
4046   Builder.CreateUnreachable();
4047 }
4048 
4049 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4050                                     SanitizerHandler CheckHandlerID,
4051                                     bool NoMerge) {
4052   llvm::BasicBlock *Cont = createBasicBlock("cont");
4053 
4054   // If we're optimizing, collapse all calls to trap down to just one per
4055   // check-type per function to save on code size.
4056   if ((int)TrapBBs.size() <= CheckHandlerID)
4057     TrapBBs.resize(CheckHandlerID + 1);
4058 
4059   llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4060 
4061   NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
4062             (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4063 
4064   llvm::MDBuilder MDHelper(getLLVMContext());
4065   if (TrapBB && !NoMerge) {
4066     auto Call = TrapBB->begin();
4067     assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4068 
4069     Call->applyMergedLocation(Call->getDebugLoc(),
4070                               Builder.getCurrentDebugLocation());
4071     Builder.CreateCondBr(Checked, Cont, TrapBB,
4072                          MDHelper.createLikelyBranchWeights());
4073   } else {
4074     TrapBB = createBasicBlock("trap");
4075     Builder.CreateCondBr(Checked, Cont, TrapBB,
4076                          MDHelper.createLikelyBranchWeights());
4077     EmitBlock(TrapBB);
4078 
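    // llvm.ubsantrap takes the handler ID as an immediate, so the specific
    // failed check can still be identified from the trap instruction itself.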
4079     llvm::CallInst *TrapCall =
4080         Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
4081                            llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
4082 
4083     if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4084       auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4085                                     CGM.getCodeGenOpts().TrapFuncName);
4086       TrapCall->addFnAttr(A);
4087     }
4088     if (NoMerge)
4089       TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4090     TrapCall->setDoesNotReturn();
4091     TrapCall->setDoesNotThrow();
4092     Builder.CreateUnreachable();
4093   }
4094 
4095   EmitBlock(Cont);
4096 }
4097 
4098 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4099   llvm::CallInst *TrapCall =
4100       Builder.CreateCall(CGM.getIntrinsic(IntrID));
4101 
4102   if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4103     auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4104                                   CGM.getCodeGenOpts().TrapFuncName);
4105     TrapCall->addFnAttr(A);
4106   }
4107 
4108   if (InNoMergeAttributedStmt)
4109     TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4110   return TrapCall;
4111 }
4112 
4113 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
4114                                                  LValueBaseInfo *BaseInfo,
4115                                                  TBAAAccessInfo *TBAAInfo) {
4116   assert(E->getType()->isArrayType() &&
4117          "Array to pointer decay must have array source type!");
4118 
4119   // Expressions of array type can't be bitfields or vector elements.
4120   LValue LV = EmitLValue(E);
4121   Address Addr = LV.getAddress();
4122 
4123   // If the array type was an incomplete type, we need to make sure
4124   // the decay ends up being the right type.
4125   llvm::Type *NewTy = ConvertType(E->getType());
4126   Addr = Addr.withElementType(NewTy);
4127 
4128   // Note that VLA pointers are always decayed, so we don't need to do
4129   // anything here.
4130   if (!E->getType()->isVariableArrayType()) {
4131     assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4132            "Expected pointer to array");
4133     Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4134   }
4135 
4136   // The result of this decay conversion points to an array element within the
4137   // base lvalue. However, since TBAA currently does not support representing
4138   // accesses to elements of member arrays, we conservatively represent accesses
4139   // to the pointee object as if it had no base lvalue specified.
4140   // TODO: Support TBAA for member arrays.
4141   QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
4142   if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4143   if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4144 
4145   return Addr.withElementType(ConvertTypeForMem(EltType));
4146 }
4147 
4148 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4149 /// array to pointer, return the array subexpression.
4150 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4151   // If this isn't just an array->pointer decay, bail out.
4152   const auto *CE = dyn_cast<CastExpr>(E);
4153   if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4154     return nullptr;
4155 
4156   // If this is a decay from variable width array, bail out.
4157   const Expr *SubExpr = CE->getSubExpr();
4158   if (SubExpr->getType()->isVariableArrayType())
4159     return nullptr;
4160 
4161   return SubExpr;
4162 }
4163 
4164 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
4165                                           llvm::Type *elemType,
4166                                           llvm::Value *ptr,
4167                                           ArrayRef<llvm::Value*> indices,
4168                                           bool inbounds,
4169                                           bool signedIndices,
4170                                           SourceLocation loc,
4171                                     const llvm::Twine &name = "arrayidx") {
4172   if (inbounds) {
4173     return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4174                                       CodeGenFunction::NotSubtraction, loc,
4175                                       name);
4176   } else {
4177     return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4178   }
4179 }
4180 
4181 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4182                                      ArrayRef<llvm::Value *> indices,
4183                                      llvm::Type *elementType, bool inbounds,
4184                                      bool signedIndices, SourceLocation loc,
4185                                      CharUnits align,
4186                                      const llvm::Twine &name = "arrayidx") {
4187   if (inbounds) {
4188     return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4189                                       CodeGenFunction::NotSubtraction, loc,
4190                                       align, name);
4191   } else {
4192     return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4193   }
4194 }
4195 
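/// Compute the alignment of an array element, given the alignment of the
/// array and the element size. For example (illustrative values), in a
/// 16-byte-aligned array of 4-byte elements, element 4 is again 16-byte
/// aligned, while an element at an unknown index can only be assumed to be
/// 4-byte aligned.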
4196 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
4197                                       llvm::Value *idx,
4198                                       CharUnits eltSize) {
4199   // If we have a constant index, we can use the exact offset of the
4200   // element we're accessing.
4201   if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
4202     CharUnits offset = constantIdx->getZExtValue() * eltSize;
4203     return arrayAlign.alignmentAtOffset(offset);
4204 
4205   // Otherwise, use the worst-case alignment for any element.
4206   } else {
4207     return arrayAlign.alignmentOfArrayElement(eltSize);
4208   }
4209 }
4210 
4211 static QualType getFixedSizeElementType(const ASTContext &ctx,
4212                                         const VariableArrayType *vla) {
4213   QualType eltType;
4214   do {
4215     eltType = vla->getElementType();
4216   } while ((vla = ctx.getAsVariableArrayType(eltType)));
4217   return eltType;
4218 }
4219 
4220 static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
4221   return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4222 }
4223 
4224 static bool hasBPFPreserveStaticOffset(const Expr *E) {
4225   if (!E)
4226     return false;
4227   QualType PointeeType = E->getType()->getPointeeType();
4228   if (PointeeType.isNull())
4229     return false;
4230   if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4231     return hasBPFPreserveStaticOffset(BaseDecl);
4232   return false;
4233 }
4234 
4235 // Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4236 static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4237                                                Address &Addr) {
4238   if (!CGF.getTarget().getTriple().isBPF())
4239     return Addr;
4240 
4241   llvm::Function *Fn =
4242       CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4243   llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4244   return Address(Call, Addr.getElementType(), Addr.getAlignment());
4245 }
4246 
4247 /// Given an array base, check whether its member access belongs to a record
4248 /// with the preserve_access_index attribute.
4249 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4250   if (!ArrayBase || !CGF.getDebugInfo())
4251     return false;
4252 
4253   // Only support base as either a MemberExpr or DeclRefExpr.
4254   // DeclRefExpr to cover cases like:
4255   //    struct s { int a; int b[10]; };
4256   //    struct s *p;
4257   //    p[1].a
4258   // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4259   // p->b[5] is a MemberExpr example.
4260   const Expr *E = ArrayBase->IgnoreImpCasts();
4261   if (const auto *ME = dyn_cast<MemberExpr>(E))
4262     return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4263 
4264   if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4265     const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4266     if (!VarDef)
4267       return false;
4268 
4269     const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4270     if (!PtrT)
4271       return false;
4272 
4273     const auto *PointeeT = PtrT->getPointeeType()
4274                              ->getUnqualifiedDesugaredType();
4275     if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4276       return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4277     return false;
4278   }
4279 
4280   return false;
4281 }
4282 
4283 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4284                                      ArrayRef<llvm::Value *> indices,
4285                                      QualType eltType, bool inbounds,
4286                                      bool signedIndices, SourceLocation loc,
4287                                      QualType *arrayType = nullptr,
4288                                      const Expr *Base = nullptr,
4289                                      const llvm::Twine &name = "arrayidx") {
4290   // All the indices except the last must be zero.
4291 #ifndef NDEBUG
4292   for (auto *idx : indices.drop_back())
4293     assert(isa<llvm::ConstantInt>(idx) &&
4294            cast<llvm::ConstantInt>(idx)->isZero());
4295 #endif
4296 
4297   // Determine the element size of the statically-sized base.  This is
4298   // the thing that the indices are expressed in terms of.
4299   if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4300     eltType = getFixedSizeElementType(CGF.getContext(), vla);
4301   }
4302 
4303   // We can use that to compute the best alignment of the element.
4304   CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4305   CharUnits eltAlign =
4306       getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4307 
4308   if (hasBPFPreserveStaticOffset(Base))
4309     addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4310 
4311   llvm::Value *eltPtr;
4312   auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4313   if (!LastIndex ||
4314       (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
4315     addr = emitArraySubscriptGEP(CGF, addr, indices,
4316                                  CGF.ConvertTypeForMem(eltType), inbounds,
4317                                  signedIndices, loc, eltAlign, name);
4318     return addr;
4319   } else {
4320     // Remember the original array subscript for the BPF target
4321     unsigned idx = LastIndex->getZExtValue();
4322     llvm::DIType *DbgInfo = nullptr;
4323     if (arrayType)
4324       DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4325     eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4326         addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4327         idx, DbgInfo);
4328   }
4329 
4330   return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4331 }
4332 
4333 namespace {
4334 
4335 /// StructFieldAccess is a simple visitor class to grab the first l-value to
4336 /// r-value cast Expr.
4337 struct StructFieldAccess
4338     : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4339   const Expr *VisitCastExpr(const CastExpr *E) {
4340     if (E->getCastKind() == CK_LValueToRValue)
4341       return E;
4342     return Visit(E->getSubExpr());
4343   }
4344   const Expr *VisitParenExpr(const ParenExpr *E) {
4345     return Visit(E->getSubExpr());
4346   }
4347 };
4348 
4349 } // end anonymous namespace
4350 
4351 /// Compute the bit offset of \p Field within \p RD. Returns false if not found.
4352 static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4353                                  const FieldDecl *Field, int64_t &Offset) {
4354   ASTContext &Ctx = CGF.getContext();
4355   const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4356   unsigned FieldNo = 0;
4357 
4358   for (const FieldDecl *FD : RD->fields()) {
4359     if (FD == Field) {
4360       Offset += Layout.getFieldOffset(FieldNo);
4361       return true;
4362     }
4363 
4364     QualType Ty = FD->getType();
4365     if (Ty->isRecordType())
4366       if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4367         Offset += Layout.getFieldOffset(FieldNo);
4368         return true;
4369       }
4370 
4371     if (!RD->isUnion())
4372       ++FieldNo;
4373   }
4374 
4375   return false;
4376 }
4377 
4378 /// Returns the relative offset difference between \p FD1 and \p FD2.
4379 /// \code
4380 ///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4381 /// \endcode
4382 /// Both fields must be within the same struct.
4383 static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4384                                                         const FieldDecl *FD1,
4385                                                         const FieldDecl *FD2) {
4386   const RecordDecl *FD1OuterRec =
4387       FD1->getParent()->getOuterLexicalRecordContext();
4388   const RecordDecl *FD2OuterRec =
4389       FD2->getParent()->getOuterLexicalRecordContext();
4390 
4391   if (FD1OuterRec != FD2OuterRec)
4392     // Fields must be within the same RecordDecl.
4393     return std::optional<int64_t>();
4394 
4395   int64_t FD1Offset = 0;
4396   if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4397     return std::optional<int64_t>();
4398 
4399   int64_t FD2Offset = 0;
4400   if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4401     return std::optional<int64_t>();
4402 
4403   return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4404 }
4405 
4406 /// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4407 /// attribute, generate bounds checking code. The "count" field is at the top
4408 /// level of the struct or in an anonymous struct that's also at the top level.
4409 /// Future expansions may allow the "count" to reside at any place in the
4410 /// struct, but the value of "counted_by" will be a "simple" path to the count,
4411 /// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4412 /// similar to emit the correct GEP.
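///
/// For example (illustrative only), this emits a check of the index against
/// the value of 'count' for an access like "p->array[idx]" given:
/// \code
///   struct bounded {
///     int count;
///     int array[] __attribute__((counted_by(count)));
///   };
/// \endcode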
4413 void CodeGenFunction::EmitCountedByBoundsChecking(
4414     const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy,
4415     QualType ArrayTy, bool Accessed, bool FlexibleArray) {
4416   const auto *ME = dyn_cast<MemberExpr>(E->IgnoreImpCasts());
4417   if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4418     return;
4419 
4420   const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4421       getLangOpts().getStrictFlexArraysLevel();
4422   if (FlexibleArray &&
4423       !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
4424     return;
4425 
4426   const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
4427   const FieldDecl *CountFD = FD->findCountedByField();
4428   if (!CountFD)
4429     return;
4430 
4431   if (std::optional<int64_t> Diff =
4432           getOffsetDifferenceInBits(*this, CountFD, FD)) {
4433     if (!Addr.isValid()) {
4434       // An invalid Address indicates we're checking a pointer array access.
4435       // Emit the checked L-Value here.
4436       LValue LV = EmitCheckedLValue(E, TCK_MemberAccess);
4437       Addr = LV.getAddress();
4438     }
4439 
4440     // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4441     // uint64_t, which messes things up if we have a negative offset difference.
4442     Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4443 
4444     // Create a GEP with the byte offset between the counted object and the
4445     // count and use that to load the count value.
4446     Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Int8PtrTy, Int8Ty);
4447 
4448     llvm::Type *CountTy = ConvertType(CountFD->getType());
4449     llvm::Value *Res =
4450         Builder.CreateInBoundsGEP(Int8Ty, Addr.emitRawPointer(*this),
4451                                   Builder.getInt32(*Diff), ".counted_by.gep");
4452     Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4453                                     ".counted_by.load");
4454 
4455     // Now emit the bounds checking.
4456     EmitBoundsCheckImpl(E, Res, Idx, IdxTy, ArrayTy, Accessed);
4457   }
4458 }
4459 
4460 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4461                                                bool Accessed) {
4462   // The index must always be an integer, which is not an aggregate.  Emit it
4463   // in lexical order (this complexity is, sadly, required by C++17).
4464   llvm::Value *IdxPre =
4465       (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4466   bool SignedIndices = false;
4467   auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4468     auto *Idx = IdxPre;
4469     if (E->getLHS() != E->getIdx()) {
4470       assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4471       Idx = EmitScalarExpr(E->getIdx());
4472     }
4473 
4474     QualType IdxTy = E->getIdx()->getType();
4475     bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4476     SignedIndices |= IdxSigned;
4477 
4478     if (SanOpts.has(SanitizerKind::ArrayBounds))
4479       EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4480 
4481     // Extend or truncate the index type to 32 or 64-bits.
4482     if (Promote && Idx->getType() != IntPtrTy)
4483       Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4484 
4485     return Idx;
4486   };
4487   IdxPre = nullptr;
4488 
4489   // If the base is a vector type, then we are forming a vector element lvalue
4490   // with this subscript.
4491   if (E->getBase()->getType()->isSubscriptableVectorType() &&
4492       !isa<ExtVectorElementExpr>(E->getBase())) {
4493     // Emit the vector as an lvalue to get its address.
4494     LValue LHS = EmitLValue(E->getBase());
4495     auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4496     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4497     return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4498                                  LHS.getBaseInfo(), TBAAAccessInfo());
4499   }
4500 
4501   // All the other cases basically behave like simple offsetting.
4502 
4503   // Handle the extvector case we ignored above.
4504   if (isa<ExtVectorElementExpr>(E->getBase())) {
4505     LValue LV = EmitLValue(E->getBase());
4506     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4507     Address Addr = EmitExtVectorElementLValue(LV);
4508 
4509     QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4510     Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4511                                  SignedIndices, E->getExprLoc());
4512     return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4513                           CGM.getTBAAInfoForSubobject(LV, EltType));
4514   }
4515 
4516   LValueBaseInfo EltBaseInfo;
4517   TBAAAccessInfo EltTBAAInfo;
4518   Address Addr = Address::invalid();
4519   if (const VariableArrayType *vla =
4520            getContext().getAsVariableArrayType(E->getType())) {
4521     // The base must be a pointer, which is not an aggregate.  Emit
4522     // it.  It needs to be emitted first in case it's what captures
4523     // the VLA bounds.
4524     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4525     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4526 
4527     // The element count here is the total number of non-VLA elements.
4528     llvm::Value *numElements = getVLASize(vla).NumElts;
4529 
4530     // Effectively, the multiply by the VLA size is part of the GEP.
4531     // GEP indexes are signed, and scaling an index isn't permitted to
4532     // signed-overflow, so we use the same semantics for our explicit
4533     // multiply.  We suppress this if overflow is not undefined behavior.
4534     if (getLangOpts().PointerOverflowDefined) {
4535       Idx = Builder.CreateMul(Idx, numElements);
4536     } else {
4537       Idx = Builder.CreateNSWMul(Idx, numElements);
4538     }
4539 
4540     Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4541                                  !getLangOpts().PointerOverflowDefined,
4542                                  SignedIndices, E->getExprLoc());
4543 
4544   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4545     // Indexing over an interface, as in "NSString *P; P[4];"
4546 
4547     // Emit the base pointer.
4548     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4549     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4550 
4551     CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4552     llvm::Value *InterfaceSizeVal =
4553         llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4554 
4555     llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4556 
4557     // We don't necessarily build correct LLVM struct types for ObjC
4558     // interfaces, so we can't rely on GEP to do this scaling
4559     // correctly; instead we need to cast to i8*. FIXME: is this actually
4560     // true? A lot of other things in the fragile ABI would break...
4561     llvm::Type *OrigBaseElemTy = Addr.getElementType();
4562 
4563     // Do the GEP.
4564     CharUnits EltAlign =
4565       getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4566     llvm::Value *EltPtr =
4567         emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4568                               ScaledIdx, false, SignedIndices, E->getExprLoc());
4569     Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4570   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4571     // If this is A[i] where A is an array, the frontend will have decayed the
4572     // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
4573     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4574     // "gep x, i" here.  Emit one "gep A, 0, i".
4575     assert(Array->getType()->isArrayType() &&
4576            "Array to pointer decay must have array source type!");
4577     LValue ArrayLV;
4578     // For simple multidimensional array indexing, set the 'accessed' flag for
4579     // better bounds-checking of the base expression.
4580     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4581       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4582     else
4583       ArrayLV = EmitLValue(Array);
4584     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4585 
4586     if (SanOpts.has(SanitizerKind::ArrayBounds))
4587       EmitCountedByBoundsChecking(Array, Idx, ArrayLV.getAddress(),
4588                                   E->getIdx()->getType(), Array->getType(),
4589                                   Accessed, /*FlexibleArray=*/true);
4590 
4591     // Propagate the alignment from the array itself to the result.
4592     QualType arrayType = Array->getType();
4593     Addr = emitArraySubscriptGEP(
4594         *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4595         E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
4596         E->getExprLoc(), &arrayType, E->getBase());
4597     EltBaseInfo = ArrayLV.getBaseInfo();
4598     if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
4599       // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
4600       // new struct path TBAA, we must use a plain access.
4601       EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4602     } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
4603       EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4604     } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
4605       // The array element is complete, even if the array is not.
4606       EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
4607     } else {
4608       // The TBAA access info from the array (base) lvalue is ordinary. We will
4609       // adapt it to create access info for the element.
4610       EltTBAAInfo = ArrayLV.getTBAAInfo();
4611 
4612       // We retain the TBAA struct path (BaseType and Offset members) from the
4613       // array. In the TBAA representation, we map any array access to the
4614       // element at index 0, as the index is generally a runtime value. This
4615       // element has the same offset in the base type as the array itself.
4616       // If the array lvalue had no base type, there is no point trying to
4617       // generate one, since an array itself is not a valid base type.
4618 
4619       // We also retain the access type from the base lvalue, but the access
4620       // size must be updated to the size of an individual element.
4621       EltTBAAInfo.Size =
4622           getContext().getTypeSizeInChars(E->getType()).getQuantity();
4623     }
4624   } else {
4625     // The base must be a pointer; emit it with an estimate of its alignment.
4626     Address BaseAddr =
4627         EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4628     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4629     QualType ptrType = E->getBase()->getType();
4630     Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
4631                                  !getLangOpts().PointerOverflowDefined,
4632                                  SignedIndices, E->getExprLoc(), &ptrType,
4633                                  E->getBase());
4634 
4635     if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4636       StructFieldAccess Visitor;
4637       const Expr *Base = Visitor.Visit(E->getBase());
4638 
4639       if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
4640           CE && CE->getCastKind() == CK_LValueToRValue)
4641         EmitCountedByBoundsChecking(CE, Idx, Address::invalid(),
4642                                     E->getIdx()->getType(), ptrType, Accessed,
4643                                     /*FlexibleArray=*/false);
4644     }
4645   }
4646 
4647   LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4648 
4649   if (getLangOpts().ObjC &&
4650       getLangOpts().getGC() != LangOptions::NonGC) {
4651     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4652     setObjCGCLValueClass(getContext(), E, LV);
4653   }
4654   return LV;
4655 }
4656 
4657 llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4658   llvm::Value *Idx = EmitScalarExpr(E);
4659   if (Idx->getType() == IntPtrTy)
4660     return Idx;
4661   bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4662   return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4663 }
4664 
4665 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4666   assert(
4667       !E->isIncomplete() &&
4668       "incomplete matrix subscript expressions should be rejected during Sema");
4669   LValue Base = EmitLValue(E->getBase());
4670 
4671   // Extend or truncate the index type to 32 or 64-bits if needed.
4672   llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4673   llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4674 
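  // Matrix values are laid out in column-major order, so the flat element
  // index is column * rows + row.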
4675   llvm::Value *NumRows = Builder.getIntN(
4676       RowIdx->getType()->getScalarSizeInBits(),
4677       E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4678   llvm::Value *FinalIdx =
4679       Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4680   return LValue::MakeMatrixElt(
4681       MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4682       E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4683 }
4684 
4685 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4686                                        LValueBaseInfo &BaseInfo,
4687                                        TBAAAccessInfo &TBAAInfo,
4688                                        QualType BaseTy, QualType ElTy,
4689                                        bool IsLowerBound) {
4690   LValue BaseLVal;
4691   if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4692     BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4693     if (BaseTy->isArrayType()) {
4694       Address Addr = BaseLVal.getAddress();
4695       BaseInfo = BaseLVal.getBaseInfo();
4696 
4697       // If the array type was an incomplete type, we need to make sure
4698       // the decay ends up being the right type.
4699       llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4700       Addr = Addr.withElementType(NewTy);
4701 
4702       // Note that VLA pointers are always decayed, so we don't need to do
4703       // anything here.
4704       if (!BaseTy->isVariableArrayType()) {
4705         assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4706                "Expected pointer to array");
4707         Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4708       }
4709 
4710       return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4711     }
4712     LValueBaseInfo TypeBaseInfo;
4713     TBAAAccessInfo TypeTBAAInfo;
4714     CharUnits Align =
4715         CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4716     BaseInfo.mergeForCast(TypeBaseInfo);
4717     TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4718     return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4719                    CGF.ConvertTypeForMem(ElTy), Align);
4720   }
4721   return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4722 }
4723 
4724 LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4725                                              bool IsLowerBound) {
4726 
4727   assert(!E->isOpenACCArraySection() &&
4728          "OpenACC Array section codegen not implemented");
4729 
4730   QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4731   QualType ResultExprTy;
4732   if (auto *AT = getContext().getAsArrayType(BaseTy))
4733     ResultExprTy = AT->getElementType();
4734   else
4735     ResultExprTy = BaseTy->getPointeeType();
4736   llvm::Value *Idx = nullptr;
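  // For an OpenMP array section such as "a[lb:len]", compute the index of
  // either the first element (lb) or the last element (lb + len - 1),
  // depending on IsLowerBound.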
4737   if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4738     // Requesting lower bound or upper bound, but without provided length and
4739     // without ':' symbol for the default length -> length = 1.
4740     // Idx = LowerBound ?: 0;
4741     if (auto *LowerBound = E->getLowerBound()) {
4742       Idx = Builder.CreateIntCast(
4743           EmitScalarExpr(LowerBound), IntPtrTy,
4744           LowerBound->getType()->hasSignedIntegerRepresentation());
4745     } else
4746       Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4747   } else {
4748     // Try to emit the length or lower bound as a constant. If this is
4749     // possible, 1 is subtracted from the constant length or lower bound.
4750     // Otherwise, emit LLVM IR computing (LB + Len) - 1.
4751     auto &C = CGM.getContext();
4752     auto *Length = E->getLength();
4753     llvm::APSInt ConstLength;
4754     if (Length) {
4755       // Idx = LowerBound + Length - 1;
4756       if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4757         ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4758         Length = nullptr;
4759       }
4760       auto *LowerBound = E->getLowerBound();
4761       llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4762       if (LowerBound) {
4763         if (std::optional<llvm::APSInt> LB =
4764                 LowerBound->getIntegerConstantExpr(C)) {
4765           ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4766           LowerBound = nullptr;
4767         }
4768       }
4769       if (!Length)
4770         --ConstLength;
4771       else if (!LowerBound)
4772         --ConstLowerBound;
4773 
4774       if (Length || LowerBound) {
4775         auto *LowerBoundVal =
4776             LowerBound
4777                 ? Builder.CreateIntCast(
4778                       EmitScalarExpr(LowerBound), IntPtrTy,
4779                       LowerBound->getType()->hasSignedIntegerRepresentation())
4780                 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4781         auto *LengthVal =
4782             Length
4783                 ? Builder.CreateIntCast(
4784                       EmitScalarExpr(Length), IntPtrTy,
4785                       Length->getType()->hasSignedIntegerRepresentation())
4786                 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4787         Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4788                                 /*HasNUW=*/false,
4789                                 !getLangOpts().PointerOverflowDefined);
4790         if (Length && LowerBound) {
4791           Idx = Builder.CreateSub(
4792               Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4793               /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4794         }
4795       } else
4796         Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4797     } else {
4798       // Idx = ArraySize - 1;
4799       QualType ArrayTy = BaseTy->isPointerType()
4800                              ? E->getBase()->IgnoreParenImpCasts()->getType()
4801                              : BaseTy;
4802       if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4803         Length = VAT->getSizeExpr();
4804         if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4805           ConstLength = *L;
4806           Length = nullptr;
4807         }
4808       } else {
4809         auto *CAT = C.getAsConstantArrayType(ArrayTy);
4810         assert(CAT && "unexpected type for array initializer");
4811         ConstLength = CAT->getSize();
4812       }
4813       if (Length) {
4814         auto *LengthVal = Builder.CreateIntCast(
4815             EmitScalarExpr(Length), IntPtrTy,
4816             Length->getType()->hasSignedIntegerRepresentation());
4817         Idx = Builder.CreateSub(
4818             LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4819             /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4820       } else {
4821         ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4822         --ConstLength;
4823         Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4824       }
4825     }
4826   }
4827   assert(Idx);
4828 
4829   Address EltPtr = Address::invalid();
4830   LValueBaseInfo BaseInfo;
4831   TBAAAccessInfo TBAAInfo;
4832   if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4833     // The base must be a pointer, which is not an aggregate.  Emit
4834     // it.  It needs to be emitted first in case it's what captures
4835     // the VLA bounds.
4836     Address Base =
4837         emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4838                                 BaseTy, VLA->getElementType(), IsLowerBound);
4839     // The element count here is the total number of non-VLA elements.
4840     llvm::Value *NumElements = getVLASize(VLA).NumElts;
4841 
4842     // Effectively, the multiply by the VLA size is part of the GEP.
4843     // GEP indexes are signed, and scaling an index isn't permitted to
4844     // signed-overflow, so we use the same semantics for our explicit
4845     // multiply.  We suppress this if overflow is not undefined behavior.
4846     if (getLangOpts().PointerOverflowDefined)
4847       Idx = Builder.CreateMul(Idx, NumElements);
4848     else
4849       Idx = Builder.CreateNSWMul(Idx, NumElements);
4850     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4851                                    !getLangOpts().PointerOverflowDefined,
4852                                    /*signedIndices=*/false, E->getExprLoc());
4853   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4854     // If this is A[i] where A is an array, the frontend will have decayed the
4855     // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
4856     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4857     // "gep x, i" here.  Emit one "gep A, 0, i".
4858     assert(Array->getType()->isArrayType() &&
4859            "Array to pointer decay must have array source type!");
4860     LValue ArrayLV;
4861     // For simple multidimensional array indexing, set the 'accessed' flag for
4862     // better bounds-checking of the base expression.
4863     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4864       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4865     else
4866       ArrayLV = EmitLValue(Array);
4867 
4868     // Propagate the alignment from the array itself to the result.
4869     EltPtr = emitArraySubscriptGEP(
4870         *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4871         ResultExprTy, !getLangOpts().PointerOverflowDefined,
4872         /*signedIndices=*/false, E->getExprLoc());
4873     BaseInfo = ArrayLV.getBaseInfo();
4874     TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4875   } else {
4876     Address Base =
4877         emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4878                                 ResultExprTy, IsLowerBound);
4879     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4880                                    !getLangOpts().PointerOverflowDefined,
4881                                    /*signedIndices=*/false, E->getExprLoc());
4882   }
4883 
4884   return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4885 }
4886 
4887 LValue CodeGenFunction::
4888 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4889   // Emit the base vector as an l-value.
4890   LValue Base;
4891 
4892   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4893   if (E->isArrow()) {
4894     // If it is a pointer to a vector, emit the address and form an lvalue with
4895     // it.
4896     LValueBaseInfo BaseInfo;
4897     TBAAAccessInfo TBAAInfo;
4898     Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4899     const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4900     Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4901     Base.getQuals().removeObjCGCAttr();
4902   } else if (E->getBase()->isGLValue()) {
4903     // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4904     // emit the base as an lvalue.
4905     assert(E->getBase()->getType()->isVectorType());
4906     Base = EmitLValue(E->getBase());
4907   } else {
4908     // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4909     assert(E->getBase()->getType()->isVectorType() &&
4910            "Result must be a vector");
4911     llvm::Value *Vec = EmitScalarExpr(E->getBase());
4912 
4913     // Store the vector to memory (because LValue wants an address).
4914     Address VecMem = CreateMemTemp(E->getBase()->getType());
4915     // Need to zero-extend an HLSL boolean vector to store it back to memory
4916     QualType Ty = E->getBase()->getType();
4917     llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
4918     if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
4919       Vec = Builder.CreateZExt(Vec, LTy);
4920     Builder.CreateStore(Vec, VecMem);
4921     Base = MakeAddrLValue(VecMem, Ty, AlignmentSource::Decl);
4922   }
4923 
4924   QualType type =
4925     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4926 
4927   // Encode the element access list into a vector of unsigned indices.
4928   SmallVector<uint32_t, 4> Indices;
4929   E->getEncodedElementAccess(Indices);
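  // For example, for a 4-element ext_vector_type value V, "V.xz" encodes to
  // the index list {0, 2}.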
4930 
4931   if (Base.isSimple()) {
4932     llvm::Constant *CV =
4933         llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4934     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
4935                                     Base.getBaseInfo(), TBAAAccessInfo());
4936   }
4937   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4938 
4939   llvm::Constant *BaseElts = Base.getExtVectorElts();
4940   SmallVector<llvm::Constant *, 4> CElts;
4941 
4942   for (unsigned Index : Indices)
4943     CElts.push_back(BaseElts->getAggregateElement(Index));
4944   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4945   return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4946                                   Base.getBaseInfo(), TBAAAccessInfo());
4947 }
4948 
4949 bool CodeGenFunction::isUnderlyingBasePointerConstantNull(const Expr *E) {
4950   const Expr *UnderlyingBaseExpr = E->IgnoreParens();
4951   while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
4952     UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
4953   return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
4954 }
4955 
4956 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4957   if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4958     EmitIgnoredExpr(E->getBase());
4959     return EmitDeclRefLValue(DRE);
4960   }
4961 
4962   Expr *BaseExpr = E->getBase();
4963   // Check whether the underlying base pointer is a constant null.
4964   // If so, we do not set the inbounds flag for the GEP to avoid breaking some
4965   // old-style offsetof idioms.
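  // A classic example is "(size_t)&(((struct S *)0)->f)", which computes a
  // field offset by applying member access to a null pointer constant.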
4966   bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
4967                     !isUnderlyingBasePointerConstantNull(BaseExpr);
4968   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
4969   LValue BaseLV;
4970   if (E->isArrow()) {
4971     LValueBaseInfo BaseInfo;
4972     TBAAAccessInfo TBAAInfo;
4973     Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4974     QualType PtrTy = BaseExpr->getType()->getPointeeType();
4975     SanitizerSet SkippedChecks;
4976     bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4977     if (IsBaseCXXThis)
4978       SkippedChecks.set(SanitizerKind::Alignment, true);
4979     if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4980       SkippedChecks.set(SanitizerKind::Null, true);
4981     EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4982                   /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4983     BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4984   } else
4985     BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4986 
4987   NamedDecl *ND = E->getMemberDecl();
4988   if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4989     LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
4990     setObjCGCLValueClass(getContext(), E, LV);
4991     if (getLangOpts().OpenMP) {
4992       // If the member was explicitly marked as nontemporal, mark it as
4993       // nontemporal. If the base lvalue is marked as nontemporal, mark access
4994       // to children as nontemporal too.
4995       if ((IsWrappedCXXThis(BaseExpr) &&
4996            CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4997           BaseLV.isNontemporal())
4998         LV.setNontemporal(/*Value=*/true);
4999     }
5000     return LV;
5001   }
5002 
5003   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
5004     return EmitFunctionDeclLValue(*this, E, FD);
5005 
5006   llvm_unreachable("Unhandled member declaration!");
5007 }
5008 
5009 /// Given that we are currently emitting a lambda, emit an l-value for
5010 /// one of its members.
5011 ///
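/// A hedged, illustrative example: for a C++23 explicit-object lambda such as
///   auto L = [x](this auto &&Self) { return x; };
/// the reference to the capture `x` is addressed through the explicit object
/// parameter `Self` rather than through CXXABIThisValue.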
5012 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
5013                                                  llvm::Value *ThisValue) {
5014   bool HasExplicitObjectParameter = false;
5015   const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
5016   if (MD) {
5017     HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5018     assert(MD->getParent()->isLambda());
5019     assert(MD->getParent() == Field->getParent());
5020   }
5021   LValue LambdaLV;
5022   if (HasExplicitObjectParameter) {
5023     const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
5024     auto It = LocalDeclMap.find(D);
5025     assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5026     Address AddrOfExplicitObject = It->getSecond();
5027     if (D->getType()->isReferenceType())
5028       LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
5029                                            AlignmentSource::Decl);
5030     else
5031       LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
5032                                 D->getType().getNonReferenceType());
5033 
5034     // Make sure we have an lvalue to the lambda itself and not a derived class.
5035     auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5036     auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
5037     if (ThisTy != LambdaTy) {
5038       const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
5039       Address Base = GetAddressOfBaseClass(
5040           LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
5041           BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
5042       LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
5043     }
5044   } else {
5045     QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
5046     LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
5047   }
5048   return EmitLValueForField(LambdaLV, Field);
5049 }
5050 
5051 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
5052   return EmitLValueForLambdaField(Field, CXXABIThisValue);
5053 }
5054 
5055 /// Get the field index in the debug info. The debug info for a structure or
5056 /// union ignores unnamed bit-fields.
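///
/// An illustrative (non-normative) example:
///   struct S { int a; int : 0; int b; };
/// The AST field index of `b` is 2, but its debug-info index is 1 because the
/// unnamed zero-width bit-field is skipped.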
5057 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
5058                                              unsigned FieldIndex) {
5059   unsigned I = 0, Skipped = 0;
5060 
5061   for (auto *F : Rec->getDefinition()->fields()) {
5062     if (I == FieldIndex)
5063       break;
5064     if (F->isUnnamedBitField())
5065       Skipped++;
5066     I++;
5067   }
5068 
5069   return FieldIndex - Skipped;
5070 }
5071 
5072 /// Get the address of a zero-sized field within a record. The resulting
5073 /// address doesn't necessarily have the right type.
5074 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
5075                                        const FieldDecl *Field,
5076                                        bool IsInBounds) {
5077   CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
5078       CGF.getContext().getFieldOffset(Field));
5079   if (Offset.isZero())
5080     return Base;
5081   Base = Base.withElementType(CGF.Int8Ty);
5082   if (!IsInBounds)
5083     return CGF.Builder.CreateConstByteGEP(Base, Offset);
5084   return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
5085 }
5086 
5087 /// Drill down to the storage of a field without walking into
5088 /// reference types.
5089 ///
5090 /// The resulting address doesn't necessarily have the right type.
5091 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
5092                                       const FieldDecl *field, bool IsInBounds) {
5093   if (isEmptyFieldForLayout(CGF.getContext(), field))
5094     return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
5095 
5096   const RecordDecl *rec = field->getParent();
5097 
5098   unsigned idx =
5099     CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5100 
5101   if (!IsInBounds)
5102     return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());
5103 
5104   return CGF.Builder.CreateStructGEP(base, idx, field->getName());
5105 }
5106 
5107 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
5108                                         Address addr, const FieldDecl *field) {
5109   const RecordDecl *rec = field->getParent();
5110   llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5111       base.getType(), rec->getLocation());
5112 
5113   unsigned idx =
5114       CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5115 
5116   return CGF.Builder.CreatePreserveStructAccessIndex(
5117       addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
5118 }
5119 
5120 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5121   const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5122   if (!RD)
5123     return false;
5124 
5125   if (RD->isDynamicClass())
5126     return true;
5127 
5128   for (const auto &Base : RD->bases())
5129     if (hasAnyVptr(Base.getType(), Context))
5130       return true;
5131 
5132   for (const FieldDecl *Field : RD->fields())
5133     if (hasAnyVptr(Field->getType(), Context))
5134       return true;
5135 
5136   return false;
5137 }
5138 
5139 LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
5140                                            bool IsInBounds) {
5141   LValueBaseInfo BaseInfo = base.getBaseInfo();
5142 
5143   if (field->isBitField()) {
5144     const CGRecordLayout &RL =
5145         CGM.getTypes().getCGRecordLayout(field->getParent());
5146     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
5147     const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
5148                              CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5149                              Info.VolatileStorageSize != 0 &&
5150                              field->getType()
5151                                  .withCVRQualifiers(base.getVRQualifiers())
5152                                  .isVolatileQualified();
5153     Address Addr = base.getAddress();
5154     unsigned Idx = RL.getLLVMFieldNo(field);
5155     const RecordDecl *rec = field->getParent();
5156     if (hasBPFPreserveStaticOffset(rec))
5157       Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
5158     if (!UseVolatile) {
5159       if (!IsInPreservedAIRegion &&
5160           (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5161         if (Idx != 0) {
5162           // For structs, we GEP to the field that the record layout suggests.
5163           if (!IsInBounds)
5164             Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
5165           else
5166             Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
5167         }
5168       } else {
5169         llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5170             getContext().getRecordType(rec), rec->getLocation());
5171         Addr = Builder.CreatePreserveStructAccessIndex(
5172             Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
5173             DbgInfo);
5174       }
5175     }
5176     const unsigned SS =
5177         UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5178     // Get the access type.
5179     llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
5180     Addr = Addr.withElementType(FieldIntTy);
5181     if (UseVolatile) {
5182       const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5183       if (VolatileOffset)
5184         Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
5185     }
5186 
5187     QualType fieldType =
5188         field->getType().withCVRQualifiers(base.getVRQualifiers());
5189     // TODO: Support TBAA for bit fields.
5190     LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5191     return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
5192                                 TBAAAccessInfo());
5193   }
5194 
5195   // Fields of may-alias structures are may-alias themselves.
5196   // FIXME: this should get propagated down through anonymous structs
5197   // and unions.
5198   QualType FieldType = field->getType();
5199   const RecordDecl *rec = field->getParent();
5200   AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5201   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
5202   TBAAAccessInfo FieldTBAAInfo;
5203   if (base.getTBAAInfo().isMayAlias() ||
5204           rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5205     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5206   } else if (rec->isUnion()) {
5207     // TODO: Support TBAA for unions.
5208     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5209   } else {
5210     // If no base type has been assigned for the base access, try to
5211     // generate one for this base lvalue.
5212     FieldTBAAInfo = base.getTBAAInfo();
5213     if (!FieldTBAAInfo.BaseType) {
5214       FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
5215       assert(!FieldTBAAInfo.Offset &&
5216              "Nonzero offset for an access with no base type!");
5217     }
5218 
5219     // Adjust offset to be relative to the base type.
5220     const ASTRecordLayout &Layout =
5221         getContext().getASTRecordLayout(field->getParent());
5222     unsigned CharWidth = getContext().getCharWidth();
5223     if (FieldTBAAInfo.BaseType)
5224       FieldTBAAInfo.Offset +=
5225           Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
5226 
5227     // Update the final access type and size.
5228     FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
5229     FieldTBAAInfo.Size =
5230         getContext().getTypeSizeInChars(FieldType).getQuantity();
5231   }
5232 
5233   Address addr = base.getAddress();
5234   if (hasBPFPreserveStaticOffset(rec))
5235     addr = wrapWithBPFPreserveStaticOffset(*this, addr);
5236   if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5237     if (CGM.getCodeGenOpts().StrictVTablePointers &&
5238         ClassDef->isDynamicClass()) {
5239       // Getting to any field of a dynamic object requires stripping the dynamic
5240       // information provided by invariant.group. This is because accessing
5241       // fields may leak the real address of the dynamic object, which could
5242       // result in miscompilation when the leaked pointer is later compared.
5243       auto *stripped =
5244           Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5245       addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5246     }
5247   }
5248 
5249   unsigned RecordCVR = base.getVRQualifiers();
5250   if (rec->isUnion()) {
5251     // For unions, there is no pointer adjustment.
5252     if (CGM.getCodeGenOpts().StrictVTablePointers &&
5253         hasAnyVptr(FieldType, getContext()))
5254       // Because unions can easily skip invariant.barriers, we need to add
5255       // a barrier every time a CXXRecord field with a vptr is referenced.
5256       addr = Builder.CreateLaunderInvariantGroup(addr);
5257 
5258     if (IsInPreservedAIRegion ||
5259         (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5260       // Remember the original union field index
5261       llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
5262           base.getType(), rec->getLocation());
5263       addr =
5264           Address(Builder.CreatePreserveUnionAccessIndex(
5265                       addr.emitRawPointer(*this),
5266                       getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5267                   addr.getElementType(), addr.getAlignment());
5268     }
5269 
5270     if (FieldType->isReferenceType())
5271       addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5272   } else {
5273     if (!IsInPreservedAIRegion &&
5274         (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5275       // For structs, we GEP to the field that the record layout suggests.
5276       addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
5277     else
5278       // Remember the original struct field index
5279       addr = emitPreserveStructAccess(*this, base, addr, field);
5280   }
5281 
5282   // If this is a reference field, load the reference right now.
5283   if (FieldType->isReferenceType()) {
5284     LValue RefLVal =
5285         MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5286     if (RecordCVR & Qualifiers::Volatile)
5287       RefLVal.getQuals().addVolatile();
5288     addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5289 
5290     // Qualifiers on the struct don't apply to the referencee.
5291     RecordCVR = 0;
5292     FieldType = FieldType->getPointeeType();
5293   }
5294 
5295   // Make sure that the address is pointing to the right type.  This is critical
5296   // for both unions and structs.
5297   addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5298 
5299   if (field->hasAttr<AnnotateAttr>())
5300     addr = EmitFieldAnnotations(field, addr);
5301 
5302   LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5303   LV.getQuals().addCVRQualifiers(RecordCVR);
5304 
5305   // __weak attribute on a field is ignored.
5306   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
5307     LV.getQuals().removeObjCGCAttr();
5308 
5309   return LV;
5310 }
5311 
5312 LValue
5313 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
5314                                                   const FieldDecl *Field) {
5315   QualType FieldType = Field->getType();
5316 
5317   if (!FieldType->isReferenceType())
5318     return EmitLValueForField(Base, Field);
5319 
5320   Address V = emitAddrOfFieldStorage(
5321       *this, Base.getAddress(), Field,
5322       /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5323 
5324   // Make sure that the address is pointing to the right type.
5325   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5326   V = V.withElementType(llvmType);
5327 
5328   // TODO: Generate TBAA information that describes this access as a structure
5329   // member access and not just an access to an object of the field's type. This
5330   // should be similar to what we do in EmitLValueForField().
5331   LValueBaseInfo BaseInfo = Base.getBaseInfo();
5332   AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5333   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5334   return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5335                         CGM.getTBAAInfoForSubobject(Base, FieldType));
5336 }
5337 
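// An illustrative (hedged) example: in C, a block-scope compound literal such
// as `(struct S){ .x = 1 }` is materialized in a stack temporary named
// ".compoundliteral", while a file-scope compound literal becomes a constant
// global (see GetAddrOfConstantCompoundLiteral below).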
5338 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
5339   if (E->isFileScope()) {
5340     ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5341     return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5342   }
5343   if (E->getType()->isVariablyModifiedType())
5344     // make sure to emit the VLA size.
5345     EmitVariablyModifiedType(E->getType());
5346 
5347   Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5348   const Expr *InitExpr = E->getInitializer();
5349   LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
5350 
5351   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5352                    /*Init*/ true);
5353 
5354   // Block-scope compound literals are destroyed at the end of the enclosing
5355   // scope in C.
5356   if (!getLangOpts().CPlusPlus)
5357     if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
5358       pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
5359                                   E->getType(), getDestroyer(DtorKind),
5360                                   DtorKind & EHCleanup);
5361 
5362   return Result;
5363 }
5364 
5365 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
5366   if (!E->isGLValue())
5367     // Initializing an aggregate temporary in C++11: T{...}.
5368     return EmitAggExprToLValue(E);
5369 
5370   // An lvalue initializer list must be initializing a reference.
5371   assert(E->isTransparent() && "non-transparent glvalue init list");
5372   return EmitLValue(E->getInit(0));
5373 }
5374 
5375 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
5376 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5377 /// LValue is returned and the current block has been terminated.
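///
/// For example (illustrative only): when `cond ? x : throw Err()` is used as a
/// glvalue, only the non-throwing arm yields an LValue; emitting the throwing
/// arm terminates its block.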
5378 static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5379                                                          const Expr *Operand) {
5380   if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5381     CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5382     return std::nullopt;
5383   }
5384 
5385   return CGF.EmitLValue(Operand);
5386 }
5387 
5388 namespace {
5389 // Handle the case where the condition constant-folds to a simple integer,
5390 // which means we don't have to handle the true/false blocks separately.
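//
// A hedged, illustrative example: in `(true ? a : b) = 1;` the condition folds
// to true, so only `a` is emitted as an lvalue and `b` is merely marked as
// maybe-used (provided it contains no labels).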
5391 std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5392     CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5393   const Expr *condExpr = E->getCond();
5394   bool CondExprBool;
5395   if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5396     const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5397     if (!CondExprBool)
5398       std::swap(Live, Dead);
5399 
5400     if (!CGF.ContainsLabel(Dead)) {
5401       // If the true case is live, we need to track its region.
5402       if (CondExprBool)
5403         CGF.incrementProfileCounter(E);
5404       CGF.markStmtMaybeUsed(Dead);
5405       // If the live arm is a throw expression, emit it and return an undefined
5406       // lvalue, because the result can't be used.
5407       if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5408         CGF.EmitCXXThrowExpr(ThrowExpr);
5409         llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5410         llvm::Type *Ty = CGF.UnqualPtrTy;
5411         return CGF.MakeAddrLValue(
5412             Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5413             Dead->getType());
5414       }
5415       return CGF.EmitLValue(Live);
5416     }
5417   }
5418   return std::nullopt;
5419 }
5420 struct ConditionalInfo {
5421   llvm::BasicBlock *lhsBlock, *rhsBlock;
5422   std::optional<LValue> LHS, RHS;
5423 };
5424 
5425 // Create and generate the 3 blocks for a conditional operator.
5426 // Leaves the 'current block' in the continuation basic block.
5427 template<typename FuncTy>
5428 ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5429                                       const AbstractConditionalOperator *E,
5430                                       const FuncTy &BranchGenFunc) {
5431   ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5432                        CGF.createBasicBlock("cond.false"), std::nullopt,
5433                        std::nullopt};
5434   llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5435 
5436   CodeGenFunction::ConditionalEvaluation eval(CGF);
5437   CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5438                            CGF.getProfileCount(E));
5439 
5440   // Any temporaries created here are conditional.
5441   CGF.EmitBlock(Info.lhsBlock);
5442   CGF.incrementProfileCounter(E);
5443   eval.begin(CGF);
5444   Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5445   eval.end(CGF);
5446   Info.lhsBlock = CGF.Builder.GetInsertBlock();
5447 
5448   if (Info.LHS)
5449     CGF.Builder.CreateBr(endBlock);
5450 
5451   // Any temporaries created here are conditional.
5452   CGF.EmitBlock(Info.rhsBlock);
5453   eval.begin(CGF);
5454   Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5455   eval.end(CGF);
5456   Info.rhsBlock = CGF.Builder.GetInsertBlock();
5457   CGF.EmitBlock(endBlock);
5458 
5459   return Info;
5460 }
5461 } // namespace
5462 
5463 void CodeGenFunction::EmitIgnoredConditionalOperator(
5464     const AbstractConditionalOperator *E) {
5465   if (!E->isGLValue()) {
5466     // ?: here should be an aggregate.
5467     assert(hasAggregateEvaluationKind(E->getType()) &&
5468            "Unexpected conditional operator!");
5469     return (void)EmitAggExprToLValue(E);
5470   }
5471 
5472   OpaqueValueMapping binding(*this, E);
5473   if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5474     return;
5475 
5476   EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5477     CGF.EmitIgnoredExpr(E);
5478     return LValue{};
5479   });
5480 }
5481 LValue CodeGenFunction::EmitConditionalOperatorLValue(
5482     const AbstractConditionalOperator *expr) {
5483   if (!expr->isGLValue()) {
5484     // ?: here should be an aggregate.
5485     assert(hasAggregateEvaluationKind(expr->getType()) &&
5486            "Unexpected conditional operator!");
5487     return EmitAggExprToLValue(expr);
5488   }
5489 
5490   OpaqueValueMapping binding(*this, expr);
5491   if (std::optional<LValue> Res =
5492           HandleConditionalOperatorLValueSimpleCase(*this, expr))
5493     return *Res;
5494 
5495   ConditionalInfo Info = EmitConditionalBlocks(
5496       *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5497         return EmitLValueOrThrowExpression(CGF, E);
5498       });
5499 
5500   if ((Info.LHS && !Info.LHS->isSimple()) ||
5501       (Info.RHS && !Info.RHS->isSimple()))
5502     return EmitUnsupportedLValue(expr, "conditional operator");
5503 
5504   if (Info.LHS && Info.RHS) {
5505     Address lhsAddr = Info.LHS->getAddress();
5506     Address rhsAddr = Info.RHS->getAddress();
5507     Address result = mergeAddressesInConditionalExpr(
5508         lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5509         Builder.GetInsertBlock(), expr->getType());
5510     AlignmentSource alignSource =
5511         std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5512                  Info.RHS->getBaseInfo().getAlignmentSource());
5513     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5514         Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5515     return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5516                           TBAAInfo);
5517   } else {
5518     assert((Info.LHS || Info.RHS) &&
5519            "both operands of glvalue conditional are throw-expressions?");
5520     return Info.LHS ? *Info.LHS : *Info.RHS;
5521   }
5522 }
5523 
5524 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5525 /// type. If the cast is to a reference, we can have the usual lvalue result,
5526 /// otherwise if a cast is needed by the code generator in an lvalue context,
5527 /// then it must mean that we need the address of an aggregate in order to
5528 /// access one of its members.  This can happen for all the reasons that casts
5529 /// are permitted with an aggregate result, including no-op aggregate casts and
5530 /// casts from scalar to union.
5531 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
5532   switch (E->getCastKind()) {
5533   case CK_ToVoid:
5534   case CK_BitCast:
5535   case CK_LValueToRValueBitCast:
5536   case CK_ArrayToPointerDecay:
5537   case CK_FunctionToPointerDecay:
5538   case CK_NullToMemberPointer:
5539   case CK_NullToPointer:
5540   case CK_IntegralToPointer:
5541   case CK_PointerToIntegral:
5542   case CK_PointerToBoolean:
5543   case CK_IntegralCast:
5544   case CK_BooleanToSignedIntegral:
5545   case CK_IntegralToBoolean:
5546   case CK_IntegralToFloating:
5547   case CK_FloatingToIntegral:
5548   case CK_FloatingToBoolean:
5549   case CK_FloatingCast:
5550   case CK_FloatingRealToComplex:
5551   case CK_FloatingComplexToReal:
5552   case CK_FloatingComplexToBoolean:
5553   case CK_FloatingComplexCast:
5554   case CK_FloatingComplexToIntegralComplex:
5555   case CK_IntegralRealToComplex:
5556   case CK_IntegralComplexToReal:
5557   case CK_IntegralComplexToBoolean:
5558   case CK_IntegralComplexCast:
5559   case CK_IntegralComplexToFloatingComplex:
5560   case CK_DerivedToBaseMemberPointer:
5561   case CK_BaseToDerivedMemberPointer:
5562   case CK_MemberPointerToBoolean:
5563   case CK_ReinterpretMemberPointer:
5564   case CK_AnyPointerToBlockPointerCast:
5565   case CK_ARCProduceObject:
5566   case CK_ARCConsumeObject:
5567   case CK_ARCReclaimReturnedObject:
5568   case CK_ARCExtendBlockObject:
5569   case CK_CopyAndAutoreleaseBlockObject:
5570   case CK_IntToOCLSampler:
5571   case CK_FloatingToFixedPoint:
5572   case CK_FixedPointToFloating:
5573   case CK_FixedPointCast:
5574   case CK_FixedPointToBoolean:
5575   case CK_FixedPointToIntegral:
5576   case CK_IntegralToFixedPoint:
5577   case CK_MatrixCast:
5578   case CK_HLSLVectorTruncation:
5579   case CK_HLSLArrayRValue:
5580   case CK_HLSLElementwiseCast:
5581   case CK_HLSLAggregateSplatCast:
5582     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5583 
5584   case CK_Dependent:
5585     llvm_unreachable("dependent cast kind in IR gen!");
5586 
5587   case CK_BuiltinFnToFnPtr:
5588     llvm_unreachable("builtin functions are handled elsewhere");
5589 
5590   // These are never l-values; just use the aggregate emission code.
5591   case CK_NonAtomicToAtomic:
5592   case CK_AtomicToNonAtomic:
5593     return EmitAggExprToLValue(E);
5594 
5595   case CK_Dynamic: {
5596     LValue LV = EmitLValue(E->getSubExpr());
5597     Address V = LV.getAddress();
5598     const auto *DCE = cast<CXXDynamicCastExpr>(E);
5599     return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
5600   }
5601 
5602   case CK_ConstructorConversion:
5603   case CK_UserDefinedConversion:
5604   case CK_CPointerToObjCPointerCast:
5605   case CK_BlockPointerToObjCPointerCast:
5606   case CK_LValueToRValue:
5607     return EmitLValue(E->getSubExpr());
5608 
5609   case CK_NoOp: {
5610     // CK_NoOp can model a qualification conversion, which can remove an array
5611     // bound and change the IR type.
5612     // FIXME: Once pointee types are removed from IR, remove this.
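    // Illustrative (hedged) examples: `const_cast<const int &>(i)` or a
    // C-style `(volatile int &)i` is modeled as a CK_NoOp glvalue cast; the
    // latter changes the volatile qualification handled just below.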
5613     LValue LV = EmitLValue(E->getSubExpr());
5614     // Propagate the volatile qualifer to LValue, if exist in E.
5615     // Propagate the volatile qualifier to the LValue, if present on E.
5616       LV.getQuals() = E->getType().getQualifiers();
5617     if (LV.isSimple()) {
5618       Address V = LV.getAddress();
5619       if (V.isValid()) {
5620         llvm::Type *T = ConvertTypeForMem(E->getType());
5621         if (V.getElementType() != T)
5622           LV.setAddress(V.withElementType(T));
5623       }
5624     }
5625     return LV;
5626   }
5627 
5628   case CK_UncheckedDerivedToBase:
5629   case CK_DerivedToBase: {
5630     const auto *DerivedClassTy =
5631         E->getSubExpr()->getType()->castAs<RecordType>();
5632     auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5633 
5634     LValue LV = EmitLValue(E->getSubExpr());
5635     Address This = LV.getAddress();
5636 
5637     // Perform the derived-to-base conversion
5638     Address Base = GetAddressOfBaseClass(
5639         This, DerivedClassDecl, E->path_begin(), E->path_end(),
5640         /*NullCheckValue=*/false, E->getExprLoc());
5641 
5642     // TODO: Support accesses to members of base classes in TBAA. For now, we
5643     // conservatively pretend that the complete object is of the base class
5644     // type.
5645     return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5646                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5647   }
5648   case CK_ToUnion:
5649     return EmitAggExprToLValue(E);
5650   case CK_BaseToDerived: {
5651     const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5652     auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5653 
5654     LValue LV = EmitLValue(E->getSubExpr());
5655 
5656     // Perform the base-to-derived conversion
5657     Address Derived = GetAddressOfDerivedClass(
5658         LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5659         /*NullCheckValue=*/false);
5660 
5661     // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5662     // performed and the object is not of the derived type.
5663     if (sanitizePerformTypeCheck())
5664       EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
5665                     E->getType());
5666 
5667     if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5668       EmitVTablePtrCheckForCast(E->getType(), Derived,
5669                                 /*MayBeNull=*/false, CFITCK_DerivedCast,
5670                                 E->getBeginLoc());
5671 
5672     return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5673                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5674   }
5675   case CK_LValueBitCast: {
5676     // This must be a reinterpret_cast (or C-style equivalent).
5677     const auto *CE = cast<ExplicitCastExpr>(E);
5678 
5679     CGM.EmitExplicitCastExprType(CE, this);
5680     LValue LV = EmitLValue(E->getSubExpr());
5681     Address V = LV.getAddress().withElementType(
5682         ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5683 
5684     if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5685       EmitVTablePtrCheckForCast(E->getType(), V,
5686                                 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5687                                 E->getBeginLoc());
5688 
5689     return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5690                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5691   }
5692   case CK_AddressSpaceConversion: {
5693     LValue LV = EmitLValue(E->getSubExpr());
5694     QualType DestTy = getContext().getPointerType(E->getType());
5695     llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5696         *this, LV.getPointer(*this),
5697         E->getSubExpr()->getType().getAddressSpace(), ConvertType(DestTy));
5698     return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
5699                                   LV.getAddress().getAlignment()),
5700                           E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5701   }
5702   case CK_ObjCObjectLValueCast: {
5703     LValue LV = EmitLValue(E->getSubExpr());
5704     Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
5705     return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5706                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5707   }
5708   case CK_ZeroToOCLOpaqueType:
5709     llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5710 
5711   case CK_VectorSplat: {
5712     // LValue results of vector splats are only supported in HLSL.
5713     if (!getLangOpts().HLSL)
5714       return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5715     return EmitLValue(E->getSubExpr());
5716   }
5717   }
5718 
5719   llvm_unreachable("Unhandled lvalue cast kind?");
5720 }
5721 
5722 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
5723   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
5724   return getOrCreateOpaqueLValueMapping(e);
5725 }
5726 
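// An illustrative, HLSL-flavored example (hedged): for `void f(inout float3 v)`
// called as `f(p.v)`, the argument lvalue is copied into a temporary, the
// temporary's address is passed to the callee, and the value is written back
// to `p.v` once the call returns.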
5727 std::pair<LValue, LValue>
5728 CodeGenFunction::EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty) {
5729   // Emitting the casted temporary through an opaque value.
5730   LValue BaseLV = EmitLValue(E->getArgLValue());
5731   OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);
5732 
5733   QualType ExprTy = E->getType();
5734   Address OutTemp = CreateIRTemp(ExprTy);
5735   LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5736 
5737   if (E->isInOut())
5738     EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
5739                                TempLV);
5740 
5741   OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
5742   return std::make_pair(BaseLV, TempLV);
5743 }
5744 
5745 LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
5746                                            CallArgList &Args, QualType Ty) {
5747 
5748   auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5749 
5750   llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5751   llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5752 
5753   llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
5754 
5755   llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
5756 
5757   Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5758   Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
5759                     LifetimeSize);
5760   Args.add(RValue::get(TmpAddr, *this), Ty);
5761   return TempLV;
5762 }
5763 
5764 LValue
5765 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
5766   assert(OpaqueValueMapping::shouldBindAsLValue(e));
5767 
5768   llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5769       it = OpaqueLValues.find(e);
5770 
5771   if (it != OpaqueLValues.end())
5772     return it->second;
5773 
5774   assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5775   return EmitLValue(e->getSourceExpr());
5776 }
5777 
5778 RValue
5779 CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
5780   assert(!OpaqueValueMapping::shouldBindAsLValue(e));
5781 
5782   llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5783       it = OpaqueRValues.find(e);
5784 
5785   if (it != OpaqueRValues.end())
5786     return it->second;
5787 
5788   assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5789   return EmitAnyExpr(e->getSourceExpr());
5790 }
5791 
5792 bool CodeGenFunction::isOpaqueValueEmitted(const OpaqueValueExpr *E) {
5793   if (OpaqueValueMapping::shouldBindAsLValue(E))
5794     return OpaqueLValues.contains(E);
5795   return OpaqueRValues.contains(E);
5796 }
5797 
5798 RValue CodeGenFunction::EmitRValueForField(LValue LV,
5799                                            const FieldDecl *FD,
5800                                            SourceLocation Loc) {
5801   QualType FT = FD->getType();
5802   LValue FieldLV = EmitLValueForField(LV, FD);
5803   switch (getEvaluationKind(FT)) {
5804   case TEK_Complex:
5805     return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5806   case TEK_Aggregate:
5807     return FieldLV.asAggregateRValue();
5808   case TEK_Scalar:
5809     // This routine is used to load fields one-by-one to perform a copy, so
5810     // don't load reference fields.
5811     if (FD->getType()->isReferenceType())
5812       return RValue::get(FieldLV.getPointer(*this));
5813     // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5814     // primitive load.
5815     if (FieldLV.isBitField())
5816       return EmitLoadOfLValue(FieldLV, Loc);
5817     return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5818   }
5819   llvm_unreachable("bad evaluation kind");
5820 }
5821 
5822 //===--------------------------------------------------------------------===//
5823 //                             Expression Emission
5824 //===--------------------------------------------------------------------===//
5825 
5826 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
5827                                      ReturnValueSlot ReturnValue,
5828                                      llvm::CallBase **CallOrInvoke) {
5829   llvm::CallBase *CallOrInvokeStorage;
5830   if (!CallOrInvoke) {
5831     CallOrInvoke = &CallOrInvokeStorage;
5832   }
5833 
5834   auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
5835     if (E->isCoroElideSafe()) {
5836       auto *I = *CallOrInvoke;
5837       if (I)
5838         I->addFnAttr(llvm::Attribute::CoroElideSafe);
5839     }
5840   });
5841 
5842   // Builtins never have block type.
5843   if (E->getCallee()->getType()->isBlockPointerType())
5844     return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
5845 
5846   if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5847     return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
5848 
5849   if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5850     return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
5851 
5852   // A CXXOperatorCallExpr is created even for explicit object methods, but
5853   // these should be treated like static function calls.
5854   if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5855     if (const auto *MD =
5856             dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5857         MD && MD->isImplicitObjectMemberFunction())
5858       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
5859 
5860   CGCallee callee = EmitCallee(E->getCallee());
5861 
5862   if (callee.isBuiltin()) {
5863     return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5864                            E, ReturnValue);
5865   }
5866 
5867   if (callee.isPseudoDestructor()) {
5868     return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
5869   }
5870 
5871   return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
5872                   /*Chain=*/nullptr, CallOrInvoke);
5873 }
5874 
5875 /// Emit a CallExpr without considering whether it might be a subclass.
5876 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
5877                                            ReturnValueSlot ReturnValue,
5878                                            llvm::CallBase **CallOrInvoke) {
5879   CGCallee Callee = EmitCallee(E->getCallee());
5880   return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
5881                   /*Chain=*/nullptr, CallOrInvoke);
5882 }
5883 
5884 // Detect the unusual situation where an inline version is shadowed by a
5885 // non-inline version. In that case we should pick the external one
5886 // everywhere. That's GCC behavior too.
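//
// A hedged, illustrative example of the shadowing: a header provides
//   extern inline __attribute__((gnu_inline, always_inline)) void *
//   memcpy(void *Dst, const void *Src, size_t N) { /* wrapper */ }
// and a later non-inline declaration of `memcpy` appears; in that case every
// call should resolve to the external definition.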
5887 static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
5888   for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5889     if (!PD->isInlineBuiltinDeclaration())
5890       return false;
5891   return true;
5892 }
5893 
5894 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
5895   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5896 
5897   if (auto builtinID = FD->getBuiltinID()) {
5898     std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5899     std::string NoBuiltins = "no-builtins";
5900 
5901     StringRef Ident = CGF.CGM.getMangledName(GD);
5902     std::string FDInlineName = (Ident + ".inline").str();
5903 
5904     bool IsPredefinedLibFunction =
5905         CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
5906     bool HasAttributeNoBuiltin =
5907         CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5908         CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5909 
5910     // When directly calling an inline builtin, call it through its mangled
5911     // name to make it clear it's not the actual builtin.
5912     if (CGF.CurFn->getName() != FDInlineName &&
5913         OnlyHasInlineBuiltinDeclaration(FD)) {
5914       llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5915       llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5916       llvm::Module *M = Fn->getParent();
5917       llvm::Function *Clone = M->getFunction(FDInlineName);
5918       if (!Clone) {
5919         Clone = llvm::Function::Create(Fn->getFunctionType(),
5920                                        llvm::GlobalValue::InternalLinkage,
5921                                        Fn->getAddressSpace(), FDInlineName, M);
5922         Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5923       }
5924       return CGCallee::forDirect(Clone, GD);
5925     }
5926 
5927     // Replaceable builtins provide their own implementation of a builtin. If
5928     // we are in an inline builtin implementation, avoid trivial infinite
5929     // recursion. Honor __attribute__((no_builtin("foo"))) or
5930     // __attribute__((no_builtin)) on the current function, unless foo is not
5931     // a predefined library function, in which case we must generate the
5932     // builtin no matter what.
5933     else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5934       return CGCallee::forBuiltin(builtinID, FD);
5935   }
5936 
5937   llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5938   if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5939       FD->hasAttr<CUDAGlobalAttr>())
5940     CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5941         cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5942 
5943   return CGCallee::forDirect(CalleePtr, GD);
5944 }
5945 
5946 static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD) {
5947   if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
5948     return GlobalDecl(FD, KernelReferenceKind::Stub);
5949   return GlobalDecl(FD);
5950 }
5951 
5952 CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
5953   E = E->IgnoreParens();
5954 
5955   // Look through function-to-pointer decay.
5956   if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5957     if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5958         ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5959       return EmitCallee(ICE->getSubExpr());
5960     }
5961 
5962     // Try to remember the original __ptrauth qualifier for loads of
5963     // function pointers.
5964     if (ICE->getCastKind() == CK_LValueToRValue) {
5965       const Expr *SubExpr = ICE->getSubExpr();
5966       if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
5967         std::pair<llvm::Value *, CGPointerAuthInfo> Result =
5968             EmitOrigPointerRValue(E);
5969 
5970         QualType FunctionType = PtrType->getPointeeType();
5971         assert(FunctionType->isFunctionType());
5972 
5973         GlobalDecl GD;
5974         if (const auto *VD =
5975                 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
5976           GD = GlobalDecl(VD);
5977         }
5978         CGCalleeInfo CalleeInfo(FunctionType->getAs<FunctionProtoType>(), GD);
5979         CGCallee Callee(CalleeInfo, Result.first, Result.second);
5980         return Callee;
5981       }
5982     }
5983 
5984   // Resolve direct calls.
5985   } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5986     if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5987       return EmitDirectCallee(*this, getGlobalDeclForDirectCall(FD));
5988     }
5989   } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5990     if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5991       EmitIgnoredExpr(ME->getBase());
5992       return EmitDirectCallee(*this, FD);
5993     }
5994 
5995   // Look through template substitutions.
5996   } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5997     return EmitCallee(NTTP->getReplacement());
5998 
5999   // Treat pseudo-destructor calls differently.
6000   } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
6001     return CGCallee::forPseudoDestructor(PDE);
6002   }
6003 
6004   // Otherwise, we have an indirect reference.
6005   llvm::Value *calleePtr;
6006   QualType functionType;
6007   if (auto ptrType = E->getType()->getAs<PointerType>()) {
6008     calleePtr = EmitScalarExpr(E);
6009     functionType = ptrType->getPointeeType();
6010   } else {
6011     functionType = E->getType();
6012     calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
6013   }
6014   assert(functionType->isFunctionType());
6015 
6016   GlobalDecl GD;
6017   if (const auto *VD =
6018           dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
6019     GD = GlobalDecl(VD);
6020 
6021   CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6022   CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
6023   CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6024   return callee;
6025 }
6026 
6027 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
6028   // Comma expressions just emit their LHS then their RHS as an l-value.
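  // An illustrative (hedged) example: `(f(), x) = 3;` emits `f()` for its side
  // effects and then assigns through the lvalue `x`.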
6029   if (E->getOpcode() == BO_Comma) {
6030     EmitIgnoredExpr(E->getLHS());
6031     EnsureInsertPoint();
6032     return EmitLValue(E->getRHS());
6033   }
6034 
6035   if (E->getOpcode() == BO_PtrMemD ||
6036       E->getOpcode() == BO_PtrMemI)
6037     return EmitPointerToDataMemberBinaryExpr(E);
6038 
6039   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6040 
6041   // Create a Key Instructions source location atom group that covers both
6042   // LHS and RHS expressions. Nested RHS expressions may get subsequently
6043   // separately grouped (1 below):
6044   //
6045   //   1. `a = b = c`  -> Two atoms.
6046   //   2. `x = new(1)` -> One atom (for both addr store and value store).
6047   //   3. Complex and agg assignment -> One atom.
6048   ApplyAtomGroup Grp(getDebugInfo());
6049 
6050   // Note that in all of these cases, __block variables need the RHS
6051   // evaluated first just in case the variable gets moved by the RHS.
6052 
6053   switch (getEvaluationKind(E->getType())) {
6054   case TEK_Scalar: {
6055     if (PointerAuthQualifier PtrAuth =
6056             E->getLHS()->getType().getPointerAuth()) {
6057       LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
6058       LValue CopiedLV = LV;
6059       CopiedLV.getQuals().removePointerAuth();
6060       llvm::Value *RV =
6061           EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
6062       EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
6063       EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
6064       return LV;
6065     }
6066 
6067     switch (E->getLHS()->getType().getObjCLifetime()) {
6068     case Qualifiers::OCL_Strong:
6069       return EmitARCStoreStrong(E, /*ignored*/ false).first;
6070 
6071     case Qualifiers::OCL_Autoreleasing:
6072       return EmitARCStoreAutoreleasing(E).first;
6073 
6074     // No reason to do any of these differently.
6075     case Qualifiers::OCL_None:
6076     case Qualifiers::OCL_ExplicitNone:
6077     case Qualifiers::OCL_Weak:
6078       break;
6079     }
6080 
6081     // TODO: Can we de-duplicate this code with the corresponding code in
6082     // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6083     RValue RV;
6084     llvm::Value *Previous = nullptr;
6085     QualType SrcType = E->getRHS()->getType();
6086     // Check whether the LHS is a bitfield. If the RHS contains an implicit
6087     // cast expression, we want to extract that value and potentially (if the
6088     // bitfield sanitizer is enabled) use it to check for an implicit conversion.
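    //
    // Illustrative (hedged) example: for `struct { int b : 3; } s; s.b = x;`
    // the original RHS value, before truncation to 3 bits, feeds the
    // implicit-conversion check when the bitfield sanitizer is enabled.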
6089     if (E->getLHS()->refersToBitField()) {
6090       llvm::Value *RHS =
6091           EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
6092       RV = RValue::get(RHS);
6093     } else
6094       RV = EmitAnyExpr(E->getRHS());
6095 
6096     LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
6097 
6098     if (RV.isScalar())
6099       EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
6100 
6101     if (LV.isBitField()) {
6102       llvm::Value *Result = nullptr;
6103       // If bitfield sanitizers are enabled we want to use the result
6104       // to check whether a truncation or sign change has occurred.
6105       if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
6106         EmitStoreThroughBitfieldLValue(RV, LV, &Result);
6107       else
6108         EmitStoreThroughBitfieldLValue(RV, LV);
6109 
6110       // If the expression contained an implicit conversion, make sure
6111       // to use the value before the scalar conversion.
6112       llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6113       QualType DstType = E->getLHS()->getType();
6114       EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
6115                                   LV.getBitFieldInfo(), E->getExprLoc());
6116     } else
6117       EmitStoreThroughLValue(RV, LV);
6118 
6119     if (getLangOpts().OpenMP)
6120       CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
6121                                                                 E->getLHS());
6122     return LV;
6123   }
6124 
6125   case TEK_Complex:
6126     return EmitComplexAssignmentLValue(E);
6127 
6128   case TEK_Aggregate:
6129     // If the language is HLSL and the LHS is a constant array, we are
6130     // performing a copy assignment and call a special function, because
6131     // EmitAggExprToLValue emits to a temporary LValue.
6132     if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
6133       return EmitHLSLArrayAssignLValue(E);
6134 
6135     return EmitAggExprToLValue(E);
6136   }
6137   llvm_unreachable("bad evaluation kind");
6138 }
6139 
6140 // This function implements trivial copy assignment for HLSL's
6141 // assignable constant arrays.
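//
// A hedged, HLSL-flavored example: given `int A[4]; int B[4];`, the assignment
// `A = B;` (HLSL's assignable constant arrays) is lowered as an element-wise
// initialization of the LHS array rather than through a temporary aggregate.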
6142 LValue CodeGenFunction::EmitHLSLArrayAssignLValue(const BinaryOperator *E) {
6143   // Don't emit an LValue for the RHS because it might not be an LValue
6144   LValue LHS = EmitLValue(E->getLHS());
6145   // In C the RHS of an assignment operator is an RValue.
6146   // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6147   // EmitInitializationToLValue to emit an RValue into an LValue.
6148   EmitInitializationToLValue(E->getRHS(), LHS);
6149   return LHS;
6150 }
6151 
6152 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E,
6153                                            llvm::CallBase **CallOrInvoke) {
6154   RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
6155 
6156   if (!RV.isScalar())
6157     return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6158                           AlignmentSource::Decl);
6159 
6160   assert(E->getCallReturnType(getContext())->isReferenceType() &&
6161          "Can't have a scalar return unless the return type is a "
6162          "reference type!");
6163 
6164   return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
6165 }
6166 
6167 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
6168   // FIXME: This shouldn't require another copy.
6169   return EmitAggExprToLValue(E);
6170 }
6171 
6172 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
6173   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
6174          && "binding l-value to type which needs a temporary");
6175   AggValueSlot Slot = CreateAggTemp(E->getType());
6176   EmitCXXConstructExpr(E, Slot);
6177   return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
6178 }
6179 
6180 LValue
6181 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
6182   return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
6183 }
6184 
6185 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
6186   return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
6187       .withElementType(ConvertType(E->getType()));
6188 }
6189 
6190 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
6191   return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
6192                         AlignmentSource::Decl);
6193 }
6194 
6195 LValue
6196 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
6197   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
6198   Slot.setExternallyDestructed();
6199   EmitAggExpr(E->getSubExpr(), Slot);
6200   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
6201   return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
6202 }
6203 
6204 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
6205   RValue RV = EmitObjCMessageExpr(E);
6206 
6207   if (!RV.isScalar())
6208     return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6209                           AlignmentSource::Decl);
6210 
6211   assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6212          "Can't have a scalar return unless the return type is a "
6213          "reference type!");
6214 
6215   return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
6216 }
6217 
6218 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
6219   Address V =
6220     CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
6221   return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
6222 }
6223 
6224 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
6225                                              const ObjCIvarDecl *Ivar) {
6226   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
6227 }
6228 
6229 llvm::Value *
6230 CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
6231                                              const ObjCIvarDecl *Ivar) {
6232   llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6233   QualType PointerDiffType = getContext().getPointerDiffType();
6234   return Builder.CreateZExtOrTrunc(OffsetValue,
6235                                    getTypes().ConvertType(PointerDiffType));
6236 }
6237 
6238 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
6239                                           llvm::Value *BaseValue,
6240                                           const ObjCIvarDecl *Ivar,
6241                                           unsigned CVRQualifiers) {
6242   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
6243                                                    Ivar, CVRQualifiers);
6244 }
6245 
6246 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
6247   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6248   llvm::Value *BaseValue = nullptr;
6249   const Expr *BaseExpr = E->getBase();
6250   Qualifiers BaseQuals;
6251   QualType ObjectTy;
6252   if (E->isArrow()) {
6253     BaseValue = EmitScalarExpr(BaseExpr);
6254     ObjectTy = BaseExpr->getType()->getPointeeType();
6255     BaseQuals = ObjectTy.getQualifiers();
6256   } else {
6257     LValue BaseLV = EmitLValue(BaseExpr);
6258     BaseValue = BaseLV.getPointer(*this);
6259     ObjectTy = BaseExpr->getType();
6260     BaseQuals = ObjectTy.getQualifiers();
6261   }
6262 
6263   LValue LV =
6264     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
6265                       BaseQuals.getCVRQualifiers());
6266   setObjCGCLValueClass(getContext(), E, LV);
6267   return LV;
6268 }
6269 
6270 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
6271   // Can only get an l-value for a statement expression returning an aggregate.
6272   RValue RV = EmitAnyExprToTemp(E);
6273   return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6274                         AlignmentSource::Decl);
6275 }
6276 
6277 RValue CodeGenFunction::EmitCall(QualType CalleeType,
6278                                  const CGCallee &OrigCallee, const CallExpr *E,
6279                                  ReturnValueSlot ReturnValue,
6280                                  llvm::Value *Chain,
6281                                  llvm::CallBase **CallOrInvoke,
6282                                  CGFunctionInfo const **ResolvedFnInfo) {
6283   // Get the actual function type. The callee type will always be a pointer to
6284   // function type or a block pointer type.
6285   assert(CalleeType->isFunctionPointerType() &&
6286          "Call must have function pointer type!");
6287 
6288   const Decl *TargetDecl =
6289       OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6290 
6291   assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6292           !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6293          "trying to emit a call to an immediate function");
6294 
6295   CalleeType = getContext().getCanonicalType(CalleeType);
6296 
6297   auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6298 
6299   CGCallee Callee = OrigCallee;
6300 
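  // With -fsanitize=function, an indirect call through a prototyped function
  // pointer is preceded by a check that the callee carries a matching prefix:
  // a magic signature followed by a hash of its function type, emitted
  // directly before the function's entry point.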
6301   if (SanOpts.has(SanitizerKind::Function) &&
6302       (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6303       !isa<FunctionNoProtoType>(PointeeType)) {
6304     if (llvm::Constant *PrefixSig =
6305             CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6306       auto CheckOrdinal = SanitizerKind::SO_Function;
6307       auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6308       SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6309       auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6310 
6311       llvm::Type *PrefixSigType = PrefixSig->getType();
6312       llvm::StructType *PrefixStructTy = llvm::StructType::get(
6313           CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6314 
6315       llvm::Value *CalleePtr = Callee.getFunctionPointer();
6316       if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6317         // Use raw pointer since we are using the callee pointer as data here.
6318         Address Addr =
6319             Address(CalleePtr, CalleePtr->getType(),
6320                     CharUnits::fromQuantity(
6321                         CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6322                     Callee.getPointerAuthInfo(), nullptr);
6323         CalleePtr = Addr.emitRawPointer(*this);
6324       }
6325 
6326       // On 32-bit Arm, the low bit of a function pointer indicates whether
6327       // it's using the Arm or Thumb instruction set. The actual first
6328       // instruction lives at the same address either way, so we must clear
6329       // that low bit before using the function address to find the prefix
6330       // structure.
6331       //
6332       // This applies to both Arm and Thumb target triples, because
6333       // either one could be used in an interworking context where it
6334       // might be passed function pointers of both types.
6335       llvm::Value *AlignedCalleePtr;
6336       if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6337         llvm::Value *CalleeAddress =
6338             Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6339         llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6340         llvm::Value *AlignedCalleeAddress =
6341             Builder.CreateAnd(CalleeAddress, Mask);
6342         AlignedCalleePtr =
6343             Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6344       } else {
6345         AlignedCalleePtr = CalleePtr;
6346       }
6347 
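      // The prefix struct lives immediately before the callee's first
      // instruction, so address it by indexing PrefixStructTy at -1 from the
      // (bit-cleared) function pointer.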
6348       llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6349       llvm::Value *CalleeSigPtr =
6350           Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6351       llvm::Value *CalleeSig =
6352           Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6353       llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6354 
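      // Only compare the type hash when the signature matched; a callee built
      // without the prefix (e.g. not compiled with -fsanitize=function) skips
      // straight to the call.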
6355       llvm::BasicBlock *Cont = createBasicBlock("cont");
6356       llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6357       Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6358 
6359       EmitBlock(TypeCheck);
6360       llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6361           Int32Ty,
6362           Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6363           getPointerAlign());
6364       llvm::Value *CalleeTypeHashMatch =
6365           Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6366       llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6367                                       EmitCheckTypeDescriptor(CalleeType)};
6368       EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
6369                 StaticData, {CalleePtr});
6370 
6371       Builder.CreateBr(Cont);
6372       EmitBlock(Cont);
6373     }
6374   }
6375 
6376   const auto *FnType = cast<FunctionType>(PointeeType);
6377 
6378   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
6379       FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6380     CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);
6381 
6382   bool CFIUnchecked =
6383       CalleeType->hasPointeeToToCFIUncheckedCalleeFunctionType();
6384 
6385   // If we are checking indirect calls and this call is indirect, check that the
6386   // function pointer is a member of the bit set for the function type.
6387   if (SanOpts.has(SanitizerKind::CFIICall) &&
6388       (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
6389     auto CheckOrdinal = SanitizerKind::SO_CFIICall;
6390     auto CheckHandler = SanitizerHandler::CFICheckFail;
6391     SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6392     EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6393 
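    // With -fsanitize-cfi-icall-generalize-pointers, pointer types in the
    // signature are generalized so that calls through compatible pointer
    // types share a single CFI type id.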
6394     llvm::Metadata *MD;
6395     if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
6396       MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
6397     else
6398       MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
6399 
6400     llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6401 
6402     llvm::Value *CalleePtr = Callee.getFunctionPointer();
6403     llvm::Value *TypeTest = Builder.CreateCall(
6404         CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6405 
6406     auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6407     llvm::Constant *StaticData[] = {
6408         llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6409         EmitCheckSourceLocation(E->getBeginLoc()),
6410         EmitCheckTypeDescriptor(QualType(FnType, 0)),
6411     };
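    // In cross-DSO mode a failing llvm.type.test is routed through the CFI
    // slow path keyed by the cross-DSO type id; otherwise emit an ordinary
    // check that diagnoses or traps on failure.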
6412     if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6413       EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
6414                            StaticData);
6415     } else {
6416       EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
6417                 StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6418     }
6419   }
6420 
6421   CallArgList Args;
6422   if (Chain)
6423     Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6424 
6425   // C++17 requires that we evaluate arguments to a call using assignment syntax
6426   // right-to-left, and that we evaluate arguments to certain other operators
6427   // left-to-right. Note that we allow this to override the order dictated by
6428   // the calling convention on the MS ABI, which means that parameter
6429   // destruction order is not necessarily reverse construction order.
6430   // FIXME: Revisit this based on C++ committee response to unimplementability.
6431   EvaluationOrder Order = EvaluationOrder::Default;
6432   bool StaticOperator = false;
6433   if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6434     if (OCE->isAssignmentOp())
6435       Order = EvaluationOrder::ForceRightToLeft;
6436     else {
6437       switch (OCE->getOperator()) {
6438       case OO_LessLess:
6439       case OO_GreaterGreater:
6440       case OO_AmpAmp:
6441       case OO_PipePipe:
6442       case OO_Comma:
6443       case OO_ArrowStar:
6444         Order = EvaluationOrder::ForceLeftToRight;
6445         break;
6446       default:
6447         break;
6448       }
6449     }
6450 
6451     if (const auto *MD =
6452             dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6453         MD && MD->isStatic())
6454       StaticOperator = true;
6455   }
6456 
6457   auto Arguments = E->arguments();
6458   if (StaticOperator) {
6459     // If we're calling a static operator, we still emit the object argument
6460     // for its side effects, then drop it from the argument list.
6461     EmitIgnoredExpr(E->getArg(0));
6462     Arguments = drop_begin(Arguments, 1);
6463   }
6464   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6465                E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6466 
6467   const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
6468       Args, FnType, /*ChainCall=*/Chain);
6469 
6470   if (ResolvedFnInfo)
6471     *ResolvedFnInfo = &FnInfo;
6472 
6473   // In HIP host code, a function pointer used in a triple-chevron launch
6474   // holds a kernel handle; the kernel stub must be loaded from that handle
6475   // and used as the callee.
6476   if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6477       isa<CUDAKernelCallExpr>(E) &&
6478       (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6479     llvm::Value *Handle = Callee.getFunctionPointer();
6480     auto *Stub = Builder.CreateLoad(
6481         Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6482     Callee.setFunctionPointer(Stub);
6483   }
6484   llvm::CallBase *LocalCallOrInvoke = nullptr;
6485   RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6486                          E == MustTailCall, E->getExprLoc());
6487 
6488   // Generate a function declaration DISubprogram so that it can be
6489   // referenced from the call-site debug info.
6490   if (CGDebugInfo *DI = getDebugInfo()) {
6491     if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6492       FunctionArgList Args;
6493       QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6494       DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6495                                   DI->getFunctionType(CalleeDecl, ResTy, Args),
6496                                   CalleeDecl);
6497     }
6498   }
6499   if (CallOrInvoke)
6500     *CallOrInvoke = LocalCallOrInvoke;
6501 
6502   return Call;
6503 }
6504 
6505 LValue CodeGenFunction::
6506 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
6507   Address BaseAddr = Address::invalid();
6508   if (E->getOpcode() == BO_PtrMemI) {
6509     BaseAddr = EmitPointerWithAlignment(E->getLHS());
6510   } else {
6511     BaseAddr = EmitLValue(E->getLHS()).getAddress();
6512   }
6513 
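  // The RHS is the member pointer; under the Itanium C++ ABI this is simply
  // the member's byte offset within the class, and the ABI-specific address
  // computation below applies it to BaseAddr.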
6514   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6515   const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6516 
6517   LValueBaseInfo BaseInfo;
6518   TBAAAccessInfo TBAAInfo;
6519   bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
6520                     !isUnderlyingBasePointerConstantNull(E->getLHS());
6521   Address MemberAddr = EmitCXXMemberDataPointerAddress(
6522       E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);
6523 
6524   return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6525 }
6526 
6527 /// Given the address of a temporary variable, produce an r-value of
6528 /// its type.
6529 RValue CodeGenFunction::convertTempToRValue(Address addr,
6530                                             QualType type,
6531                                             SourceLocation loc) {
6532   LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
6533   switch (getEvaluationKind(type)) {
6534   case TEK_Complex:
6535     return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6536   case TEK_Aggregate:
6537     return lvalue.asAggregateRValue();
6538   case TEK_Scalar:
6539     return RValue::get(EmitLoadOfScalar(lvalue, loc));
6540   }
6541   llvm_unreachable("bad evaluation kind");
6542 }
6543 
6544 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6545   assert(Val->getType()->isFPOrFPVectorTy());
6546   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6547     return;
6548 
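  // Attach !fpmath metadata recording the maximum permitted error in ULPs;
  // an accuracy of zero (handled above) means the default, correctly rounded
  // result and needs no metadata.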
6549   llvm::MDBuilder MDHelper(getLLVMContext());
6550   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6551 
6552   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6553 }
6554 
6555 void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6556   llvm::Type *EltTy = Val->getType()->getScalarType();
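  // The relaxed-accuracy hint below applies only to single-precision (float)
  // operations; other element types keep the default accuracy.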
6557   if (!EltTy->isFloatTy())
6558     return;
6559 
6560   if ((getLangOpts().OpenCL &&
6561        !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6562       (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6563        !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6564     // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6565     //
6566     // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6567     // build option allows an application to specify that single precision
6568     // floating-point divide (x/y and 1/x) and sqrt used in the program
6569     // source are correctly rounded.
6570     //
6571     // TODO: CUDA has a prec-sqrt flag
6572     SetFPAccuracy(Val, 3.0f);
6573   }
6574 }
6575 
6576 void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6577   llvm::Type *EltTy = Val->getType()->getScalarType();
6578   if (!EltTy->isFloatTy())
6579     return;
6580 
6581   if ((getLangOpts().OpenCL &&
6582        !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6583       (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6584        !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6585     // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6586     //
6587     // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6588     // build option allows an application to specify that single precision
6589     // floating-point divide (x/y and 1/x) and sqrt used in the program
6590     // source are correctly rounded.
6591     //
6592     // TODO: CUDA has a prec-div flag
6593     SetFPAccuracy(Val, 2.5f);
6594   }
6595 }
6596 
6597 namespace {
6598   struct LValueOrRValue {
6599     LValue LV;
6600     RValue RV;
6601   };
6602 }
6603 
6604 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6605                                            const PseudoObjectExpr *E,
6606                                            bool forLValue,
6607                                            AggValueSlot slot) {
6608   SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
6609 
6610   // Find the result expression, if any.
6611   const Expr *resultExpr = E->getResultExpr();
6612   LValueOrRValue result;
6613 
6614   for (PseudoObjectExpr::const_semantics_iterator
6615          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6616     const Expr *semantic = *i;
6617 
6618     // If this semantic expression is an opaque value, bind it
6619     // to the result of its source expression.
6620     if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6621       // Skip unique OVEs.
6622       if (ov->isUnique()) {
6623         assert(ov != resultExpr &&
6624                "A unique OVE cannot be used as the result expression");
6625         continue;
6626       }
6627 
6628       // If this is the result expression, we may need to evaluate
6629       // directly into the slot.
6630       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6631       OVMA opaqueData;
6632       if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6633           CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
6634         CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6635         LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6636                                        AlignmentSource::Decl);
6637         opaqueData = OVMA::bind(CGF, ov, LV);
6638         result.RV = slot.asRValue();
6639 
6640       // Otherwise, emit as normal.
6641       } else {
6642         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6643 
6644         // If this is the result, also evaluate the result now.
6645         if (ov == resultExpr) {
6646           if (forLValue)
6647             result.LV = CGF.EmitLValue(ov);
6648           else
6649             result.RV = CGF.EmitAnyExpr(ov, slot);
6650         }
6651       }
6652 
6653       opaques.push_back(opaqueData);
6654 
6655     // Otherwise, if the expression is the result, evaluate it
6656     // and remember the result.
6657     } else if (semantic == resultExpr) {
6658       if (forLValue)
6659         result.LV = CGF.EmitLValue(semantic);
6660       else
6661         result.RV = CGF.EmitAnyExpr(semantic, slot);
6662 
6663     // Otherwise, evaluate the expression in an ignored context.
6664     } else {
6665       CGF.EmitIgnoredExpr(semantic);
6666     }
6667   }
6668 
6669   // Unbind all the opaques now.
6670   for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
6671     opaque.unbind(CGF);
6672 
6673   return result;
6674 }
6675 
6676 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
6677                                                AggValueSlot slot) {
6678   return emitPseudoObjectExpr(*this, E, false, slot).RV;
6679 }
6680 
6681 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
6682   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6683 }
6684 
6685 void CodeGenFunction::FlattenAccessAndType(
6686     Address Addr, QualType AddrType,
6687     SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
6688     SmallVectorImpl<QualType> &FlatTypes) {
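  // Walk AddrType depth-first and decompose it into its scalar leaves (used
  // for HLSL flattened casts): for each leaf, record a GEP into Addr, an
  // optional vector lane index, and the leaf's type.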
6689   // WorkList holds the types still to be processed, each paired with the
6690   // index list needed to reach that type's field within Addr via a GEP.
6691   llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>,
6692                     16>
6693       WorkList;
6694   llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
6695   // Addr should be a pointer so we need to 'dereference' it
6696   WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}});
6697 
6698   while (!WorkList.empty()) {
6699     auto [T, IdxList] = WorkList.pop_back_val();
6700     T = T.getCanonicalType().getUnqualifiedType();
6701     assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");
6702     if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
6703       uint64_t Size = CAT->getZExtSize();
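      // Push the elements in reverse so they are popped, and therefore
      // flattened, in increasing index order.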
6704       for (int64_t I = Size - 1; I > -1; I--) {
6705         llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6706         IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
6707         WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
6708       }
6709     } else if (const auto *RT = dyn_cast<RecordType>(T)) {
6710       const RecordDecl *Record = RT->getDecl();
6711       assert(!Record->isUnion() && "Union types not supported in flat cast.");
6712 
6713       const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
6714 
6715       llvm::SmallVector<QualType, 16> FieldTypes;
6716       if (CXXD && CXXD->isStandardLayout())
6717         Record = CXXD->getStandardLayoutBaseWithFields();
6718 
6719       // For non-standard-layout classes, flatten the base classes explicitly.
6720       if (CXXD && !CXXD->isStandardLayout()) {
6721         for (auto &Base : CXXD->bases())
6722           FieldTypes.push_back(Base.getType());
6723       }
6724 
6725       for (auto *FD : Record->fields())
6726         FieldTypes.push_back(FD->getType());
6727 
6728       for (int64_t I = FieldTypes.size() - 1; I > -1; I--) {
6729         llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6730         IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
6731         WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy});
6732       }
6733     } else if (const auto *VT = dyn_cast<VectorType>(T)) {
6734       llvm::Type *LLVMT = ConvertTypeForMem(T);
6735       CharUnits Align = getContext().getTypeAlignInChars(T);
6736       Address GEP =
6737           Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep");
6738       for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
6739         llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I);
6740         // GEPs into individual vector elements are discouraged, so record the
6741         // vector GEP together with a lane index for extract/insertelement.
6742         AccessList.emplace_back(GEP, Idx);
6743         FlatTypes.push_back(VT->getElementType());
6744       }
6745     } else {
6746       // A scalar/builtin leaf type: record its address directly.
6747       llvm::Type *LLVMT = ConvertTypeForMem(T);
6748       CharUnits Align = getContext().getTypeAlignInChars(T);
6749       Address GEP =
6750           Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep");
6751       AccessList.emplace_back(GEP, nullptr);
6752       FlatTypes.push_back(T);
6753     }
6754   }
6755 }
6756