//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
// Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
      : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
        IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      CGF.EmitAggregateStore(Result, Dest.getAddress(),
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is. Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements. The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
      ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*. Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      DestPtr.getElementType(), DestPtr.getPointer(), indices,
      "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex. Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'. The invariants on this
  // variable are complicated. Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element. TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Args[i], elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop. This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
               atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
        (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize. (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    [[fallthrough]];

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:

  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators. Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have volatile members.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
  llvm::Instruction *CleanupDominator = nullptr;

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(DtorKind)) {
        if (!CleanupDominator)
          CleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
                        CGF.getDestroyer(DtorKind), false);
        Cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = Cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);

  // Destroy the placeholder if we made one.
  if (CleanupDominator)
    CleanupDominator->eraseFromParent();
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
    // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
    // Conversions between (possibly-complex) integral, (possibly-complex)
    // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
    // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
    // Language extensions.
  case CK_VectorSplat:
  case CK_MatrixCast:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return true;

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_Dynamic:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true. This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();
  while (auto *CE = dyn_cast<CastExpr>(E)) {
    if (!castPreservesZero(CE))
      break;
    E = CE->getSubExpr()->IgnoreParens();
  }

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
1590   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1591     return;
1592 
1593   if (CGF.hasScalarEvaluationKind(type)) {
1594     // For non-aggregates, we can store the appropriate null constant.
1595     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1596     // Note that the following is not equivalent to
1597     // EmitStoreThroughBitfieldLValue for ARC types.
1598     if (lv.isBitField()) {
1599       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1600     } else {
1601       assert(lv.isSimple());
1602       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1603     }
1604   } else {
1605     // There's a potential optimization opportunity in combining
1606     // memsets; that would be easy for arrays, but relatively
1607     // difficult for structures with the current code.
1608     CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
1609   }
1610 }
1611 
1612 void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1613   VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1614                                   E->getInitializedFieldInUnion(),
1615                                   E->getArrayFiller());
1616 }
1617 
1618 void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1619   if (E->hadArrayRangeDesignator())
1620     CGF.ErrorUnsupported(E, "GNU array range designator extension");
1621 
1622   if (E->isTransparent())
1623     return Visit(E->getInit(0));
1624 
1625   VisitCXXParenListOrInitListExpr(
1626       E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1627 }
1628 
1629 void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1630     Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1631     FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1632 #if 0
1633   // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1634   // (Length of globals? Chunks of zeroed-out space?).
1635   //
1636   // If we can, prefer a copy from a global; this is a lot less code for long
1637   // globals, and it's easier for the current optimizers to analyze.
1638   if (llvm::Constant *C =
1639           CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1640     llvm::GlobalVariable* GV =
1641         new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1642                                  llvm::GlobalValue::InternalLinkage, C, "");
1643     EmitFinalDestCopy(ExprToVisit->getType(),
1644                       CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1645     return;
1646   }
1647 #endif
1648 
1649   AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1650 
1651   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1652 
1653   // Handle initialization of an array.
1654   if (ExprToVisit->getType()->isArrayType()) {
1655     auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1656     EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1657                   InitExprs, ArrayFiller);
1658     return;
1659   }
1660 
1661   assert(ExprToVisit->getType()->isRecordType() &&
1662          "Only support structs/unions here!");
1663 
1664   // Do struct initialization; this code just sets each individual member
1665   // to the appropriate value. This makes bitfield support automatic;
1666   // the disadvantage is that the generated code is more difficult for
1667   // the optimizer, especially with bitfields.
1668   unsigned NumInitElements = InitExprs.size();
1669   RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
1670 
1671   // We'll need to enter cleanup scopes in case any of the element
1672   // initializers throws an exception.
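  // Illustrative example (hypothetical code): for
  //   struct S { std::string a, b; };
  //   S s{"x", make_b()};
  // if the initializer for 'b' throws, the cleanup recorded below for 'a'
  // ensures the already-constructed 'a' is destroyed during unwinding.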
1673   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1674   llvm::Instruction *cleanupDominator = nullptr;
1675   auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
1676     cleanups.push_back(cleanup);
1677     if (!cleanupDominator) // create placeholder once needed
1678       cleanupDominator = CGF.Builder.CreateAlignedLoad(
1679           CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
1680           CharUnits::One());
1681   };
1682 
1683   unsigned curInitIndex = 0;
1684 
1685   // Emit initialization of base classes.
1686   if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1687     assert(NumInitElements >= CXXRD->getNumBases() &&
1688            "missing initializer for base class");
1689     for (auto &Base : CXXRD->bases()) {
1690       assert(!Base.isVirtual() && "should not see vbases here");
1691       auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1692       Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1693           Dest.getAddress(), CXXRD, BaseRD,
1694           /*isBaseVirtual*/ false);
1695       AggValueSlot AggSlot = AggValueSlot::forAddr(
1696           V, Qualifiers(),
1697           AggValueSlot::IsDestructed,
1698           AggValueSlot::DoesNotNeedGCBarriers,
1699           AggValueSlot::IsNotAliased,
1700           CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1701       CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1702 
1703       if (QualType::DestructionKind dtorKind =
1704               Base.getType().isDestructedType()) {
1705         CGF.pushDestroy(dtorKind, V, Base.getType());
1706         addCleanup(CGF.EHStack.stable_begin());
1707       }
1708     }
1709   }
1710 
1711   // Prepare a 'this' for CXXDefaultInitExprs.
1712   CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1713 
1714   if (record->isUnion()) {
1715     // Only initialize one field of a union. The field itself is
1716     // specified by the initializer list.
1717     if (!InitializedFieldInUnion) {
1718       // Empty union; we have nothing to do.
1719 
1720 #ifndef NDEBUG
1721       // Make sure that it's really an empty union and not a failure of
1722       // semantic analysis.
1723       for (const auto *Field : record->fields())
1724         assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or anonymous classes allowed");
1725 #endif
1726       return;
1727     }
1728 
1729     // FIXME: volatility
1730     FieldDecl *Field = InitializedFieldInUnion;
1731 
1732     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1733     if (NumInitElements) {
1734       // Store the initializer into the field.
1735       EmitInitializationToLValue(InitExprs[0], FieldLoc);
1736     } else {
1737       // Default-initialize to null.
1738       EmitNullInitializationToLValue(FieldLoc);
1739     }
1740 
1741     return;
1742   }
1743 
1744   // Here we iterate over the fields; this makes it simpler to both
1745   // default-initialize fields and skip over unnamed fields.
1746   for (const auto *field : record->fields()) {
1747     // We're done once we hit the flexible array member.
1748     if (field->getType()->isIncompleteArrayType())
1749       break;
1750 
1751     // Always skip anonymous bitfields.
1752     if (field->isUnnamedBitfield())
1753       continue;
1754 
1755     // We're done if we reach the end of the explicit initializers, we
1756     // have a zeroed object, and the rest of the fields are
1757     // zero-initializable.
1758     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1759         CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1760       break;
1761 
1762 
1763     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1764     // We never generate write-barriers for initialized fields.
1765 LV.setNonGC(true); 1766 1767 if (curInitIndex < NumInitElements) { 1768 // Store the initializer into the field. 1769 EmitInitializationToLValue(InitExprs[curInitIndex++], LV); 1770 } else { 1771 // We're out of initializers; default-initialize to null 1772 EmitNullInitializationToLValue(LV); 1773 } 1774 1775 // Push a destructor if necessary. 1776 // FIXME: if we have an array of structures, all explicitly 1777 // initialized, we can end up pushing a linear number of cleanups. 1778 bool pushedCleanup = false; 1779 if (QualType::DestructionKind dtorKind 1780 = field->getType().isDestructedType()) { 1781 assert(LV.isSimple()); 1782 if (CGF.needsEHCleanup(dtorKind)) { 1783 CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), 1784 CGF.getDestroyer(dtorKind), false); 1785 addCleanup(CGF.EHStack.stable_begin()); 1786 pushedCleanup = true; 1787 } 1788 } 1789 1790 // If the GEP didn't get used because of a dead zero init or something 1791 // else, clean it up for -O0 builds and general tidiness. 1792 if (!pushedCleanup && LV.isSimple()) 1793 if (llvm::GetElementPtrInst *GEP = 1794 dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF))) 1795 if (GEP->use_empty()) 1796 GEP->eraseFromParent(); 1797 } 1798 1799 // Deactivate all the partial cleanups in reverse order, which 1800 // generally means popping them. 1801 assert((cleanupDominator || cleanups.empty()) && 1802 "Missing cleanupDominator before deactivating cleanup blocks"); 1803 for (unsigned i = cleanups.size(); i != 0; --i) 1804 CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator); 1805 1806 // Destroy the placeholder if we made one. 1807 if (cleanupDominator) 1808 cleanupDominator->eraseFromParent(); 1809 } 1810 1811 void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, 1812 llvm::Value *outerBegin) { 1813 // Emit the common subexpression. 1814 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr()); 1815 1816 Address destPtr = EnsureSlot(E->getType()).getAddress(); 1817 uint64_t numElements = E->getArraySize().getZExtValue(); 1818 1819 if (!numElements) 1820 return; 1821 1822 // destPtr is an array*. Construct an elementType* by drilling down a level. 1823 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); 1824 llvm::Value *indices[] = {zero, zero}; 1825 llvm::Value *begin = Builder.CreateInBoundsGEP( 1826 destPtr.getElementType(), destPtr.getPointer(), indices, 1827 "arrayinit.begin"); 1828 1829 // Prepare to special-case multidimensional array initialization: we avoid 1830 // emitting multiple destructor loops in that case. 1831 if (!outerBegin) 1832 outerBegin = begin; 1833 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr()); 1834 1835 QualType elementType = 1836 CGF.getContext().getAsArrayType(E->getType())->getElementType(); 1837 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); 1838 CharUnits elementAlign = 1839 destPtr.getAlignment().alignmentOfArrayElement(elementSize); 1840 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType); 1841 1842 llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); 1843 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body"); 1844 1845 // Jump into the body. 1846 CGF.EmitBlock(bodyBB); 1847 llvm::PHINode *index = 1848 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index"); 1849 index->addIncoming(zero, entryBB); 1850 llvm::Value *element = 1851 Builder.CreateInBoundsGEP(llvmElementType, begin, index); 1852 1853 // Prepare for a cleanup. 
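  // Illustrative example (hypothetical code): an ArrayInitLoopExpr arises, for
  // instance, when a lambda captures an array by copy:
  //   std::string a[4];
  //   auto f = [a] { return a[0]; };
  // Each element of the copy is initialized by this loop; if a later element's
  // initializer throws, the partial-array cleanup below destroys the elements
  // that were already constructed.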
1854   QualType::DestructionKind dtorKind = elementType.isDestructedType();
1855   EHScopeStack::stable_iterator cleanup;
1856   if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1857     if (outerBegin->getType() != element->getType())
1858       outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1859     CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1860                                        elementAlign,
1861                                        CGF.getDestroyer(dtorKind));
1862     cleanup = CGF.EHStack.stable_begin();
1863   } else {
1864     dtorKind = QualType::DK_none;
1865   }
1866 
1867   // Emit the actual filler expression.
1868   {
1869     // Temporaries created in an array initialization loop are destroyed
1870     // at the end of each iteration.
1871     CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1872     CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1873     LValue elementLV = CGF.MakeAddrLValue(
1874         Address(element, llvmElementType, elementAlign), elementType);
1875 
1876     if (InnerLoop) {
1877       // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1878       auto elementSlot = AggValueSlot::forLValue(
1879           elementLV, CGF, AggValueSlot::IsDestructed,
1880           AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
1881           AggValueSlot::DoesNotOverlap);
1882       AggExprEmitter(CGF, elementSlot, false)
1883           .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1884     } else
1885       EmitInitializationToLValue(E->getSubExpr(), elementLV);
1886   }
1887 
1888   // Move on to the next element.
1889   llvm::Value *nextIndex = Builder.CreateNUWAdd(
1890       index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1891   index->addIncoming(nextIndex, Builder.GetInsertBlock());
1892 
1893   // Leave the loop if we're done.
1894   llvm::Value *done = Builder.CreateICmpEQ(
1895       nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1896       "arrayinit.done");
1897   llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1898   Builder.CreateCondBr(done, endBB, bodyBB);
1899 
1900   CGF.EmitBlock(endBB);
1901 
1902   // Leave the partial-array cleanup if we entered one.
1903   if (dtorKind)
1904     CGF.DeactivateCleanupBlock(cleanup, index);
1905 }
1906 
1907 void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1908   AggValueSlot Dest = EnsureSlot(E->getType());
1909 
1910   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1911   EmitInitializationToLValue(E->getBase(), DestLV);
1912   VisitInitListExpr(E->getUpdater());
1913 }
1914 
1915 //===----------------------------------------------------------------------===//
1916 // Entry Points into this File
1917 //===----------------------------------------------------------------------===//
1918 
1919 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1920 /// non-zero bytes that will be stored when outputting the initializer for the
1921 /// specified initializer expression.
1922 static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1923   if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1924     E = MTE->getSubExpr();
1925   E = E->IgnoreParenNoopCasts(CGF.getContext());
1926 
1927   // 0 and 0.0 won't require any non-zero stores!
1928   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1929 
1930   // If this is an initlist expr, sum up the sizes of the (present)
1931   // elements. If this is something weird, assume the whole thing is non-zero.
1932   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1933   while (ILE && ILE->isTransparent())
1934     ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1935   if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1936     return CGF.getContext().getTypeSizeInChars(E->getType());
1937 
1938   // InitListExprs for structs have to be handled carefully. If there are
1939   // reference members, we need to consider the size of the reference, not the
1940   // referencee. InitListExprs for unions and arrays can't have references.
1941   if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1942     if (!RT->isUnionType()) {
1943       RecordDecl *SD = RT->getDecl();
1944       CharUnits NumNonZeroBytes = CharUnits::Zero();
1945 
1946       unsigned ILEElement = 0;
1947       if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1948         while (ILEElement != CXXRD->getNumBases())
1949           NumNonZeroBytes +=
1950               GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1951       for (const auto *Field : SD->fields()) {
1952         // We're done once we hit the flexible array member or run out of
1953         // InitListExpr elements.
1954         if (Field->getType()->isIncompleteArrayType() ||
1955             ILEElement == ILE->getNumInits())
1956           break;
1957         if (Field->isUnnamedBitfield())
1958           continue;
1959 
1960         const Expr *E = ILE->getInit(ILEElement++);
1961 
1962         // Reference values are always non-null and have the width of a pointer.
1963         if (Field->getType()->isReferenceType())
1964           NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1965               CGF.getTarget().getPointerWidth(LangAS::Default));
1966         else
1967           NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1968       }
1969 
1970       return NumNonZeroBytes;
1971     }
1972   }
1973 
1974   // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1975   CharUnits NumNonZeroBytes = CharUnits::Zero();
1976   for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1977     NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1978   return NumNonZeroBytes;
1979 }
1980 
1981 /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1982 /// zeros in it, emit a memset and avoid storing the individual zeros.
1983 ///
1984 static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1985                                      CodeGenFunction &CGF) {
1986   // If the slot is already known to be zeroed, there is nothing to do. Don't
1987   // mess with volatile stores.
1988   if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1989     return;
1990 
1991   // C++ objects with a user-declared constructor don't need zeroing.
1992   if (CGF.getLangOpts().CPlusPlus)
1993     if (const RecordType *RT = CGF.getContext()
1994             .getBaseElementType(E->getType())->getAs<RecordType>()) {
1995       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1996       if (RD->hasUserDeclaredConstructor())
1997         return;
1998     }
1999 
2000   // If the type is 16 bytes or smaller, prefer individual stores over memset.
2001   CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2002   if (Size <= CharUnits::fromQuantity(16))
2003     return;
2004 
2005   // Check to see if at least 3/4 of the initializer is known to be zero. If
2006   // so, we prefer to emit a memset and then individual stores for the rest.
2007   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2008   if (NumNonZeroBytes*4 > Size)
2009     return;
2010 
2011   // Okay, it seems like a good idea to use an initial memset, emit the call.
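  // Illustrative example (hypothetical type): for
  //   struct Big { int head; int rest[63]; };
  //   Big b = { 1 };
  // the slot is 256 bytes but only about 4 bytes are non-zero, so a single
  // memset over the whole slot is emitted here and the field stores that
  // follow only need to write 'head'.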
2012   llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2013 
2014   Address Loc = Slot.getAddress();
2015   Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
2016   CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2017 
2018   // Tell the AggExprEmitter that the slot is known zero.
2019   Slot.setZeroed();
2020 }
2021 
2022 
2023 
2024 
2025 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
2026 /// type. The result is computed into the given slot. Note that if the slot
2027 /// is ignored (it has no address), the value of the aggregate expression is
2028 /// not needed.
2029 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
2030   assert(E && hasAggregateEvaluationKind(E->getType()) &&
2031          "Invalid aggregate expression to emit");
2032   assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2033          "slot has bits but no address");
2034 
2035   // Optimize the slot if possible.
2036   CheckAggExprForMemSetUse(Slot, E, *this);
2037 
2038   AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2039 }
2040 
2041 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
2042   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2043   Address Temp = CreateMemTemp(E->getType());
2044   LValue LV = MakeAddrLValue(Temp, E->getType());
2045   EmitAggExpr(E, AggValueSlot::forLValue(
2046                      LV, *this, AggValueSlot::IsNotDestructed,
2047                      AggValueSlot::DoesNotNeedGCBarriers,
2048                      AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
2049   return LV;
2050 }
2051 
2052 AggValueSlot::Overlap_t
2053 CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
2054   if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2055     return AggValueSlot::DoesNotOverlap;
2056 
2057   // If the field lies entirely within the enclosing class's nvsize, its tail
2058   // padding cannot overlap any already-initialized object. (The only subobjects
2059   // with greater addresses that might already be initialized are vbases.)
2060   const RecordDecl *ClassRD = FD->getParent();
2061   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2062   if (Layout.getFieldOffset(FD->getFieldIndex()) +
2063           getContext().getTypeSize(FD->getType()) <=
2064       (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2065     return AggValueSlot::DoesNotOverlap;
2066 
2067   // The tail padding may contain values we need to preserve.
2068   return AggValueSlot::MayOverlap;
2069 }
2070 
2071 AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
2072     const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2073   // If the most-derived object is a field declared with [[no_unique_address]],
2074   // the tail padding of any virtual base could be reused for other subobjects
2075   // of that field's class.
2076   if (IsVirtual)
2077     return AggValueSlot::MayOverlap;
2078 
2079   // If the base class is laid out entirely within the nvsize of the derived
2080   // class, its tail padding cannot yet be initialized, so we can issue
2081   // stores at the full width of the base class.
2082   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2083   if (Layout.getBaseClassOffset(BaseRD) +
2084           getContext().getASTRecordLayout(BaseRD).getSize() <=
2085       Layout.getNonVirtualSize())
2086     return AggValueSlot::DoesNotOverlap;
2087 
2088   // The tail padding may contain values we need to preserve.
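  // (Storage beyond the derived class's nvsize is typically where virtual
  //  bases are placed; in a complete object those are constructed before the
  //  non-virtual bases, so their bytes may already hold live data here.)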
2089   return AggValueSlot::MayOverlap;
2090 }
2091 
2092 void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
2093                                         AggValueSlot::Overlap_t MayOverlap,
2094                                         bool isVolatile) {
2095   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2096 
2097   Address DestPtr = Dest.getAddress(*this);
2098   Address SrcPtr = Src.getAddress(*this);
2099 
2100   if (getLangOpts().CPlusPlus) {
2101     if (const RecordType *RT = Ty->getAs<RecordType>()) {
2102       CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
2103       assert((Record->hasTrivialCopyConstructor() ||
2104               Record->hasTrivialCopyAssignment() ||
2105               Record->hasTrivialMoveConstructor() ||
2106               Record->hasTrivialMoveAssignment() ||
2107               Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2108              "Trying to aggregate-copy a type without a trivial copy/move "
2109              "constructor or assignment operator");
2110       // Ignore empty classes in C++.
2111       if (Record->isEmpty())
2112         return;
2113     }
2114   }
2115 
2116   if (getLangOpts().CUDAIsDevice) {
2117     if (Ty->isCUDADeviceBuiltinSurfaceType()) {
2118       if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2119                                                                   Src))
2120         return;
2121     } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2122       if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2123                                                                   Src))
2124         return;
2125     }
2126   }
2127 
2128   // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2129   // C99 6.5.16.1p3, which states "If the value being stored in an object is
2130   // read from another object that overlaps in any way the storage of the first
2131   // object, then the overlap shall be exact and the two objects shall have
2132   // qualified or unqualified versions of a compatible type."
2133   //
2134   // memcpy is not defined if the source and destination pointers are exactly
2135   // equal, but other compilers do this optimization, and almost every memcpy
2136   // implementation handles this case safely. If there is a libc that does not
2137   // safely handle this, we can add a target hook.
2138 
2139   // Get data size info for this aggregate. Don't copy the tail padding if this
2140   // might be a potentially-overlapping subobject, since the tail padding might
2141   // be occupied by a different object. Otherwise, copying it is fine.
2142   TypeInfoChars TypeInfo;
2143   if (MayOverlap)
2144     TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2145   else
2146     TypeInfo = getContext().getTypeInfoInChars(Ty);
2147 
2148   llvm::Value *SizeVal = nullptr;
2149   if (TypeInfo.Width.isZero()) {
2150     // But note that getTypeInfo returns 0 for a VLA.
2151     if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2152             getContext().getAsArrayType(Ty))) {
2153       QualType BaseEltTy;
2154       SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2155       TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2156       assert(!TypeInfo.Width.isZero());
2157       SizeVal = Builder.CreateNUWMul(
2158           SizeVal,
2159           llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2160     }
2161   }
2162   if (!SizeVal) {
2163     SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2164   }
2165 
2166   // FIXME: If we have a volatile struct, the optimizer can remove what might
2167   // appear to be `extra' memory ops:
2168   //
2169   //   volatile struct { int i; } a, b;
2170   //
2171   //   int main() {
2172   //     a = b;
2173   //     a = b;
2174   //   }
2175   //
2176   // we need to use a different call here. We use isVolatile to indicate when
2177   // either the source or the destination is volatile.
2178 
2179   DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
2180   SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
2181 
2182   // Don't do any of the memmove_collectable tests if GC isn't set.
2183   if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2184     // fall through
2185   } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2186     RecordDecl *Record = RecordTy->getDecl();
2187     if (Record->hasObjectMember()) {
2188       CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2189                                                     SizeVal);
2190       return;
2191     }
2192   } else if (Ty->isArrayType()) {
2193     QualType BaseType = getContext().getBaseElementType(Ty);
2194     if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2195       if (RecordTy->getDecl()->hasObjectMember()) {
2196         CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2197                                                       SizeVal);
2198         return;
2199       }
2200     }
2201   }
2202 
2203   auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2204 
2205   // Determine the metadata to describe the position of any padding in this
2206   // memcpy, as well as the TBAA tags for the members of the struct, in case
2207   // the optimizer wishes to expand it into scalar memory operations.
2208   if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2209     Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2210 
2211   if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2212     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2213         Dest.getTBAAInfo(), Src.getTBAAInfo());
2214     CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2215   }
2216 }
2217 