//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  return C;
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();

  if (E->isExpressibleAsConstantInitializer()) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, E->getType());
  }

  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
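  // For illustration: Sema has already picked BoxingMethod, so '@(42)'
  // typically arrives here with +[NSNumber numberWithInt:], while boxing a
  // struct such as an NSRange 'r' uses something like
  // [NSValue valueWithBytes:&r objCType:@encode(NSRange)].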
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    llvm::Value *BitCast = Builder.CreateBitCast(
        Temporary.emitRawPointer(*this), ConvertType(ArgQT));
    Args.add(RValue::get(BitCast), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        llvm::LLVMContext::MD_invariant_load,
        llvm::MDNode::get(getLLVMContext(), {}));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType = Context.getConstantArrayType(
      ElementType, APNumElements, nullptr, ArraySizeModifier::Normal,
      /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
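  // A dictionary literal needs parallel 'keys' and 'objects' buffers; an
  // array literal only needs 'objects'.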
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
      (getLangOpts().ObjCAutoRefCount &&
       CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                 ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
                                    ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                      ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects, *this), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys, *this), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
      llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  assert(InterfacePointerType && "Unexpected InterfacePointerType - null");
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// std::nullopt and the caller can generate a msgSend instead.
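///
/// For illustration, on runtimes that provide these entrypoints the lowering
/// is roughly:
///   [Foo alloc]       -> objc_alloc(Foo)
///   [obj retain]      -> objc_retain(obj)
///   [obj release]     -> objc_release(obj)
///   [obj autorelease] -> objc_autorelease(obj)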
static std::optional<llvm::Value *> tryGenerateSpecializedMessageSend(
    CodeGenFunction &CGF, QualType ResultType, llvm::Value *Receiver,
    const CallArgList &Args, Selector Sel, const ObjCMethodDecl *method,
    bool isClassMessage) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return std::nullopt;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
      // [Foo alloc] -> objc_alloc(Foo) or
      // [self alloc] -> objc_alloc(self)
      if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
        return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
      // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
      // [self allocWithZone:nil] -> objc_allocWithZone(self)
      if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
          Args.size() == 1 && Args.front().getType()->isPointerType() &&
          Sel.getNameForSlot(0) == "allocWithZone") {
        const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
        if (isa<llvm::ConstantPointerNull>(arg))
          return CGF.EmitObjCAllocWithZone(Receiver,
                                           CGF.ConvertType(ResultType));
        return std::nullopt;
      }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
      return nullptr;
    }
    break;

  default:
    break;
  }
  return std::nullopt;
}

CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
    CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType,
    Selector Sel, llvm::Value *Receiver, const CallArgList &Args,
    const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method,
    bool isClassMessage) {
  if (std::optional<llvm::Value *> SpecializedResult =
          tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
                                            Sel, Method, isClassMessage)) {
    return RValue::get(*SpecializedResult);
  }
  return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID,
                             Method);
}

static void AppendFirstImpliedRuntimeProtocols(
    const ObjCProtocolDecl *PD,
    llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) {
  if (!PD->isNonRuntimeProtocol()) {
    const auto *Can = PD->getCanonicalDecl();
    PDs.insert(Can);
    return;
  }

  for (const auto *ParentPD : PD->protocols())
    AppendFirstImpliedRuntimeProtocols(ParentPD, PDs);
}

std::vector<const ObjCProtocolDecl *>
CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
                                      ObjCProtocolDecl::protocol_iterator end) {
  std::vector<const ObjCProtocolDecl *> RuntimePds;
  llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs;

  for (; begin != end; ++begin) {
    const auto *It = *begin;
    const auto *Can = It->getCanonicalDecl();
    if (Can->isNonRuntimeProtocol())
      NonRuntimePDs.insert(Can);
    else
      RuntimePds.push_back(Can);
  }

  // If there are no non-runtime protocols then we can just stop now.
  if (NonRuntimePDs.empty())
    return RuntimePds;

  // Else we have to search through each non-runtime protocol's inheritance
  // hierarchy DAG, stopping whenever a branch either finds a runtime protocol
  // or a non-runtime protocol without any parents. These are the
  // "first-implied" protocols of a non-runtime protocol.
  llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos;
  for (const auto *PD : NonRuntimePDs)
    AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos);

  // Walk the Runtime list to get all protocols implied via the inclusion of
  // this protocol, e.g. all protocols it inherits from including itself.
  llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols;
  for (const auto *PD : RuntimePds) {
    const auto *Can = PD->getCanonicalDecl();
    AllImpliedProtocols.insert(Can);
    Can->getImpliedProtocols(AllImpliedProtocols);
  }

  // Similar to above, walk the list of first-implied protocols to find the
  // set of all protocols implied, excluding the listed protocols themselves
  // since they are not yet a part of the `RuntimePds` list.
  for (const auto *PD : FirstImpliedProtos) {
    PD->getImpliedProtocols(AllImpliedProtocols);
  }

  // From the first-implied list we have to finish building the final protocol
  // list. If a protocol in the first-implied list was already implied via some
  // inheritance path through some other protocols then it would be redundant
  // to add it here and so we skip over it.
  for (const auto *PD : FirstImpliedProtos) {
    if (!AllImpliedProtocols.contains(PD)) {
      RuntimePds.push_back(PD);
    }
  }

  return RuntimePds;
}

/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
static std::optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return std::nullopt;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(0) != "init")
    return std::nullopt;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'
  // with 'cls' a Class.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return std::nullopt;
  Selector SubSel = SubOME->getSelector();

  if (!SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
    return std::nullopt;

  llvm::Value *Receiver = nullptr;
  switch (SubOME->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType())
      return std::nullopt;
    Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
    break;
  }
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    return std::nullopt;
  }

  return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  if (std::optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
    return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
      (!isDelegateInit &&
       CGM.getLangOpts().ObjCAutoRefCount &&
       method &&
       method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    isClassMessage = ReceiverType->isObjCClassType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    OID = ReceiverType->castAs<ObjCObjectType>()->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
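    // (For example, a plain [Foo alloc] can become a call to objc_alloc;
    // see tryGenerateSpecializedMessageSend above.)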
    result = Runtime.GeneratePossiblySpecializedMessageSend(
        *this, Return, ResultType, E->getSelector(), Receiver, Args, OID,
        method, isClassMessage);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  if (OMD->isDirectMethod()) {
    Fn->setVisibility(llvm::Function::HiddenVisibility);
    CGM.SetLLVMFunctionAttributes(OMD, FI, Fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn);
  } else {
    CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
  }

  args.push_back(OMD->getSelfDecl());
  if (!OMD->isDirectMethod())
    args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  if (OMD->isDirectMethod()) {
    // This function is a direct call, it has to implement a nil check
    // on entry.
    //
    // TODO: possibly have several entry points to elide the check
    CGM.getObjCRuntime().GenerateDirectMethodPrologue(*this, Fn, OMD, CD);
  }

  // In ARC, certain methods get an extra cleanup.
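  // Specifically, a -dealloc implementation gets a FinishARCDealloc cleanup
  // that emits the implicit [super dealloc] call on every exit path.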
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
        OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO->assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  llvm::Value *src =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF);
  args.add(RValue::get(dest), Context.VoidPtrTy);
  args.add(RValue::get(src), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
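      /// (Chosen for atomic aggregate properties that can't be handled with
      /// native atomic accesses, e.g. because the ivar is too large,
      /// under-aligned, or contains strong object members under GC.)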
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    LLVM_PREFERRED_TYPE(StrategyKind)
    unsigned Kind : 8;
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsAtomic : 1;
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsCopy : 1;
    LLVM_PREFERRED_TYPE(bool)
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
  IvarSize = TInfo.Width;
  IvarAlignment = TInfo.Align;

  // If we have a copy property, we always have to use setProperty.
  // If the property is atomic we need to use getProperty, but in
  // the nonatomic case we can just use expression.
  if (IsCopy) {
    Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
      CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction(OMD->getEndLoc());
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

// emitCmdValueForGetterSetterBody - Handle emitting the load necessary for
// the `_cmd` selector argument for getter/setter bodies. For direct methods,
// this returns an undefined/poison value; this matches behavior prior to
// `_cmd` being removed from the direct method ABI as the getter/setter caller
// would never load one. For non-direct methods, this emits a load of the
// implicit `_cmd` storage.
static llvm::Value *emitCmdValueForGetterSetterBody(CodeGenFunction &CGF,
                                                    ObjCMethodDecl *MD) {
  if (MD->isDirectMethod()) {
    // Direct methods do not have a `_cmd` argument. Emit an undefined/poison
    // value. This will be passed to objc_getProperty/objc_setProperty, which
    // has not appeared bothered by the `_cmd` argument being undefined before.
    llvm::Type *selType = CGF.ConvertType(CGF.getContext().getObjCSelType());
    return llvm::PoisonValue::get(selType);
  }

  return CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(MD->getCmdDecl()), "cmd");
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    if (!AtomicHelperFn) {
      LValue Src =
          EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
      LValue Dst = MakeAddrLValue(ReturnValue, ivar->getType());
      callCStructCopyConstructor(Dst, Src);
    } else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    } else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = ivarAddr.withElementType(bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);
    llvm::Value *ivarVal = load;
    if (PointerAuthQualifier PAQ = ivar->getType().getPointerAuth()) {
      CGPointerAuthInfo SrcInfo = EmitPointerAuthInfo(PAQ, ivarAddr);
      CGPointerAuthInfo TargetInfo =
          CGM.getPointerAuthInfoForType(getterMethod->getReturnType());
      ivarVal = emitPointerAuthResign(ivarVal, ivar->getType(), SrcInfo,
                                      TargetInfo, /*isKnownNonNull=*/false);
    }

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    if (ivarSize > retTySize) {
      bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(ivarVal, bitcastType);
    }
    Builder.CreateStore(ivarVal, ReturnValue.withElementType(bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::FunctionCallee getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();

    if (ivar->getType().getPointerAuth()) {
      // This currently cannot be hit, but if we ever allow objc pointers
      // to be signed, this will become possible. Reaching here would require
      // a copy, weak, etc property backed by an authenticated pointer.
      CGM.ErrorUnsupported(propImpl,
                           "Obj-C getter requiring pointer authentication");
      return;
    }

    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, getterMethod);
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::CallBase *CallInstruction;
    RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
                             getContext().getObjCIdType(), args),
                         callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    auto EvaluationKind = getEvaluationKind(ivarType);
    assert(!ivarType.getPointerAuth() || EvaluationKind == TEK_Scalar);
    switch (EvaluationKind) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, getOverlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        if (ivarType.getPointerAuth()) {
          CGM.ErrorUnsupported(propImpl,
                               "Obj-C getter for authenticated reference type");
          return;
        }
        value = LV.getAddress().emitRawPointer(*this);
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          if (PointerAuthQualifier PAQ = ivar->getType().getPointerAuth()) {
            Address ivarAddr = LV.getAddress();
            llvm::LoadInst *LoadInst = Builder.CreateLoad(ivarAddr, "load");
            llvm::Value *Load = LoadInst;
            auto SrcInfo = EmitPointerAuthInfo(PAQ, ivarAddr);
            auto TargetInfo =
                CGM.getPointerAuthInfoForType(getterMethod->getReturnType());
            Load = emitPointerAuthResign(Load, ivarType, SrcInfo, TargetInfo,
                                         /*isKnownNonNull=*/false);
            value = Load;
          } else
            value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();

          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the size of the type.
  llvm::Value *size =
      CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl();

  if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    ParmVarDecl *PVD = *setterMethod->param_begin();
    if (!AtomicHelperFn) {
      // Call the move assignment operator instead of calling the copy
      // assignment operator and destructor.
      LValue Dst = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar,
                                     /*quals*/ 0);
      LValue Src = MakeAddrLValue(GetAddrOfLocalVar(PVD), ivar->getType());
      callCStructMoveAssignmentOperator(Dst, Src);
    } else {
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, AtomicHelperFn);
    }
    // Deactivate the destructor for the setter parameter.
    DeactivateCleanupBlock(CalleeDestructedParamCleanups[PVD], AllocaInsertPt);
    return;
  }

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
1497 EmitStmt(propImpl->getSetterCXXAssignment()); 1498 else 1499 // If atomic, assignment is called via a locking API. 1500 emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, 1501 AtomicHelperFn); 1502 return; 1503 } 1504 1505 PropertyImplStrategy strategy(CGM, propImpl); 1506 switch (strategy.getKind()) { 1507 case PropertyImplStrategy::Native: { 1508 // We don't need to do anything for a zero-size struct. 1509 if (strategy.getIvarSize().isZero()) 1510 return; 1511 1512 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); 1513 1514 LValue ivarLValue = 1515 EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0); 1516 Address ivarAddr = ivarLValue.getAddress(); 1517 1518 // Currently, all atomic accesses have to be through integer 1519 // types, so there's no point in trying to pick a prettier type. 1520 llvm::Type *castType = llvm::Type::getIntNTy( 1521 getLLVMContext(), getContext().toBits(strategy.getIvarSize())); 1522 1523 // Cast both arguments to the chosen operation type. 1524 argAddr = argAddr.withElementType(castType); 1525 ivarAddr = ivarAddr.withElementType(castType); 1526 1527 llvm::Value *load = Builder.CreateLoad(argAddr); 1528 1529 if (PointerAuthQualifier PAQ = ivar->getType().getPointerAuth()) { 1530 QualType PropertyType = propImpl->getPropertyDecl()->getType(); 1531 CGPointerAuthInfo SrcInfo = CGM.getPointerAuthInfoForType(PropertyType); 1532 CGPointerAuthInfo TargetInfo = EmitPointerAuthInfo(PAQ, ivarAddr); 1533 load = emitPointerAuthResign(load, ivar->getType(), SrcInfo, TargetInfo, 1534 /*isKnownNonNull=*/false); 1535 } 1536 1537 // Perform an atomic store. There are no memory ordering requirements. 1538 llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr); 1539 store->setAtomic(llvm::AtomicOrdering::Unordered); 1540 return; 1541 } 1542 1543 case PropertyImplStrategy::GetSetProperty: 1544 case PropertyImplStrategy::SetPropertyAndExpressionGet: { 1545 1546 llvm::FunctionCallee setOptimizedPropertyFn = nullptr; 1547 llvm::FunctionCallee setPropertyFn = nullptr; 1548 if (UseOptimizedSetter(CGM)) { 1549 // Requires the OS X 10.8 / iOS 6.0 runtime, and only with GC off. 1550 setOptimizedPropertyFn = 1551 CGM.getObjCRuntime().GetOptimizedPropertySetFunction( 1552 strategy.isAtomic(), strategy.isCopy()); 1553 if (!setOptimizedPropertyFn) { 1554 CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI"); 1555 return; 1556 } 1557 } 1558 else { 1559 setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction(); 1560 if (!setPropertyFn) { 1561 CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy"); 1562 return; 1563 } 1564 } 1565 1566 // Emit objc_setProperty((id) self, _cmd, offset, arg, 1567 // <is-atomic>, <is-copy>).
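// For the optimized variants the runtime encodes <is-atomic> and <is-copy>
// in the entry point itself, so the call is roughly:
//   objc_setProperty_atomic_copy((id) self, _cmd, arg, offset);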
1568 llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, setterMethod); 1569 llvm::Value *self = 1570 Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy); 1571 llvm::Value *ivarOffset = 1572 EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar); 1573 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); 1574 llvm::Value *arg = Builder.CreateLoad(argAddr, "arg"); 1575 arg = Builder.CreateBitCast(arg, VoidPtrTy); 1576 1577 CallArgList args; 1578 args.add(RValue::get(self), getContext().getObjCIdType()); 1579 args.add(RValue::get(cmd), getContext().getObjCSelType()); 1580 if (setOptimizedPropertyFn) { 1581 args.add(RValue::get(arg), getContext().getObjCIdType()); 1582 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); 1583 CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn); 1584 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args), 1585 callee, ReturnValueSlot(), args); 1586 } else { 1587 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); 1588 args.add(RValue::get(arg), getContext().getObjCIdType()); 1589 args.add(RValue::get(Builder.getInt1(strategy.isAtomic())), 1590 getContext().BoolTy); 1591 args.add(RValue::get(Builder.getInt1(strategy.isCopy())), 1592 getContext().BoolTy); 1593 // FIXME: We shouldn't need to get the function info here; the runtime 1594 // already should have computed it to build the function. 1595 CGCallee callee = CGCallee::forDirect(setPropertyFn); 1596 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args), 1597 callee, ReturnValueSlot(), args); 1598 } 1599 1600 return; 1601 } 1602 1603 case PropertyImplStrategy::CopyStruct: 1604 emitStructSetterCall(*this, setterMethod, ivar); 1605 return; 1606 1607 case PropertyImplStrategy::Expression: 1608 break; 1609 } 1610 1611 // Otherwise, fake up some ASTs and emit a normal assignment. 1612 ValueDecl *selfDecl = setterMethod->getSelfDecl(); 1613 DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(), 1614 VK_LValue, SourceLocation()); 1615 ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(), 1616 CK_LValueToRValue, &self, VK_PRValue, 1617 FPOptionsOverride()); 1618 ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(), 1619 SourceLocation(), SourceLocation(), 1620 &selfLoad, true, true); 1621 1622 ParmVarDecl *argDecl = *setterMethod->param_begin(); 1623 QualType argType = argDecl->getType().getNonReferenceType(); 1624 DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue, 1625 SourceLocation()); 1626 ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack, 1627 argType.getUnqualifiedType(), CK_LValueToRValue, 1628 &arg, VK_PRValue, FPOptionsOverride()); 1629 1630 // The property type can differ from the ivar type in some situations with 1631 // Objective-C pointer types; in those cases we can always bitcast the RHS. 1632 // The following absurdity is just to ensure well-formed IR.
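// For example, a property declared as 'id' may be backed by an ivar of type
// 'NSString *'; the loaded argument then needs a CK_BitCast to the ivar type
// before the assignment.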
1633 CastKind argCK = CK_NoOp; 1634 if (ivarRef.getType()->isObjCObjectPointerType()) { 1635 if (argLoad.getType()->isObjCObjectPointerType()) 1636 argCK = CK_BitCast; 1637 else if (argLoad.getType()->isBlockPointerType()) 1638 argCK = CK_BlockPointerToObjCPointerCast; 1639 else 1640 argCK = CK_CPointerToObjCPointerCast; 1641 } else if (ivarRef.getType()->isBlockPointerType()) { 1642 if (argLoad.getType()->isBlockPointerType()) 1643 argCK = CK_BitCast; 1644 else 1645 argCK = CK_AnyPointerToBlockPointerCast; 1646 } else if (ivarRef.getType()->isPointerType()) { 1647 argCK = CK_BitCast; 1648 } else if (argLoad.getType()->isAtomicType() && 1649 !ivarRef.getType()->isAtomicType()) { 1650 argCK = CK_AtomicToNonAtomic; 1651 } else if (!argLoad.getType()->isAtomicType() && 1652 ivarRef.getType()->isAtomicType()) { 1653 argCK = CK_NonAtomicToAtomic; 1654 } 1655 ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK, 1656 &argLoad, VK_PRValue, FPOptionsOverride()); 1657 Expr *finalArg = &argLoad; 1658 if (!getContext().hasSameUnqualifiedType(ivarRef.getType(), 1659 argLoad.getType())) 1660 finalArg = &argCast; 1661 1662 BinaryOperator *assign = BinaryOperator::Create( 1663 getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), 1664 VK_PRValue, OK_Ordinary, SourceLocation(), FPOptionsOverride()); 1665 EmitStmt(assign); 1666 } 1667 1668 /// Generate an Objective-C property setter function. 1669 /// 1670 /// The given Decl must be an ObjCImplementationDecl. \@synthesize 1671 /// is illegal within a category. 1672 void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, 1673 const ObjCPropertyImplDecl *PID) { 1674 llvm::Constant *AtomicHelperFn = 1675 CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID); 1676 ObjCMethodDecl *OMD = PID->getSetterMethodDecl(); 1677 assert(OMD && "Invalid call to generate setter (empty method)"); 1678 StartObjCMethod(OMD, IMP->getClassInterface()); 1679 1680 generateObjCSetterBody(IMP, PID, AtomicHelperFn); 1681 1682 FinishFunction(OMD->getEndLoc()); 1683 } 1684 1685 namespace { 1686 struct DestroyIvar final : EHScopeStack::Cleanup { 1687 private: 1688 llvm::Value *addr; 1689 const ObjCIvarDecl *ivar; 1690 CodeGenFunction::Destroyer *destroyer; 1691 bool useEHCleanupForArray; 1692 public: 1693 DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar, 1694 CodeGenFunction::Destroyer *destroyer, 1695 bool useEHCleanupForArray) 1696 : addr(addr), ivar(ivar), destroyer(destroyer), 1697 useEHCleanupForArray(useEHCleanupForArray) {} 1698 1699 void Emit(CodeGenFunction &CGF, Flags flags) override { 1700 LValue lvalue 1701 = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0); 1702 CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer, 1703 flags.isForNormalCleanup() && useEHCleanupForArray); 1704 } 1705 }; 1706 } 1707 1708 /// Like CodeGenFunction::destroyARCStrong, but do it with a call. 
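/// call void \@objc_storeStrong(i8** %addr, i8* null)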
1709 static void destroyARCStrongWithStore(CodeGenFunction &CGF, 1710 Address addr, 1711 QualType type) { 1712 llvm::Value *null = getNullForVariable(addr); 1713 CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 1714 } 1715 1716 static void emitCXXDestructMethod(CodeGenFunction &CGF, 1717 ObjCImplementationDecl *impl) { 1718 CodeGenFunction::RunCleanupsScope scope(CGF); 1719 1720 llvm::Value *self = CGF.LoadObjCSelf(); 1721 1722 const ObjCInterfaceDecl *iface = impl->getClassInterface(); 1723 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); 1724 ivar; ivar = ivar->getNextIvar()) { 1725 QualType type = ivar->getType(); 1726 1727 // Check whether the ivar is a destructible type. 1728 QualType::DestructionKind dtorKind = type.isDestructedType(); 1729 if (!dtorKind) continue; 1730 1731 CodeGenFunction::Destroyer *destroyer = nullptr; 1732 1733 // Use a call to objc_storeStrong to destroy strong ivars, for the 1734 // general benefit of the tools. 1735 if (dtorKind == QualType::DK_objc_strong_lifetime) { 1736 destroyer = destroyARCStrongWithStore; 1737 1738 // Otherwise use the default for the destruction kind. 1739 } else { 1740 destroyer = CGF.getDestroyer(dtorKind); 1741 } 1742 1743 CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind); 1744 1745 CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer, 1746 cleanupKind & EHCleanup); 1747 } 1748 1749 assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?"); 1750 } 1751 1752 void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, 1753 ObjCMethodDecl *MD, 1754 bool ctor) { 1755 MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface()); 1756 StartObjCMethod(MD, IMP->getClassInterface()); 1757 1758 // Emit .cxx_construct. 1759 if (ctor) { 1760 // Suppress the final autorelease in ARC. 1761 AutoreleaseResult = false; 1762 1763 for (const auto *IvarInit : IMP->inits()) { 1764 FieldDecl *Field = IvarInit->getAnyMember(); 1765 ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field); 1766 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), 1767 LoadObjCSelf(), Ivar, 0); 1768 EmitAggExpr(IvarInit->getInit(), 1769 AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, 1770 AggValueSlot::DoesNotNeedGCBarriers, 1771 AggValueSlot::IsNotAliased, 1772 AggValueSlot::DoesNotOverlap)); 1773 } 1774 // constructor returns 'self'. 1775 CodeGenTypes &Types = CGM.getTypes(); 1776 QualType IdTy(CGM.getContext().getObjCIdType()); 1777 llvm::Value *SelfAsId = 1778 Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); 1779 EmitReturnOfRValue(RValue::get(SelfAsId), IdTy); 1780 1781 // Emit .cxx_destruct. 
1782 } else { 1783 emitCXXDestructMethod(*this, IMP); 1784 } 1785 FinishFunction(); 1786 } 1787 1788 llvm::Value *CodeGenFunction::LoadObjCSelf() { 1789 VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl(); 1790 DeclRefExpr DRE(getContext(), Self, 1791 /*is enclosing local*/ (CurFuncDecl != CurCodeDecl), 1792 Self->getType(), VK_LValue, SourceLocation()); 1793 return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation()); 1794 } 1795 1796 QualType CodeGenFunction::TypeOfSelfObject() { 1797 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); 1798 ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); 1799 const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>( 1800 getContext().getCanonicalType(selfDecl->getType())); 1801 return PTy->getPointeeType(); 1802 } 1803 1804 void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ 1805 llvm::FunctionCallee EnumerationMutationFnPtr = 1806 CGM.getObjCRuntime().EnumerationMutationFunction(); 1807 if (!EnumerationMutationFnPtr) { 1808 CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime"); 1809 return; 1810 } 1811 CGCallee EnumerationMutationFn = 1812 CGCallee::forDirect(EnumerationMutationFnPtr); 1813 1814 CGDebugInfo *DI = getDebugInfo(); 1815 if (DI) 1816 DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); 1817 1818 RunCleanupsScope ForScope(*this); 1819 1820 // The local variable comes into scope immediately. 1821 AutoVarEmission variable = AutoVarEmission::invalid(); 1822 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) 1823 variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl())); 1824 1825 JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end"); 1826 1827 // Fast enumeration state. 1828 QualType StateTy = CGM.getObjCFastEnumerationStateType(); 1829 Address StatePtr = CreateMemTemp(StateTy, "state.ptr"); 1830 EmitNullInitialization(StatePtr, StateTy); 1831 1832 // Number of elements in the items array. 1833 static const unsigned NumItems = 16; 1834 1835 // Fetch the countByEnumeratingWithState:objects:count: selector. 1836 const IdentifierInfo *II[] = { 1837 &CGM.getContext().Idents.get("countByEnumeratingWithState"), 1838 &CGM.getContext().Idents.get("objects"), 1839 &CGM.getContext().Idents.get("count")}; 1840 Selector FastEnumSel = 1841 CGM.getContext().Selectors.getSelector(std::size(II), &II[0]); 1842 1843 QualType ItemsTy = getContext().getConstantArrayType( 1844 getContext().getObjCIdType(), llvm::APInt(32, NumItems), nullptr, 1845 ArraySizeModifier::Normal, 0); 1846 Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr"); 1847 1848 // Emit the collection pointer. In ARC, we do a retain. 1849 llvm::Value *Collection; 1850 if (getLangOpts().ObjCAutoRefCount) { 1851 Collection = EmitARCRetainScalarExpr(S.getCollection()); 1852 1853 // Enter a cleanup to do the release. 1854 EmitObjCConsumeObject(S.getCollection()->getType(), Collection); 1855 } else { 1856 Collection = EmitScalarExpr(S.getCollection()); 1857 } 1858 1859 // The 'continue' label needs to appear within the cleanup for the 1860 // collection object. 1861 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); 1862 1863 // Send it our message: 1864 CallArgList Args; 1865 1866 // The first argument is a temporary of the enumeration-state type. 1867 Args.add(RValue::get(StatePtr, *this), getContext().getPointerType(StateTy)); 1868 1869 // The second argument is a temporary array with space for NumItems 1870 // pointers. 
We'll actually be loading elements from the array 1871 // pointer written into the control state; this buffer is so that 1872 // collections that *aren't* backed by arrays can still queue up 1873 // batches of elements. 1874 Args.add(RValue::get(ItemsPtr, *this), getContext().getPointerType(ItemsTy)); 1875 1876 // The third argument is the capacity of that temporary array. 1877 llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType()); 1878 llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems); 1879 Args.add(RValue::get(Count), getContext().getNSUIntegerType()); 1880 1881 // Start the enumeration. 1882 RValue CountRV = 1883 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1884 getContext().getNSUIntegerType(), 1885 FastEnumSel, Collection, Args); 1886 1887 // The initial number of objects that were returned in the buffer. 1888 llvm::Value *initialBufferLimit = CountRV.getScalarVal(); 1889 1890 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); 1891 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); 1892 1893 llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy); 1894 1895 // If the limit pointer was zero to begin with, the collection is 1896 // empty; skip all this. Set the branch weight assuming this has the same 1897 // probability of exiting the loop as any other loop exit. 1898 uint64_t EntryCount = getCurrentProfileCount(); 1899 Builder.CreateCondBr( 1900 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, 1901 LoopInitBB, 1902 createProfileWeights(EntryCount, getProfileCount(S.getBody()))); 1903 1904 // Otherwise, initialize the loop. 1905 EmitBlock(LoopInitBB); 1906 1907 // Save the initial mutations value. This is the value at an 1908 // address that was written into the state object by 1909 // countByEnumeratingWithState:objects:count:. 1910 Address StateMutationsPtrPtr = 1911 Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr"); 1912 llvm::Value *StateMutationsPtr 1913 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1914 1915 llvm::Type *UnsignedLongTy = ConvertType(getContext().UnsignedLongTy); 1916 llvm::Value *initialMutations = 1917 Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, 1918 getPointerAlign(), "forcoll.initial-mutations"); 1919 1920 // Start looping. This is the point we return to whenever we have a 1921 // fresh, non-empty batch of objects. 1922 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); 1923 EmitBlock(LoopBodyBB); 1924 1925 // The current index into the buffer. 1926 llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index"); 1927 index->addIncoming(zero, LoopInitBB); 1928 1929 // The current buffer size. 1930 llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count"); 1931 count->addIncoming(initialBufferLimit, LoopInitBB); 1932 1933 incrementProfileCounter(&S); 1934 1935 // Check whether the mutations value has changed from where it was 1936 // at start. StateMutationsPtr should actually be invariant between 1937 // refreshes. 
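// A mismatch means the collection was mutated while being enumerated; the
// mutation function (objc_enumerationMutation in the Apple runtimes)
// typically raises an exception rather than returning.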
1938 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1939 llvm::Value *currentMutations 1940 = Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, 1941 getPointerAlign(), "statemutations"); 1942 1943 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); 1944 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); 1945 1946 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), 1947 WasNotMutatedBB, WasMutatedBB); 1948 1949 // If it has changed, call the enumeration-mutation function. 1950 EmitBlock(WasMutatedBB); 1951 llvm::Type *ObjCIdType = ConvertType(getContext().getObjCIdType()); 1952 llvm::Value *V = 1953 Builder.CreateBitCast(Collection, ObjCIdType); 1954 CallArgList Args2; 1955 Args2.add(RValue::get(V), getContext().getObjCIdType()); 1956 // FIXME: We shouldn't need to get the function info here; the runtime already 1957 // should have computed it to build the function. 1958 EmitCall( 1959 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), 1960 EnumerationMutationFn, ReturnValueSlot(), Args2); 1961 1962 // Otherwise, or if the mutation function returns, just continue. 1963 EmitBlock(WasNotMutatedBB); 1964 1965 // Initialize the element variable. 1966 RunCleanupsScope elementVariableScope(*this); 1967 bool elementIsVariable; 1968 LValue elementLValue; 1969 QualType elementType; 1970 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { 1971 // Initialize the variable, in case it's a __block variable or something. 1972 EmitAutoVarInit(variable); 1973 1974 const VarDecl *D = cast<VarDecl>(SD->getSingleDecl()); 1975 DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false, 1976 D->getType(), VK_LValue, SourceLocation()); 1977 elementLValue = EmitLValue(&tempDRE); 1978 elementType = D->getType(); 1979 elementIsVariable = true; 1980 1981 if (D->isARCPseudoStrong()) 1982 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); 1983 } else { 1984 elementLValue = LValue(); // suppress warning 1985 elementType = cast<Expr>(S.getElement())->getType(); 1986 elementIsVariable = false; 1987 } 1988 llvm::Type *convertedElementType = ConvertType(elementType); 1989 1990 // Fetch the buffer out of the enumeration state. 1991 // TODO: this pointer should actually be invariant between 1992 // refreshes, which would help us do certain loop optimizations. 1993 Address StateItemsPtr = 1994 Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr"); 1995 llvm::Value *EnumStateItems = 1996 Builder.CreateLoad(StateItemsPtr, "stateitems"); 1997 1998 // Fetch the value at the current index from the buffer. 1999 llvm::Value *CurrentItemPtr = Builder.CreateInBoundsGEP( 2000 ObjCIdType, EnumStateItems, index, "currentitem.ptr"); 2001 llvm::Value *CurrentItem = 2002 Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign()); 2003 2004 if (SanOpts.has(SanitizerKind::ObjCCast)) { 2005 // Before using an item from the collection, check that the implicit cast 2006 // from id to the element type is valid. This is done with instrumentation 2007 // roughly corresponding to: 2008 // 2009 // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ } 2010 const ObjCObjectPointerType *ObjPtrTy = 2011 elementType->getAsObjCInterfacePointerType(); 2012 const ObjCInterfaceType *InterfaceTy = 2013 ObjPtrTy ?
ObjPtrTy->getInterfaceType() : nullptr; 2014 if (InterfaceTy) { 2015 auto CheckOrdinal = SanitizerKind::SO_ObjCCast; 2016 auto CheckHandler = SanitizerHandler::InvalidObjCCast; 2017 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler); 2018 auto &C = CGM.getContext(); 2019 assert(InterfaceTy->getDecl() && "No decl for ObjC interface type"); 2020 Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C); 2021 CallArgList IsKindOfClassArgs; 2022 llvm::Value *Cls = 2023 CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl()); 2024 IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType()); 2025 llvm::Value *IsClass = 2026 CGM.getObjCRuntime() 2027 .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy, 2028 IsKindOfClassSel, CurrentItem, 2029 IsKindOfClassArgs) 2030 .getScalarVal(); 2031 llvm::Constant *StaticData[] = { 2032 EmitCheckSourceLocation(S.getBeginLoc()), 2033 EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))}; 2034 EmitCheck({{IsClass, CheckOrdinal}}, CheckHandler, 2035 ArrayRef<llvm::Constant *>(StaticData), CurrentItem); 2036 } 2037 } 2038 2039 // Cast that value to the right type. 2040 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, 2041 "currentitem"); 2042 2043 // Make sure we have an l-value. Yes, this gets evaluated every 2044 // time through the loop. 2045 if (!elementIsVariable) { 2046 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 2047 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); 2048 } else { 2049 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, 2050 /*isInit*/ true); 2051 } 2052 2053 // If we do have an element variable, this assignment is the end of 2054 // its initialization. 2055 if (elementIsVariable) 2056 EmitAutoVarCleanups(variable); 2057 2058 // Perform the loop body, setting up break and continue labels. 2059 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); 2060 { 2061 RunCleanupsScope Scope(*this); 2062 EmitStmt(S.getBody()); 2063 } 2064 BreakContinueStack.pop_back(); 2065 2066 // Destroy the element variable now. 2067 elementVariableScope.ForceCleanup(); 2068 2069 // Check whether there are more elements. 2070 EmitBlock(AfterBody.getBlock()); 2071 2072 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); 2073 2074 // First we check in the local buffer. 2075 llvm::Value *indexPlusOne = 2076 Builder.CreateNUWAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1)); 2077 2078 // If we haven't overrun the buffer yet, we can continue. 2079 // Set the branch weights based on the simplifying assumption that this is 2080 // like a while-loop, i.e., ignoring that the false branch fetches more 2081 // elements and then returns to the loop. 2082 Builder.CreateCondBr( 2083 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, 2084 createProfileWeights(getProfileCount(S.getBody()), EntryCount)); 2085 2086 index->addIncoming(indexPlusOne, AfterBody.getBlock()); 2087 count->addIncoming(count, AfterBody.getBlock()); 2088 2089 // Otherwise, we have to fetch more elements. 2090 EmitBlock(FetchMoreBB); 2091 2092 CountRV = 2093 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2094 getContext().getNSUIntegerType(), 2095 FastEnumSel, Collection, Args); 2096 2097 // If we got a zero count, we're done. 
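// Otherwise, loop back to the body and start the fresh batch at index zero.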
2098 llvm::Value *refetchCount = CountRV.getScalarVal(); 2099 2100 // (note that the message send might split FetchMoreBB) 2101 index->addIncoming(zero, Builder.GetInsertBlock()); 2102 count->addIncoming(refetchCount, Builder.GetInsertBlock()); 2103 2104 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), 2105 EmptyBB, LoopBodyBB); 2106 2107 // No more elements. 2108 EmitBlock(EmptyBB); 2109 2110 if (!elementIsVariable) { 2111 // If the element was not a declaration, set it to be null. 2112 2113 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType); 2114 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 2115 EmitStoreThroughLValue(RValue::get(null), elementLValue); 2116 } 2117 2118 if (DI) 2119 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); 2120 2121 ForScope.ForceCleanup(); 2122 EmitBlock(LoopEnd.getBlock()); 2123 } 2124 2125 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { 2126 CGM.getObjCRuntime().EmitTryStmt(*this, S); 2127 } 2128 2129 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { 2130 CGM.getObjCRuntime().EmitThrowStmt(*this, S); 2131 } 2132 2133 void CodeGenFunction::EmitObjCAtSynchronizedStmt( 2134 const ObjCAtSynchronizedStmt &S) { 2135 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); 2136 } 2137 2138 namespace { 2139 struct CallObjCRelease final : EHScopeStack::Cleanup { 2140 CallObjCRelease(llvm::Value *object) : object(object) {} 2141 llvm::Value *object; 2142 2143 void Emit(CodeGenFunction &CGF, Flags flags) override { 2144 // Releases at the end of the full-expression are imprecise. 2145 CGF.EmitARCRelease(object, ARCImpreciseLifetime); 2146 } 2147 }; 2148 } 2149 2150 /// Produce the code for a CK_ARCConsumeObject. Does a primitive 2151 /// release at the end of the full-expression. 2152 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, 2153 llvm::Value *object) { 2154 // If we're in a conditional branch, we need to make the cleanup 2155 // conditional. 2156 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); 2157 return object; 2158 } 2159 2160 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, 2161 llvm::Value *value) { 2162 return EmitARCRetainAutorelease(type, value); 2163 } 2164 2165 /// Given a number of pointers, inform the optimizer that they're 2166 /// being intrinsically used up until this point in the program. 2167 void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { 2168 llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use; 2169 if (!fn) 2170 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use); 2171 2172 // This isn't really a "runtime" function, but as an intrinsic it 2173 // doesn't really matter as long as we align things up. 2174 EmitNounwindRuntimeCall(fn, values); 2175 } 2176 2177 /// Emit a call to "clang.arc.noop.use", which consumes the result of a call 2178 /// that has operand bundle "clang.arc.attachedcall". 2179 void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) { 2180 llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use; 2181 if (!fn) 2182 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use); 2183 EmitNounwindRuntimeCall(fn, values); 2184 } 2185 2186 static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) { 2187 if (auto *F = dyn_cast<llvm::Function>(RTF)) { 2188 // If the target runtime doesn't naturally support ARC, emit weak 2189 // references to the runtime support library. 
We don't really 2190 // permit this to fail, but we need a particular relocation style. 2191 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && 2192 !CGM.getTriple().isOSBinFormatCOFF()) { 2193 F->setLinkage(llvm::Function::ExternalWeakLinkage); 2194 } 2195 } 2196 } 2197 2198 static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, 2199 llvm::FunctionCallee RTF) { 2200 setARCRuntimeFunctionLinkage(CGM, RTF.getCallee()); 2201 } 2202 2203 static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID, 2204 CodeGenModule &CGM) { 2205 llvm::Function *fn = CGM.getIntrinsic(IntID); 2206 setARCRuntimeFunctionLinkage(CGM, fn); 2207 return fn; 2208 } 2209 2210 /// Perform an operation having the signature 2211 /// i8* (i8*) 2212 /// where a null input causes a no-op and returns null. 2213 static llvm::Value *emitARCValueOperation( 2214 CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType, 2215 llvm::Function *&fn, llvm::Intrinsic::ID IntID, 2216 llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) { 2217 if (isa<llvm::ConstantPointerNull>(value)) 2218 return value; 2219 2220 if (!fn) 2221 fn = getARCIntrinsic(IntID, CGF.CGM); 2222 2223 // Cast the argument to 'id'. 2224 llvm::Type *origType = returnType ? returnType : value->getType(); 2225 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 2226 2227 // Call the function. 2228 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); 2229 call->setTailCallKind(tailKind); 2230 2231 // Cast the result back to the original type. 2232 return CGF.Builder.CreateBitCast(call, origType); 2233 } 2234 2235 /// Perform an operation having the following signature: 2236 /// i8* (i8**) 2237 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr, 2238 llvm::Function *&fn, 2239 llvm::Intrinsic::ID IntID) { 2240 if (!fn) 2241 fn = getARCIntrinsic(IntID, CGF.CGM); 2242 2243 return CGF.EmitNounwindRuntimeCall(fn, addr.emitRawPointer(CGF)); 2244 } 2245 2246 /// Perform an operation having the following signature: 2247 /// i8* (i8**, i8*) 2248 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr, 2249 llvm::Value *value, 2250 llvm::Function *&fn, 2251 llvm::Intrinsic::ID IntID, 2252 bool ignored) { 2253 assert(addr.getElementType() == value->getType()); 2254 2255 if (!fn) 2256 fn = getARCIntrinsic(IntID, CGF.CGM); 2257 2258 llvm::Type *origType = value->getType(); 2259 2260 llvm::Value *args[] = { 2261 CGF.Builder.CreateBitCast(addr.emitRawPointer(CGF), CGF.Int8PtrPtrTy), 2262 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)}; 2263 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); 2264 2265 if (ignored) return nullptr; 2266 2267 return CGF.Builder.CreateBitCast(result, origType); 2268 } 2269 2270 /// Perform an operation having the following signature: 2271 /// void (i8**, i8**) 2272 static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src, 2273 llvm::Function *&fn, 2274 llvm::Intrinsic::ID IntID) { 2275 assert(dst.getType() == src.getType()); 2276 2277 if (!fn) 2278 fn = getARCIntrinsic(IntID, CGF.CGM); 2279 2280 llvm::Value *args[] = { 2281 CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), CGF.Int8PtrPtrTy), 2282 CGF.Builder.CreateBitCast(src.emitRawPointer(CGF), CGF.Int8PtrPtrTy)}; 2283 CGF.EmitNounwindRuntimeCall(fn, args); 2284 } 2285 2286 /// Perform an operation having the signature 2287 /// i8* (i8*) 2288 /// where a null input causes a no-op and returns null. 
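/// Unlike emitARCValueOperation, this resolves the runtime function by name
/// rather than through an LLVM intrinsic, and it emits a call or an invoke;
/// these entry points stand in for ordinary message sends, which may unwind.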
2289 static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF, 2290 llvm::Value *value, 2291 llvm::Type *returnType, 2292 llvm::FunctionCallee &fn, 2293 StringRef fnName) { 2294 if (isa<llvm::ConstantPointerNull>(value)) 2295 return value; 2296 2297 if (!fn) { 2298 llvm::FunctionType *fnType = 2299 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); 2300 fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName); 2301 2302 // We have Native ARC, so set nonlazybind attribute for performance 2303 if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) 2304 if (fnName == "objc_retain") 2305 f->addFnAttr(llvm::Attribute::NonLazyBind); 2306 } 2307 2308 // Cast the argument to 'id'. 2309 llvm::Type *origType = returnType ? returnType : value->getType(); 2310 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 2311 2312 // Call the function. 2313 llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value); 2314 2315 // Mark calls to objc_autorelease as tail on the assumption that methods 2316 // overriding autorelease do not touch anything on the stack. 2317 if (fnName == "objc_autorelease") 2318 if (auto *Call = dyn_cast<llvm::CallInst>(Inst)) 2319 Call->setTailCall(); 2320 2321 // Cast the result back to the original type. 2322 return CGF.Builder.CreateBitCast(Inst, origType); 2323 } 2324 2325 /// Produce the code to do a retain. Based on the type, calls one of: 2326 /// call i8* \@objc_retain(i8* %value) 2327 /// call i8* \@objc_retainBlock(i8* %value) 2328 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { 2329 if (type->isBlockPointerType()) 2330 return EmitARCRetainBlock(value, /*mandatory*/ false); 2331 else 2332 return EmitARCRetainNonBlock(value); 2333 } 2334 2335 /// Retain the given object, with normal retain semantics. 2336 /// call i8* \@objc_retain(i8* %value) 2337 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { 2338 return emitARCValueOperation(*this, value, nullptr, 2339 CGM.getObjCEntrypoints().objc_retain, 2340 llvm::Intrinsic::objc_retain); 2341 } 2342 2343 /// Retain the given block, with _Block_copy semantics. 2344 /// call i8* \@objc_retainBlock(i8* %value) 2345 /// 2346 /// \param mandatory - If false, emit the call with metadata 2347 /// indicating that it's okay for the optimizer to eliminate this call 2348 /// if it can prove that the block never escapes except down the stack. 2349 llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, 2350 bool mandatory) { 2351 llvm::Value *result 2352 = emitARCValueOperation(*this, value, nullptr, 2353 CGM.getObjCEntrypoints().objc_retainBlock, 2354 llvm::Intrinsic::objc_retainBlock); 2355 2356 // If the copy isn't mandatory, add !clang.arc.copy_on_escape to 2357 // tell the optimizer that it doesn't need to do this copy if the 2358 // block doesn't escape, where being passed as an argument doesn't 2359 // count as escaping. 2360 if (!mandatory && isa<llvm::Instruction>(result)) { 2361 llvm::CallInst *call 2362 = cast<llvm::CallInst>(result->stripPointerCasts()); 2363 assert(call->getCalledOperand() == 2364 CGM.getObjCEntrypoints().objc_retainBlock); 2365 2366 call->setMetadata("clang.arc.copy_on_escape", 2367 llvm::MDNode::get(Builder.getContext(), {})); 2368 } 2369 2370 return result; 2371 } 2372 2373 static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) { 2374 // Fetch the void(void) inline asm which marks that we're going to 2375 // do something with the autoreleased return value. 
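// On some targets the runtime recognizes the reclaim only when a specific
// instruction sequence (e.g. 'mov x29, x29' on arm64) sits between the call
// and the retainAutoreleasedReturnValue; at -O0 the marker asm pins that
// sequence in place.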
2376 llvm::InlineAsm *&marker 2377 = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker; 2378 if (!marker) { 2379 StringRef assembly 2380 = CGF.CGM.getTargetCodeGenInfo() 2381 .getARCRetainAutoreleasedReturnValueMarker(); 2382 2383 // If we have an empty assembly string, there's nothing to do. 2384 if (assembly.empty()) { 2385 2386 // Otherwise, at -O0, build an inline asm that we're going to call 2387 // in a moment. 2388 } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { 2389 llvm::FunctionType *type = 2390 llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false); 2391 2392 marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true); 2393 2394 // If we're at -O1 and above, we don't want to litter the code 2395 // with this marker yet, so leave a breadcrumb for the ARC 2396 // optimizer to pick up. 2397 } else { 2398 const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr(); 2399 if (!CGF.CGM.getModule().getModuleFlag(retainRVMarkerKey)) { 2400 auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly); 2401 CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, 2402 retainRVMarkerKey, str); 2403 } 2404 } 2405 } 2406 2407 // Call the marker asm if we made one, which we do only at -O0. 2408 if (marker) 2409 CGF.Builder.CreateCall(marker, {}, CGF.getBundlesForFunclet(marker)); 2410 } 2411 2412 static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value, 2413 bool IsRetainRV, 2414 CodeGenFunction &CGF) { 2415 emitAutoreleasedReturnValueMarker(CGF); 2416 2417 // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting 2418 // retainRV or claimRV calls in the IR. We currently do this only when the 2419 // optimization level isn't -O0 since global-isel, which is currently run at 2420 // -O0, doesn't know about the operand bundle. 2421 ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints(); 2422 llvm::Function *&EP = IsRetainRV 2423 ? EPs.objc_retainAutoreleasedReturnValue 2424 : EPs.objc_unsafeClaimAutoreleasedReturnValue; 2425 llvm::Intrinsic::ID IID = 2426 IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue 2427 : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue; 2428 EP = getARCIntrinsic(IID, CGF.CGM); 2429 2430 llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch(); 2431 2432 // FIXME: Do this on all targets and at -O0 too. This can be enabled only if 2433 // the target backend knows how to handle the operand bundle. 2434 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 && 2435 (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 || 2436 Arch == llvm::Triple::x86_64)) { 2437 llvm::Value *bundleArgs[] = {EP}; 2438 llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs); 2439 auto *oldCall = cast<llvm::CallBase>(value); 2440 llvm::CallBase *newCall = llvm::CallBase::addOperandBundle( 2441 oldCall, llvm::LLVMContext::OB_clang_arc_attachedcall, OB, 2442 oldCall->getIterator()); 2443 newCall->copyMetadata(*oldCall); 2444 oldCall->replaceAllUsesWith(newCall); 2445 oldCall->eraseFromParent(); 2446 CGF.EmitARCNoopIntrinsicUse(newCall); 2447 return newCall; 2448 } 2449 2450 bool isNoTail = 2451 CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail(); 2452 llvm::CallInst::TailCallKind tailKind = 2453 isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None; 2454 return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind); 2455 } 2456 2457 /// Retain the given object which is the result of a function call. 
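/// If the callee paired its result with objc_autoreleaseReturnValue, the
/// runtime hands the object over at +1 without it ever entering the pool: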
2458 /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value) 2459 /// 2460 /// Yes, this function name is one character away from a different 2461 /// call with completely different semantics. 2462 llvm::Value * 2463 CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { 2464 return emitOptimizedARCReturnCall(value, true, *this); 2465 } 2466 2467 /// Claim a possibly-autoreleased return value at +0. This is only 2468 /// valid to do in contexts which do not rely on the retain to keep 2469 /// the object valid for all of its uses; for example, when 2470 /// the value is ignored, or when it is being assigned to an 2471 /// __unsafe_unretained variable. 2472 /// 2473 /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) 2474 llvm::Value * 2475 CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { 2476 return emitOptimizedARCReturnCall(value, false, *this); 2477 } 2478 2479 /// Release the given object. 2480 /// call void \@objc_release(i8* %value) 2481 void CodeGenFunction::EmitARCRelease(llvm::Value *value, 2482 ARCPreciseLifetime_t precise) { 2483 if (isa<llvm::ConstantPointerNull>(value)) return; 2484 2485 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release; 2486 if (!fn) 2487 fn = getARCIntrinsic(llvm::Intrinsic::objc_release, CGM); 2488 2489 // Cast the argument to 'id'. 2490 value = Builder.CreateBitCast(value, Int8PtrTy); 2491 2492 // Call objc_release. 2493 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); 2494 2495 if (precise == ARCImpreciseLifetime) { 2496 call->setMetadata("clang.imprecise_release", 2497 llvm::MDNode::get(Builder.getContext(), {})); 2498 } 2499 } 2500 2501 /// Destroy a __strong variable. 2502 /// 2503 /// At -O0, emit a call to store 'null' into the address; 2504 /// instrumenting tools prefer this because the address is exposed, 2505 /// but it's relatively cumbersome to optimize. 2506 /// 2507 /// At -O1 and above, just load and call objc_release. 2508 /// 2509 /// call void \@objc_storeStrong(i8** %addr, i8* null) 2510 void CodeGenFunction::EmitARCDestroyStrong(Address addr, 2511 ARCPreciseLifetime_t precise) { 2512 if (CGM.getCodeGenOpts().OptimizationLevel == 0) { 2513 llvm::Value *null = getNullForVariable(addr); 2514 EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 2515 return; 2516 } 2517 2518 llvm::Value *value = Builder.CreateLoad(addr); 2519 EmitARCRelease(value, precise); 2520 } 2521 2522 /// Store into a strong object. Always calls this: 2523 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2524 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, 2525 llvm::Value *value, 2526 bool ignored) { 2527 assert(addr.getElementType() == value->getType()); 2528 2529 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong; 2530 if (!fn) 2531 fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM); 2532 2533 llvm::Value *args[] = { 2534 Builder.CreateBitCast(addr.emitRawPointer(*this), Int8PtrPtrTy), 2535 Builder.CreateBitCast(value, Int8PtrTy)}; 2536 EmitNounwindRuntimeCall(fn, args); 2537 2538 if (ignored) return nullptr; 2539 return value; 2540 } 2541 2542 /// Store into a strong object. Sometimes calls this: 2543 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2544 /// Other times, breaks it down into components. 
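/// The expanded form is roughly:
///   %new = call i8* \@objc_retain(i8* %value)
///   %old = load i8*, i8** %addr
///   store i8* %new, i8** %addr
///   call void \@objc_release(i8* %old)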
2545 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, 2546 llvm::Value *newValue, 2547 bool ignored) { 2548 QualType type = dst.getType(); 2549 bool isBlock = type->isBlockPointerType(); 2550 2551 // Use a store barrier at -O0 unless this is a block type or the 2552 // lvalue is inadequately aligned. 2553 if (shouldUseFusedARCCalls() && 2554 !isBlock && 2555 (dst.getAlignment().isZero() || 2556 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { 2557 return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored); 2558 } 2559 2560 // Otherwise, split it out. 2561 2562 // Retain the new value. 2563 newValue = EmitARCRetain(type, newValue); 2564 2565 // Read the old value. 2566 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); 2567 2568 // Store. We do this before the release so that any deallocs won't 2569 // see the old value. 2570 EmitStoreOfScalar(newValue, dst); 2571 2572 // Finally, release the old value. 2573 EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); 2574 2575 return newValue; 2576 } 2577 2578 /// Autorelease the given object. 2579 /// call i8* \@objc_autorelease(i8* %value) 2580 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { 2581 return emitARCValueOperation(*this, value, nullptr, 2582 CGM.getObjCEntrypoints().objc_autorelease, 2583 llvm::Intrinsic::objc_autorelease); 2584 } 2585 2586 /// Autorelease the given object. 2587 /// call i8* \@objc_autoreleaseReturnValue(i8* %value) 2588 llvm::Value * 2589 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { 2590 return emitARCValueOperation(*this, value, nullptr, 2591 CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, 2592 llvm::Intrinsic::objc_autoreleaseReturnValue, 2593 llvm::CallInst::TCK_Tail); 2594 } 2595 2596 /// Do a fused retain/autorelease of the given object. 2597 /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) 2598 llvm::Value * 2599 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { 2600 return emitARCValueOperation(*this, value, nullptr, 2601 CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, 2602 llvm::Intrinsic::objc_retainAutoreleaseReturnValue, 2603 llvm::CallInst::TCK_Tail); 2604 } 2605 2606 /// Do a fused retain/autorelease of the given object. 2607 /// call i8* \@objc_retainAutorelease(i8* %value) 2608 /// or 2609 /// %retain = call i8* \@objc_retainBlock(i8* %value) 2610 /// call i8* \@objc_autorelease(i8* %retain) 2611 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, 2612 llvm::Value *value) { 2613 if (!type->isBlockPointerType()) 2614 return EmitARCRetainAutoreleaseNonBlock(value); 2615 2616 if (isa<llvm::ConstantPointerNull>(value)) return value; 2617 2618 llvm::Type *origType = value->getType(); 2619 value = Builder.CreateBitCast(value, Int8PtrTy); 2620 value = EmitARCRetainBlock(value, /*mandatory*/ true); 2621 value = EmitARCAutorelease(value); 2622 return Builder.CreateBitCast(value, origType); 2623 } 2624 2625 /// Do a fused retain/autorelease of the given object. 2626 /// call i8* \@objc_retainAutorelease(i8* %value) 2627 llvm::Value * 2628 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { 2629 return emitARCValueOperation(*this, value, nullptr, 2630 CGM.getObjCEntrypoints().objc_retainAutorelease, 2631 llvm::Intrinsic::objc_retainAutorelease); 2632 } 2633 2634 /// i8* \@objc_loadWeak(i8** %addr) 2635 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). 
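/// The result is autoreleased, so it is only guaranteed to survive until the
/// innermost enclosing autorelease pool is popped.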
2636 llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) { 2637 return emitARCLoadOperation(*this, addr, 2638 CGM.getObjCEntrypoints().objc_loadWeak, 2639 llvm::Intrinsic::objc_loadWeak); 2640 } 2641 2642 /// i8* \@objc_loadWeakRetained(i8** %addr) 2643 llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) { 2644 return emitARCLoadOperation(*this, addr, 2645 CGM.getObjCEntrypoints().objc_loadWeakRetained, 2646 llvm::Intrinsic::objc_loadWeakRetained); 2647 } 2648 2649 /// i8* \@objc_storeWeak(i8** %addr, i8* %value) 2650 /// Returns %value. 2651 llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr, 2652 llvm::Value *value, 2653 bool ignored) { 2654 return emitARCStoreOperation(*this, addr, value, 2655 CGM.getObjCEntrypoints().objc_storeWeak, 2656 llvm::Intrinsic::objc_storeWeak, ignored); 2657 } 2658 2659 /// i8* \@objc_initWeak(i8** %addr, i8* %value) 2660 /// Returns %value. %addr is known to not have a current weak entry. 2661 /// Essentially equivalent to: 2662 /// *addr = nil; objc_storeWeak(addr, value); 2663 void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) { 2664 // If we're initializing to null, just write null to memory; no need 2665 // to get the runtime involved. But don't do this if optimization 2666 // is enabled, because accounting for this would make the optimizer 2667 // much more complicated. 2668 if (isa<llvm::ConstantPointerNull>(value) && 2669 CGM.getCodeGenOpts().OptimizationLevel == 0) { 2670 Builder.CreateStore(value, addr); 2671 return; 2672 } 2673 2674 emitARCStoreOperation(*this, addr, value, 2675 CGM.getObjCEntrypoints().objc_initWeak, 2676 llvm::Intrinsic::objc_initWeak, /*ignored*/ true); 2677 } 2678 2679 /// void \@objc_destroyWeak(i8** %addr) 2680 /// Essentially objc_storeWeak(addr, nil). 2681 void CodeGenFunction::EmitARCDestroyWeak(Address addr) { 2682 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak; 2683 if (!fn) 2684 fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM); 2685 2686 EmitNounwindRuntimeCall(fn, addr.emitRawPointer(*this)); 2687 } 2688 2689 /// void \@objc_moveWeak(i8** %dest, i8** %src) 2690 /// Disregards the current value in %dest. Leaves %src pointing to nothing. 2691 /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)). 2692 void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) { 2693 emitARCCopyOperation(*this, dst, src, 2694 CGM.getObjCEntrypoints().objc_moveWeak, 2695 llvm::Intrinsic::objc_moveWeak); 2696 } 2697 2698 /// void \@objc_copyWeak(i8** %dest, i8** %src) 2699 /// Disregards the current value in %dest. Essentially 2700 /// objc_release(objc_initWeak(dest, objc_readWeakRetained(src))) 2701 void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) { 2702 emitARCCopyOperation(*this, dst, src, 2703 CGM.getObjCEntrypoints().objc_copyWeak, 2704 llvm::Intrinsic::objc_copyWeak); 2705 } 2706 2707 void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr, 2708 Address SrcAddr) { 2709 llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); 2710 Object = EmitObjCConsumeObject(Ty, Object); 2711 EmitARCStoreWeak(DstAddr, Object, false); 2712 } 2713 2714 void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr, 2715 Address SrcAddr) { 2716 llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); 2717 Object = EmitObjCConsumeObject(Ty, Object); 2718 EmitARCStoreWeak(DstAddr, Object, false); 2719 EmitARCDestroyWeak(SrcAddr); 2720 } 2721 2722 /// Produce the code to do a objc_autoreleasepool_push. 
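/// The returned opaque token must eventually be handed back to
/// objc_autoreleasePoolPop: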
2723 /// call i8* \@objc_autoreleasePoolPush(void) 2724 llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() { 2725 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush; 2726 if (!fn) 2727 fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush, CGM); 2728 2729 return EmitNounwindRuntimeCall(fn); 2730 } 2731 2732 /// Produce the code to do a primitive release. 2733 /// call void \@objc_autoreleasePoolPop(i8* %ptr) 2734 void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) { 2735 assert(value->getType() == Int8PtrTy); 2736 2737 if (getInvokeDest()) { 2738 // Call the runtime method, not the intrinsic, if we are handling exceptions. 2739 llvm::FunctionCallee &fn = 2740 CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke; 2741 if (!fn) { 2742 llvm::FunctionType *fnType = 2743 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2744 fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop"); 2745 setARCRuntimeFunctionLinkage(CGM, fn); 2746 } 2747 2748 // objc_autoreleasePoolPop can throw. 2749 EmitRuntimeCallOrInvoke(fn, value); 2750 } else { 2751 llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop; 2752 if (!fn) 2753 fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop, CGM); 2754 2755 EmitRuntimeCall(fn, value); 2756 } 2757 } 2758 2759 /// Produce the code for the MRR version of objc_autoreleasepool_push, 2760 /// which is: [[NSAutoreleasePool alloc] init]; 2761 /// where alloc is declared as + (id)alloc in NSAutoreleasePool, and 2762 /// init as - (id)init in its NSObject superclass. 2763 /// 2764 llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() { 2765 CGObjCRuntime &Runtime = CGM.getObjCRuntime(); 2766 llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this); 2767 // [NSAutoreleasePool alloc] 2768 const IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); 2769 Selector AllocSel = getContext().Selectors.getSelector(0, &II); 2770 CallArgList Args; 2771 RValue AllocRV = 2772 Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 2773 getContext().getObjCIdType(), 2774 AllocSel, Receiver, Args); 2775 2776 // [Receiver init] 2777 Receiver = AllocRV.getScalarVal(); 2778 II = &CGM.getContext().Idents.get("init"); 2779 Selector InitSel = getContext().Selectors.getSelector(0, &II); 2780 RValue InitRV = 2781 Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 2782 getContext().getObjCIdType(), 2783 InitSel, Receiver, Args); 2784 return InitRV.getScalarVal(); 2785 } 2786 2787 /// Allocate the given objc object. 2788 /// call i8* \@objc_alloc(i8* %value) 2789 llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value, 2790 llvm::Type *resultType) { 2791 return emitObjCValueOperation(*this, value, resultType, 2792 CGM.getObjCEntrypoints().objc_alloc, 2793 "objc_alloc"); 2794 } 2795 2796 /// Allocate the given objc object. 2797 /// call i8* \@objc_allocWithZone(i8* %value) 2798 llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value, 2799 llvm::Type *resultType) { 2800 return emitObjCValueOperation(*this, value, resultType, 2801 CGM.getObjCEntrypoints().objc_allocWithZone, 2802 "objc_allocWithZone"); 2803 } 2804 2805 llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value, 2806 llvm::Type *resultType) { 2807 return emitObjCValueOperation(*this, value, resultType, 2808 CGM.getObjCEntrypoints().objc_alloc_init, 2809 "objc_alloc_init"); 2810 } 2811 2812 /// Produce the code to do a primitive release.
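/// In MRR this is a -drain message sent to the pool object: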
2813 /// [tmp drain]; 2814 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { 2815 const IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); 2816 Selector DrainSel = getContext().Selectors.getSelector(0, &II); 2817 CallArgList Args; 2818 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2819 getContext().VoidTy, DrainSel, Arg, Args); 2820 } 2821 2822 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, 2823 Address addr, 2824 QualType type) { 2825 CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); 2826 } 2827 2828 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, 2829 Address addr, 2830 QualType type) { 2831 CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); 2832 } 2833 2834 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, 2835 Address addr, 2836 QualType type) { 2837 CGF.EmitARCDestroyWeak(addr); 2838 } 2839 2840 void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr, 2841 QualType type) { 2842 llvm::Value *value = CGF.Builder.CreateLoad(addr); 2843 CGF.EmitARCIntrinsicUse(value); 2844 } 2845 2846 /// Autorelease the given object. 2847 /// call i8* \@objc_autorelease(i8* %value) 2848 llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value, 2849 llvm::Type *returnType) { 2850 return emitObjCValueOperation( 2851 *this, value, returnType, 2852 CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction, 2853 "objc_autorelease"); 2854 } 2855 2856 /// Retain the given object, with normal retain semantics. 2857 /// call i8* \@objc_retain(i8* %value) 2858 llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value, 2859 llvm::Type *returnType) { 2860 return emitObjCValueOperation( 2861 *this, value, returnType, 2862 CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain"); 2863 } 2864 2865 /// Release the given object. 2866 /// call void \@objc_release(i8* %value) 2867 void CodeGenFunction::EmitObjCRelease(llvm::Value *value, 2868 ARCPreciseLifetime_t precise) { 2869 if (isa<llvm::ConstantPointerNull>(value)) return; 2870 2871 llvm::FunctionCallee &fn = 2872 CGM.getObjCEntrypoints().objc_releaseRuntimeFunction; 2873 if (!fn) { 2874 llvm::FunctionType *fnType = 2875 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2876 fn = CGM.CreateRuntimeFunction(fnType, "objc_release"); 2877 setARCRuntimeFunctionLinkage(CGM, fn); 2878 // We have Native ARC, so set nonlazybind attribute for performance 2879 if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) 2880 f->addFnAttr(llvm::Attribute::NonLazyBind); 2881 } 2882 2883 // Cast the argument to 'id'. 2884 value = Builder.CreateBitCast(value, Int8PtrTy); 2885 2886 // Call objc_release. 
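// Unlike the intrinsic path in EmitARCRelease, this lowers what was written
// as an ordinary -release message send, so the callee may unwind; emit a
// call or an invoke as the EH state requires.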
2887 llvm::CallBase *call = EmitCallOrInvoke(fn, value); 2888 2889 if (precise == ARCImpreciseLifetime) { 2890 call->setMetadata("clang.imprecise_release", 2891 llvm::MDNode::get(Builder.getContext(), {})); 2892 } 2893 } 2894 2895 namespace { 2896 struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { 2897 llvm::Value *Token; 2898 2899 CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2900 2901 void Emit(CodeGenFunction &CGF, Flags flags) override { 2902 CGF.EmitObjCAutoreleasePoolPop(Token); 2903 } 2904 }; 2905 struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { 2906 llvm::Value *Token; 2907 2908 CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2909 2910 void Emit(CodeGenFunction &CGF, Flags flags) override { 2911 CGF.EmitObjCMRRAutoreleasePoolPop(Token); 2912 } 2913 }; 2914 } 2915 2916 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { 2917 if (CGM.getLangOpts().ObjCAutoRefCount) 2918 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr); 2919 else 2920 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr); 2921 } 2922 2923 static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) { 2924 switch (lifetime) { 2925 case Qualifiers::OCL_None: 2926 case Qualifiers::OCL_ExplicitNone: 2927 case Qualifiers::OCL_Strong: 2928 case Qualifiers::OCL_Autoreleasing: 2929 return true; 2930 2931 case Qualifiers::OCL_Weak: 2932 return false; 2933 } 2934 2935 llvm_unreachable("impossible lifetime!"); 2936 } 2937 2938 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2939 LValue lvalue, 2940 QualType type) { 2941 llvm::Value *result; 2942 bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime()); 2943 if (shouldRetain) { 2944 result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal(); 2945 } else { 2946 assert(type.getObjCLifetime() == Qualifiers::OCL_Weak); 2947 result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress()); 2948 } 2949 return TryEmitResult(result, !shouldRetain); 2950 } 2951 2952 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2953 const Expr *e) { 2954 e = e->IgnoreParens(); 2955 QualType type = e->getType(); 2956 2957 // If we're loading retained from a __strong xvalue, we can avoid 2958 // an extra retain/release pair by zeroing out the source of this 2959 // "move" operation. 2960 if (e->isXValue() && 2961 !type.isConstQualified() && 2962 type.getObjCLifetime() == Qualifiers::OCL_Strong) { 2963 // Emit the lvalue. 2964 LValue lv = CGF.EmitLValue(e); 2965 2966 // Load the object pointer. 2967 llvm::Value *result = CGF.EmitLoadOfLValue(lv, 2968 SourceLocation()).getScalarVal(); 2969 2970 // Set the source pointer to NULL. 2971 CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv); 2972 2973 return TryEmitResult(result, true); 2974 } 2975 2976 // As a very special optimization, in ARC++, if the l-value is the 2977 // result of a non-volatile assignment, do a simple retain of the 2978 // result of the call to objc_storeWeak instead of reloading. 2979 if (CGF.getLangOpts().CPlusPlus && 2980 !type.isVolatileQualified() && 2981 type.getObjCLifetime() == Qualifiers::OCL_Weak && 2982 isa<BinaryOperator>(e) && 2983 cast<BinaryOperator>(e)->getOpcode() == BO_Assign) 2984 return TryEmitResult(CGF.EmitScalarExpr(e), false); 2985 2986 // Try to emit code for scalar constant instead of emitting LValue and 2987 // loading it because we are not guaranteed to have an l-value. 
One such 2988 // case is a DeclRefExpr referencing a non-odr-used constant-evaluated variable. 2989 if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) { 2990 auto *DRE = const_cast<DeclRefExpr *>(decl_expr); 2991 if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE)) 2992 return TryEmitResult(CGF.emitScalarConstant(constant, DRE), 2993 !shouldRetainObjCLifetime(type.getObjCLifetime())); 2994 } 2995 2996 return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type); 2997 } 2998 2999 typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, 3000 llvm::Value *value)> 3001 ValueTransform; 3002 3003 /// Insert code immediately after a call. 3004 3005 // FIXME: We should find a way to emit the runtime call immediately 3006 // after the call is emitted to eliminate the need for this function. 3007 static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF, 3008 llvm::Value *value, 3009 ValueTransform doAfterCall, 3010 ValueTransform doFallback) { 3011 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 3012 auto *callBase = dyn_cast<llvm::CallBase>(value); 3013 3014 if (callBase && llvm::objcarc::hasAttachedCallOpBundle(callBase)) { 3015 // Fall back if the call base has operand bundle "clang.arc.attachedcall". 3016 value = doFallback(CGF, value); 3017 } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) { 3018 // Place the retain immediately following the call. 3019 CGF.Builder.SetInsertPoint(call->getParent(), 3020 ++llvm::BasicBlock::iterator(call)); 3021 value = doAfterCall(CGF, value); 3022 } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) { 3023 // Place the retain at the beginning of the normal destination block. 3024 llvm::BasicBlock *BB = invoke->getNormalDest(); 3025 CGF.Builder.SetInsertPoint(BB, BB->begin()); 3026 value = doAfterCall(CGF, value); 3027 3028 // Bitcasts can arise because of related-result returns. Rewrite 3029 // the operand. 3030 } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) { 3031 // Change the insert point to avoid emitting the fall-back call after the 3032 // bitcast. 3033 CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator()); 3034 llvm::Value *operand = bitcast->getOperand(0); 3035 operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback); 3036 bitcast->setOperand(0, operand); 3037 value = bitcast; 3038 } else { 3039 auto *phi = dyn_cast<llvm::PHINode>(value); 3040 if (phi && phi->getNumIncomingValues() == 2 && 3041 isa<llvm::ConstantPointerNull>(phi->getIncomingValue(1)) && 3042 isa<llvm::CallBase>(phi->getIncomingValue(0))) { 3043 // Handle phi instructions that are generated when it's necessary to check 3044 // whether the receiver of a message is null. 3045 llvm::Value *inVal = phi->getIncomingValue(0); 3046 inVal = emitARCOperationAfterCall(CGF, inVal, doAfterCall, doFallback); 3047 phi->setIncomingValue(0, inVal); 3048 value = phi; 3049 } else { 3050 // Generic fall-back case. 3051 // Retain using the non-block variant: we never need to do a copy 3052 // of a block that's been returned to us. 3053 value = doFallback(CGF, value); 3054 } 3055 } 3056 3057 CGF.Builder.restoreIP(ip); 3058 return value; 3059 } 3060 3061 /// Given that the given expression is some sort of call (which does 3062 /// not return retained), emit a retain following it.
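/// For illustration (a sketch, using a hypothetical function makeObject):
/// for a +0 call such as "id x = makeObject();" this emits roughly
///   %call = call i8* \@makeObject()
///   %x = call i8* \@objc_retainAutoreleasedReturnValue(i8* %call)
/// so a callee that autoreleased its result can hand it over directly;
/// the generic fallback is a plain \@objc_retain.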
3063 static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF, 3064 const Expr *e) { 3065 llvm::Value *value = CGF.EmitScalarExpr(e); 3066 return emitARCOperationAfterCall(CGF, value, 3067 [](CodeGenFunction &CGF, llvm::Value *value) { 3068 return CGF.EmitARCRetainAutoreleasedReturnValue(value); 3069 }, 3070 [](CodeGenFunction &CGF, llvm::Value *value) { 3071 return CGF.EmitARCRetainNonBlock(value); 3072 }); 3073 } 3074 3075 /// Given that the given expression is some sort of call (which does 3076 /// not return retained), perform an unsafeClaim following it. 3077 static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF, 3078 const Expr *e) { 3079 llvm::Value *value = CGF.EmitScalarExpr(e); 3080 return emitARCOperationAfterCall(CGF, value, 3081 [](CodeGenFunction &CGF, llvm::Value *value) { 3082 return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value); 3083 }, 3084 [](CodeGenFunction &CGF, llvm::Value *value) { 3085 return value; 3086 }); 3087 } 3088 3089 llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E, 3090 bool allowUnsafeClaim) { 3091 if (allowUnsafeClaim && 3092 CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) { 3093 return emitARCUnsafeClaimCallResult(*this, E); 3094 } else { 3095 llvm::Value *value = emitARCRetainCallResult(*this, E); 3096 return EmitObjCConsumeObject(E->getType(), value); 3097 } 3098 } 3099 3100 /// Determine whether it might be important to emit a separate 3101 /// objc_retain_block on the result of the given expression, or 3102 /// whether it's okay to just emit it in a +1 context. 3103 static bool shouldEmitSeparateBlockRetain(const Expr *e) { 3104 assert(e->getType()->isBlockPointerType()); 3105 e = e->IgnoreParens(); 3106 3107 // For future goodness, emit block expressions directly in +1 3108 // contexts if we can. 3109 if (isa<BlockExpr>(e)) 3110 return false; 3111 3112 if (const CastExpr *cast = dyn_cast<CastExpr>(e)) { 3113 switch (cast->getCastKind()) { 3114 // Emitting these operations in +1 contexts is goodness. 3115 case CK_LValueToRValue: 3116 case CK_ARCReclaimReturnedObject: 3117 case CK_ARCConsumeObject: 3118 case CK_ARCProduceObject: 3119 return false; 3120 3121 // These operations preserve a block type. 3122 case CK_NoOp: 3123 case CK_BitCast: 3124 return shouldEmitSeparateBlockRetain(cast->getSubExpr()); 3125 3126 // These operations are known to be bad (or haven't been considered). 3127 case CK_AnyPointerToBlockPointerCast: 3128 default: 3129 return true; 3130 } 3131 } 3132 3133 return true; 3134 } 3135 3136 namespace { 3137 /// A CRTP base class for emitting expressions of retainable object 3138 /// pointer type in ARC. 
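/// A conforming Impl supplies the "minimal implementation" hooks listed
/// in the class body below. A hypothetical skeleton (illustration only):
/// \code
///   struct MyEmitter : ARCExprEmitter<MyEmitter, llvm::Value *> {
///     MyEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}
///     llvm::Value *getValueOfResult(llvm::Value *v) { return v; }
///     llvm::Value *emitBitCast(llvm::Value *v, llvm::Type *ty) {
///       return CGF.Builder.CreateBitCast(v, ty);
///     }
///     // ... visitLValueToRValue, visitConsumeObject, visitCall, etc.
///   };
/// \endcode
/// The concrete instances are ARCRetainExprEmitter and
/// ARCUnsafeUnretainedExprEmitter below.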
3139 template <typename Impl, typename Result> class ARCExprEmitter { 3140 protected: 3141 CodeGenFunction &CGF; 3142 Impl &asImpl() { return *static_cast<Impl*>(this); } 3143 3144 ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {} 3145 3146 public: 3147 Result visit(const Expr *e); 3148 Result visitCastExpr(const CastExpr *e); 3149 Result visitPseudoObjectExpr(const PseudoObjectExpr *e); 3150 Result visitBlockExpr(const BlockExpr *e); 3151 Result visitBinaryOperator(const BinaryOperator *e); 3152 Result visitBinAssign(const BinaryOperator *e); 3153 Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); 3154 Result visitBinAssignAutoreleasing(const BinaryOperator *e); 3155 Result visitBinAssignWeak(const BinaryOperator *e); 3156 Result visitBinAssignStrong(const BinaryOperator *e); 3157 3158 // Minimal implementation: 3159 // Result visitLValueToRValue(const Expr *e) 3160 // Result visitConsumeObject(const Expr *e) 3161 // Result visitExtendBlockObject(const Expr *e) 3162 // Result visitReclaimReturnedObject(const Expr *e) 3163 // Result visitCall(const Expr *e) 3164 // Result visitExpr(const Expr *e) 3165 // 3166 // Result emitBitCast(Result result, llvm::Type *resultType) 3167 // llvm::Value *getValueOfResult(Result result) 3168 }; 3169 } 3170 3171 /// Try to emit a PseudoObjectExpr under special ARC rules. 3172 /// 3173 /// This massively duplicates emitPseudoObjectRValue. 3174 template <typename Impl, typename Result> 3175 Result 3176 ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) { 3177 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 3178 3179 // Find the result expression. 3180 const Expr *resultExpr = E->getResultExpr(); 3181 assert(resultExpr); 3182 Result result; 3183 3184 for (PseudoObjectExpr::const_semantics_iterator 3185 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 3186 const Expr *semantic = *i; 3187 3188 // If this semantic expression is an opaque value, bind it 3189 // to the result of its source expression. 3190 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 3191 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 3192 OVMA opaqueData; 3193 3194 // If this semantic is the result of the pseudo-object 3195 // expression, try to evaluate the source as +1. 3196 if (ov == resultExpr) { 3197 assert(!OVMA::shouldBindAsLValue(ov)); 3198 result = asImpl().visit(ov->getSourceExpr()); 3199 opaqueData = OVMA::bind(CGF, ov, 3200 RValue::get(asImpl().getValueOfResult(result))); 3201 3202 // Otherwise, just bind it. 3203 } else { 3204 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 3205 } 3206 opaques.push_back(opaqueData); 3207 3208 // Otherwise, if the expression is the result, evaluate it 3209 // and remember the result. 3210 } else if (semantic == resultExpr) { 3211 result = asImpl().visit(semantic); 3212 3213 // Otherwise, evaluate the expression in an ignored context. 3214 } else { 3215 CGF.EmitIgnoredExpr(semantic); 3216 } 3217 } 3218 3219 // Unbind all the opaques now. 3220 for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques) 3221 opaque.unbind(CGF); 3222 3223 return result; 3224 } 3225 3226 template <typename Impl, typename Result> 3227 Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) { 3228 // The default implementation just forwards the expression to visitExpr. 
3229 return asImpl().visitExpr(e); 3230 } 3231 3232 template <typename Impl, typename Result> 3233 Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) { 3234 switch (e->getCastKind()) { 3235 3236 // No-op casts don't change the type, so we just ignore them. 3237 case CK_NoOp: 3238 return asImpl().visit(e->getSubExpr()); 3239 3240 // These casts can change the type. 3241 case CK_CPointerToObjCPointerCast: 3242 case CK_BlockPointerToObjCPointerCast: 3243 case CK_AnyPointerToBlockPointerCast: 3244 case CK_BitCast: { 3245 llvm::Type *resultType = CGF.ConvertType(e->getType()); 3246 assert(e->getSubExpr()->getType()->hasPointerRepresentation()); 3247 Result result = asImpl().visit(e->getSubExpr()); 3248 return asImpl().emitBitCast(result, resultType); 3249 } 3250 3251 // Handle some casts specially. 3252 case CK_LValueToRValue: 3253 return asImpl().visitLValueToRValue(e->getSubExpr()); 3254 case CK_ARCConsumeObject: 3255 return asImpl().visitConsumeObject(e->getSubExpr()); 3256 case CK_ARCExtendBlockObject: 3257 return asImpl().visitExtendBlockObject(e->getSubExpr()); 3258 case CK_ARCReclaimReturnedObject: 3259 return asImpl().visitReclaimReturnedObject(e->getSubExpr()); 3260 3261 // Otherwise, use the default logic. 3262 default: 3263 return asImpl().visitExpr(e); 3264 } 3265 } 3266 3267 template <typename Impl, typename Result> 3268 Result 3269 ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) { 3270 switch (e->getOpcode()) { 3271 case BO_Comma: 3272 CGF.EmitIgnoredExpr(e->getLHS()); 3273 CGF.EnsureInsertPoint(); 3274 return asImpl().visit(e->getRHS()); 3275 3276 case BO_Assign: 3277 return asImpl().visitBinAssign(e); 3278 3279 default: 3280 return asImpl().visitExpr(e); 3281 } 3282 } 3283 3284 template <typename Impl, typename Result> 3285 Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) { 3286 switch (e->getLHS()->getType().getObjCLifetime()) { 3287 case Qualifiers::OCL_ExplicitNone: 3288 return asImpl().visitBinAssignUnsafeUnretained(e); 3289 3290 case Qualifiers::OCL_Weak: 3291 return asImpl().visitBinAssignWeak(e); 3292 3293 case Qualifiers::OCL_Autoreleasing: 3294 return asImpl().visitBinAssignAutoreleasing(e); 3295 3296 case Qualifiers::OCL_Strong: 3297 return asImpl().visitBinAssignStrong(e); 3298 3299 case Qualifiers::OCL_None: 3300 return asImpl().visitExpr(e); 3301 } 3302 llvm_unreachable("bad ObjC ownership qualifier"); 3303 } 3304 3305 /// The default rule for __unsafe_unretained emits the RHS recursively, 3306 /// stores into the unsafe variable, and propagates the result outward. 3307 template <typename Impl, typename Result> 3308 Result ARCExprEmitter<Impl,Result>:: 3309 visitBinAssignUnsafeUnretained(const BinaryOperator *e) { 3310 // Recursively emit the RHS. 3311 // For __block safety, do this before emitting the LHS. 3312 Result result = asImpl().visit(e->getRHS()); 3313 3314 // Perform the store. 
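// The l-value is deliberately emitted only now: evaluating a RHS such as
// a block literal that captures the LHS __block variable (e.g.
// "x = ^{ use(x); };") can move that variable to the heap, invalidating
// any address computed beforehand.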
3315 LValue lvalue = 3316 CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store); 3317 CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)), 3318 lvalue); 3319 3320 return result; 3321 } 3322 3323 template <typename Impl, typename Result> 3324 Result 3325 ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) { 3326 return asImpl().visitExpr(e); 3327 } 3328 3329 template <typename Impl, typename Result> 3330 Result 3331 ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) { 3332 return asImpl().visitExpr(e); 3333 } 3334 3335 template <typename Impl, typename Result> 3336 Result 3337 ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) { 3338 return asImpl().visitExpr(e); 3339 } 3340 3341 /// The general expression-emission logic. 3342 template <typename Impl, typename Result> 3343 Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) { 3344 // We should *never* see a nested full-expression here, because if 3345 // we fail to emit at +1, our caller must not retain after we close 3346 // out the full-expression. This isn't as important in the unsafe 3347 // emitter. 3348 assert(!isa<ExprWithCleanups>(e)); 3349 3350 // Look through parens, __extension__, generic selection, etc. 3351 e = e->IgnoreParens(); 3352 3353 // Handle certain kinds of casts. 3354 if (const CastExpr *ce = dyn_cast<CastExpr>(e)) { 3355 return asImpl().visitCastExpr(ce); 3356 3357 // Handle the comma operator. 3358 } else if (auto op = dyn_cast<BinaryOperator>(e)) { 3359 return asImpl().visitBinaryOperator(op); 3360 3361 // TODO: handle conditional operators here 3362 3363 // For calls and message sends, use the retained-call logic. 3364 // Delegate inits are a special case in that they're the only 3365 // returns-retained expression that *isn't* surrounded by 3366 // a consume. 3367 } else if (isa<CallExpr>(e) || 3368 (isa<ObjCMessageExpr>(e) && 3369 !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) { 3370 return asImpl().visitCall(e); 3371 3372 // Look through pseudo-object expressions. 3373 } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 3374 return asImpl().visitPseudoObjectExpr(pseudo); 3375 } else if (auto *be = dyn_cast<BlockExpr>(e)) 3376 return asImpl().visitBlockExpr(be); 3377 3378 return asImpl().visitExpr(e); 3379 } 3380 3381 namespace { 3382 3383 /// An emitter for +1 results. 3384 struct ARCRetainExprEmitter : 3385 public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> { 3386 3387 ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} 3388 3389 llvm::Value *getValueOfResult(TryEmitResult result) { 3390 return result.getPointer(); 3391 } 3392 3393 TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) { 3394 llvm::Value *value = result.getPointer(); 3395 value = CGF.Builder.CreateBitCast(value, resultType); 3396 result.setPointer(value); 3397 return result; 3398 } 3399 3400 TryEmitResult visitLValueToRValue(const Expr *e) { 3401 return tryEmitARCRetainLoadOfScalar(CGF, e); 3402 } 3403 3404 /// For consumptions, just emit the subexpression and thus elide 3405 /// the retain/release pair. 3406 TryEmitResult visitConsumeObject(const Expr *e) { 3407 llvm::Value *result = CGF.EmitScalarExpr(e); 3408 return TryEmitResult(result, true); 3409 } 3410 3411 TryEmitResult visitBlockExpr(const BlockExpr *e) { 3412 TryEmitResult result = visitExpr(e); 3413 // Avoid the block-retain if this is a block literal that doesn't need to be 3414 // copied to the heap. 
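// (Treating the literal as already retained via setInt(true) below makes
// the caller skip the block-retain that would otherwise force a heap
// copy; this is only sound when the block provably does not escape its
// enclosing scope.)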
3415 if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks && 3416 e->getBlockDecl()->canAvoidCopyToHeap()) 3417 result.setInt(true); 3418 return result; 3419 } 3420 3421 /// Block extends are net +0. Naively, we could just recurse on 3422 /// the subexpression, but actually we need to ensure that the 3423 /// value is copied as a block, so there's a little filter here. 3424 TryEmitResult visitExtendBlockObject(const Expr *e) { 3425 llvm::Value *result; // will be a +0 value 3426 3427 // If we can't safely assume the sub-expression will produce a 3428 // block-copied value, emit the sub-expression at +0. 3429 if (shouldEmitSeparateBlockRetain(e)) { 3430 result = CGF.EmitScalarExpr(e); 3431 3432 // Otherwise, try to emit the sub-expression at +1 recursively. 3433 } else { 3434 TryEmitResult subresult = asImpl().visit(e); 3435 3436 // If that produced a retained value, just use that. 3437 if (subresult.getInt()) { 3438 return subresult; 3439 } 3440 3441 // Otherwise it's +0. 3442 result = subresult.getPointer(); 3443 } 3444 3445 // Retain the object as a block. 3446 result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true); 3447 return TryEmitResult(result, true); 3448 } 3449 3450 /// For reclaims, emit the subexpression as a retained call and 3451 /// skip the consumption. 3452 TryEmitResult visitReclaimReturnedObject(const Expr *e) { 3453 llvm::Value *result = emitARCRetainCallResult(CGF, e); 3454 return TryEmitResult(result, true); 3455 } 3456 3457 /// When we have an undecorated call, retroactively do a claim. 3458 TryEmitResult visitCall(const Expr *e) { 3459 llvm::Value *result = emitARCRetainCallResult(CGF, e); 3460 return TryEmitResult(result, true); 3461 } 3462 3463 // TODO: maybe special-case visitBinAssignWeak? 3464 3465 TryEmitResult visitExpr(const Expr *e) { 3466 // We didn't find an obvious production, so emit what we've got and 3467 // tell the caller that we didn't manage to retain. 3468 llvm::Value *result = CGF.EmitScalarExpr(e); 3469 return TryEmitResult(result, false); 3470 } 3471 }; 3472 } 3473 3474 static TryEmitResult 3475 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) { 3476 return ARCRetainExprEmitter(CGF).visit(e); 3477 } 3478 3479 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF, 3480 LValue lvalue, 3481 QualType type) { 3482 TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type); 3483 llvm::Value *value = result.getPointer(); 3484 if (!result.getInt()) 3485 value = CGF.EmitARCRetain(type, value); 3486 return value; 3487 } 3488 3489 /// EmitARCRetainScalarExpr - Semantically equivalent to 3490 /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a 3491 /// best-effort attempt to peephole expressions that naturally produce 3492 /// retained objects. 3493 llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) { 3494 // The retain needs to happen within the full-expression. 3495 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { 3496 RunCleanupsScope scope(*this); 3497 return EmitARCRetainScalarExpr(cleanups->getSubExpr()); 3498 } 3499 3500 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); 3501 llvm::Value *value = result.getPointer(); 3502 if (!result.getInt()) 3503 value = EmitARCRetain(e->getType(), value); 3504 return value; 3505 } 3506 3507 llvm::Value * 3508 CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) { 3509 // The retain needs to happen within the full-expression. 
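// (The recursive call below re-enters with the cleanups scope active, so
// the retain and autorelease are emitted before the scope's cleanups
// run.)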
3510 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { 3511 RunCleanupsScope scope(*this); 3512 return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr()); 3513 } 3514 3515 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); 3516 llvm::Value *value = result.getPointer(); 3517 if (result.getInt()) 3518 value = EmitARCAutorelease(value); 3519 else 3520 value = EmitARCRetainAutorelease(e->getType(), value); 3521 return value; 3522 } 3523 3524 llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) { 3525 llvm::Value *result; 3526 bool doRetain; 3527 3528 if (shouldEmitSeparateBlockRetain(e)) { 3529 result = EmitScalarExpr(e); 3530 doRetain = true; 3531 } else { 3532 TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e); 3533 result = subresult.getPointer(); 3534 doRetain = !subresult.getInt(); 3535 } 3536 3537 if (doRetain) 3538 result = EmitARCRetainBlock(result, /*mandatory*/ true); 3539 return EmitObjCConsumeObject(e->getType(), result); 3540 } 3541 3542 llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) { 3543 // In ARC, retain and autorelease the expression. 3544 if (getLangOpts().ObjCAutoRefCount) { 3545 // Do so before running any cleanups for the full-expression. 3546 // EmitARCRetainAutoreleaseScalarExpr does this for us. 3547 return EmitARCRetainAutoreleaseScalarExpr(expr); 3548 } 3549 3550 // Otherwise, use the normal scalar-expression emission. The 3551 // exception machinery doesn't do anything special with the 3552 // exception like retaining it, so there's no safety associated with 3553 // only running cleanups after the throw has started, and when it 3554 // matters it tends to be substantially inferior code. 3555 return EmitScalarExpr(expr); 3556 } 3557 3558 namespace { 3559 3560 /// An emitter for assigning into an __unsafe_unretained context. 3561 struct ARCUnsafeUnretainedExprEmitter : 3562 public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> { 3563 3564 ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} 3565 3566 llvm::Value *getValueOfResult(llvm::Value *value) { 3567 return value; 3568 } 3569 3570 llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) { 3571 return CGF.Builder.CreateBitCast(value, resultType); 3572 } 3573 3574 llvm::Value *visitLValueToRValue(const Expr *e) { 3575 return CGF.EmitScalarExpr(e); 3576 } 3577 3578 /// For consumptions, just emit the subexpression and perform the 3579 /// consumption like normal. 3580 llvm::Value *visitConsumeObject(const Expr *e) { 3581 llvm::Value *value = CGF.EmitScalarExpr(e); 3582 return CGF.EmitObjCConsumeObject(e->getType(), value); 3583 } 3584 3585 /// No special logic for block extensions. (This probably can't 3586 /// actually happen in this emitter, though.) 3587 llvm::Value *visitExtendBlockObject(const Expr *e) { 3588 return CGF.EmitARCExtendBlockObject(e); 3589 } 3590 3591 /// For reclaims, perform an unsafeClaim if that's enabled. 3592 llvm::Value *visitReclaimReturnedObject(const Expr *e) { 3593 return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true); 3594 } 3595 3596 /// When we have an undecorated call, just emit it without adding 3597 /// the unsafeClaim. 3598 llvm::Value *visitCall(const Expr *e) { 3599 return CGF.EmitScalarExpr(e); 3600 } 3601 3602 /// Just do normal scalar emission in the default case. 
3603 llvm::Value *visitExpr(const Expr *e) { 3604 return CGF.EmitScalarExpr(e); 3605 } 3606 }; 3607 } 3608 3609 static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF, 3610 const Expr *e) { 3611 return ARCUnsafeUnretainedExprEmitter(CGF).visit(e); 3612 } 3613 3614 /// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to 3615 /// immediately releasing the result of EmitARCRetainScalarExpr, but 3616 /// avoiding any spurious retains, including by performing reclaims 3617 /// with objc_unsafeClaimAutoreleasedReturnValue. 3618 llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) { 3619 // Look through full-expressions. 3620 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { 3621 RunCleanupsScope scope(*this); 3622 return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr()); 3623 } 3624 3625 return emitARCUnsafeUnretainedScalarExpr(*this, e); 3626 } 3627 3628 std::pair<LValue,llvm::Value*> 3629 CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e, 3630 bool ignored) { 3631 // Evaluate the RHS first. If we're ignoring the result, assume 3632 // that we can emit at an unsafe +0. 3633 llvm::Value *value; 3634 if (ignored) { 3635 value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS()); 3636 } else { 3637 value = EmitScalarExpr(e->getRHS()); 3638 } 3639 3640 // Emit the LHS and perform the store. 3641 LValue lvalue = EmitLValue(e->getLHS()); 3642 EmitStoreOfScalar(value, lvalue); 3643 3644 return std::pair<LValue,llvm::Value*>(std::move(lvalue), value); 3645 } 3646 3647 std::pair<LValue,llvm::Value*> 3648 CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e, 3649 bool ignored) { 3650 // Evaluate the RHS first. 3651 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS()); 3652 llvm::Value *value = result.getPointer(); 3653 3654 bool hasImmediateRetain = result.getInt(); 3655 3656 // If we didn't emit a retained object, and the l-value is of block 3657 // type, then we need to emit the block-retain immediately in case 3658 // it invalidates the l-value. 3659 if (!hasImmediateRetain && e->getType()->isBlockPointerType()) { 3660 value = EmitARCRetainBlock(value, /*mandatory*/ false); 3661 hasImmediateRetain = true; 3662 } 3663 3664 LValue lvalue = EmitLValue(e->getLHS()); 3665 3666 // If the RHS was emitted retained, expand this. 3667 if (hasImmediateRetain) { 3668 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation()); 3669 EmitStoreOfScalar(value, lvalue); 3670 EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime()); 3671 } else { 3672 value = EmitARCStoreStrong(lvalue, value, ignored); 3673 } 3674 3675 return std::pair<LValue,llvm::Value*>(lvalue, value); 3676 } 3677 3678 std::pair<LValue,llvm::Value*> 3679 CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) { 3680 llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS()); 3681 LValue lvalue = EmitLValue(e->getLHS()); 3682 3683 EmitStoreOfScalar(value, lvalue); 3684 3685 return std::pair<LValue,llvm::Value*>(lvalue, value); 3686 } 3687 3688 void CodeGenFunction::EmitObjCAutoreleasePoolStmt( 3689 const ObjCAutoreleasePoolStmt &ARPS) { 3690 const Stmt *subStmt = ARPS.getSubStmt(); 3691 const CompoundStmt &S = cast<CompoundStmt>(*subStmt); 3692 3693 CGDebugInfo *DI = getDebugInfo(); 3694 if (DI) 3695 DI->EmitLexicalBlockStart(Builder, S.getLBracLoc()); 3696 3697 // Keep track of the current cleanup stack depth.
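// For illustration (a sketch; exact entrypoints depend on the runtime):
//   @autoreleasepool { ... }
// lowers under native ARC to roughly
//   %token = call i8* @objc_autoreleasePoolPush()
//   ... body ...
//   call void @objc_autoreleasePoolPop(i8* %token)
// and under MRR to the equivalent [[NSAutoreleasePool alloc] init] and
// [tmp drain] message sends, pushed as cleanups below.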
3698 RunCleanupsScope Scope(*this); 3699 if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) { 3700 llvm::Value *token = EmitObjCAutoreleasePoolPush(); 3701 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token); 3702 } else { 3703 llvm::Value *token = EmitObjCMRRAutoreleasePoolPush(); 3704 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token); 3705 } 3706 3707 for (const auto *I : S.body()) 3708 EmitStmt(I); 3709 3710 if (DI) 3711 DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc()); 3712 } 3713 3714 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object, 3715 /// make sure it survives garbage collection until this point. 3716 void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) { 3717 // We just use inline assembly. 3718 llvm::FunctionType *extenderType 3719 = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All); 3720 llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType, 3721 /* assembly */ "", 3722 /* constraints */ "r", 3723 /* side effects */ true); 3724 3725 EmitNounwindRuntimeCall(extender, object); 3726 } 3727 3728 /// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with a 3729 /// non-trivial copy assignment function, produce the following helper function. 3730 /// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; } 3731 /// 3732 llvm::Constant * 3733 CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction( 3734 const ObjCPropertyImplDecl *PID) { 3735 const ObjCPropertyDecl *PD = PID->getPropertyDecl(); 3736 if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))) 3737 return nullptr; 3738 3739 QualType Ty = PID->getPropertyIvarDecl()->getType(); 3740 ASTContext &C = getContext(); 3741 3742 if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { 3743 // Call the move assignment operator instead of calling the copy assignment 3744 // operator and destructor.
3745 CharUnits Alignment = C.getTypeAlignInChars(Ty); 3746 llvm::Constant *Fn = getNonTrivialCStructMoveAssignmentOperator( 3747 CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty); 3748 return Fn; 3749 } 3750 3751 if (!getLangOpts().CPlusPlus || 3752 !getLangOpts().ObjCRuntime.hasAtomicCopyHelper()) 3753 return nullptr; 3754 if (!Ty->isRecordType()) 3755 return nullptr; 3756 llvm::Constant *HelperFn = nullptr; 3757 if (hasTrivialSetExpr(PID)) 3758 return nullptr; 3759 assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null"); 3760 if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty))) 3761 return HelperFn; 3762 3763 const IdentifierInfo *II = 3764 &CGM.getContext().Idents.get("__assign_helper_atomic_property_"); 3765 3766 QualType ReturnTy = C.VoidTy; 3767 QualType DestTy = C.getPointerType(Ty); 3768 QualType SrcTy = Ty; 3769 SrcTy.addConst(); 3770 SrcTy = C.getPointerType(SrcTy); 3771 3772 SmallVector<QualType, 2> ArgTys; 3773 ArgTys.push_back(DestTy); 3774 ArgTys.push_back(SrcTy); 3775 QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {}); 3776 3777 FunctionDecl *FD = FunctionDecl::Create( 3778 C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, 3779 FunctionTy, nullptr, SC_Static, false, false, false); 3780 3781 FunctionArgList args; 3782 ParmVarDecl *Params[2]; 3783 ParmVarDecl *DstDecl = ParmVarDecl::Create( 3784 C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, 3785 C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None, 3786 /*DefArg=*/nullptr); 3787 args.push_back(Params[0] = DstDecl); 3788 ParmVarDecl *SrcDecl = ParmVarDecl::Create( 3789 C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, 3790 C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None, 3791 /*DefArg=*/nullptr); 3792 args.push_back(Params[1] = SrcDecl); 3793 FD->setParams(Params); 3794 3795 const CGFunctionInfo &FI = 3796 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args); 3797 3798 llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); 3799 3800 llvm::Function *Fn = 3801 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 3802 "__assign_helper_atomic_property_", 3803 &CGM.getModule()); 3804 3805 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI); 3806 3807 StartFunction(FD, ReturnTy, Fn, FI, args); 3808 3809 DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation()); 3810 UnaryOperator *DST = UnaryOperator::Create( 3811 C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary, 3812 SourceLocation(), false, FPOptionsOverride()); 3813 3814 DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation()); 3815 UnaryOperator *SRC = UnaryOperator::Create( 3816 C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, 3817 SourceLocation(), false, FPOptionsOverride()); 3818 3819 Expr *Args[2] = {DST, SRC}; 3820 CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment()); 3821 CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create( 3822 C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(), 3823 VK_LValue, SourceLocation(), FPOptionsOverride()); 3824 3825 EmitStmt(TheCall); 3826 3827 FinishFunction(); 3828 HelperFn = Fn; 3829 CGM.setAtomicSetterHelperFnMap(Ty, HelperFn); 3830 return HelperFn; 3831 } 3832 3833 llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( 3834 const ObjCPropertyImplDecl *PID) { 3835 const ObjCPropertyDecl *PD = PID->getPropertyDecl(); 3836 if ((!(PD->getPropertyAttributes() & 
ObjCPropertyAttribute::kind_atomic))) 3837 return nullptr; 3838 3839 QualType Ty = PD->getType(); 3840 ASTContext &C = getContext(); 3841 3842 if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { 3843 CharUnits Alignment = C.getTypeAlignInChars(Ty); 3844 llvm::Constant *Fn = getNonTrivialCStructCopyConstructor( 3845 CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty); 3846 return Fn; 3847 } 3848 3849 if (!getLangOpts().CPlusPlus || 3850 !getLangOpts().ObjCRuntime.hasAtomicCopyHelper()) 3851 return nullptr; 3852 if (!Ty->isRecordType()) 3853 return nullptr; 3854 llvm::Constant *HelperFn = nullptr; 3855 if (hasTrivialGetExpr(PID)) 3856 return nullptr; 3857 assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null"); 3858 if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) 3859 return HelperFn; 3860 3861 const IdentifierInfo *II = 3862 &CGM.getContext().Idents.get("__copy_helper_atomic_property_"); 3863 3864 QualType ReturnTy = C.VoidTy; 3865 QualType DestTy = C.getPointerType(Ty); 3866 QualType SrcTy = Ty; 3867 SrcTy.addConst(); 3868 SrcTy = C.getPointerType(SrcTy); 3869 3870 SmallVector<QualType, 2> ArgTys; 3871 ArgTys.push_back(DestTy); 3872 ArgTys.push_back(SrcTy); 3873 QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {}); 3874 3875 FunctionDecl *FD = FunctionDecl::Create( 3876 C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, 3877 FunctionTy, nullptr, SC_Static, false, false, false); 3878 3879 FunctionArgList args; 3880 ParmVarDecl *Params[2]; 3881 ParmVarDecl *DstDecl = ParmVarDecl::Create( 3882 C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, 3883 C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None, 3884 /*DefArg=*/nullptr); 3885 args.push_back(Params[0] = DstDecl); 3886 ParmVarDecl *SrcDecl = ParmVarDecl::Create( 3887 C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, 3888 C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None, 3889 /*DefArg=*/nullptr); 3890 args.push_back(Params[1] = SrcDecl); 3891 FD->setParams(Params); 3892 3893 const CGFunctionInfo &FI = 3894 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args); 3895 3896 llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); 3897 3898 llvm::Function *Fn = llvm::Function::Create( 3899 LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_", 3900 &CGM.getModule()); 3901 3902 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI); 3903 3904 StartFunction(FD, ReturnTy, Fn, FI, args); 3905 3906 DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue, 3907 SourceLocation()); 3908 3909 UnaryOperator *SRC = UnaryOperator::Create( 3910 C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, 3911 SourceLocation(), false, FPOptionsOverride()); 3912 3913 CXXConstructExpr *CXXConstExpr = 3914 cast<CXXConstructExpr>(PID->getGetterCXXConstructor()); 3915 3916 SmallVector<Expr*, 4> ConstructorArgs; 3917 ConstructorArgs.push_back(SRC); 3918 ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()), 3919 CXXConstExpr->arg_end()); 3920 3921 CXXConstructExpr *TheCXXConstructExpr = 3922 CXXConstructExpr::Create(C, Ty, SourceLocation(), 3923 CXXConstExpr->getConstructor(), 3924 CXXConstExpr->isElidable(), 3925 ConstructorArgs, 3926 CXXConstExpr->hadMultipleCandidates(), 3927 CXXConstExpr->isListInitialization(), 3928 CXXConstExpr->isStdInitListInitialization(), 3929 CXXConstExpr->requiresZeroInitialization(), 3930 CXXConstExpr->getConstructionKind(), 3931 SourceRange()); 3932 3933 
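// Conceptually the helper body is "new (dst) Ty(*src)": evaluate the
// destination pointer below, then aggregate-construct the copy directly
// into its pointee.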
DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue, 3934 SourceLocation()); 3935 3936 RValue DV = EmitAnyExpr(&DstExpr); 3937 CharUnits Alignment = 3938 getContext().getTypeAlignInChars(TheCXXConstructExpr->getType()); 3939 EmitAggExpr(TheCXXConstructExpr, 3940 AggValueSlot::forAddr( 3941 Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment), 3942 Qualifiers(), AggValueSlot::IsDestructed, 3943 AggValueSlot::DoesNotNeedGCBarriers, 3944 AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); 3945 3946 FinishFunction(); 3947 HelperFn = Fn; 3948 CGM.setAtomicGetterHelperFnMap(Ty, HelperFn); 3949 return HelperFn; 3950 } 3951 3952 llvm::Value * 3953 CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { 3954 // Get selectors for retain/autorelease. 3955 const IdentifierInfo *CopyID = &getContext().Idents.get("copy"); 3956 Selector CopySelector = 3957 getContext().Selectors.getNullarySelector(CopyID); 3958 const IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); 3959 Selector AutoreleaseSelector = 3960 getContext().Selectors.getNullarySelector(AutoreleaseID); 3961 3962 // Emit calls to retain/autorelease. 3963 CGObjCRuntime &Runtime = CGM.getObjCRuntime(); 3964 llvm::Value *Val = Block; 3965 RValue Result; 3966 Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 3967 Ty, CopySelector, 3968 Val, CallArgList(), nullptr, nullptr); 3969 Val = Result.getScalarVal(); 3970 Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 3971 Ty, AutoreleaseSelector, 3972 Val, CallArgList(), nullptr, nullptr); 3973 Val = Result.getScalarVal(); 3974 return Val; 3975 } 3976 3977 static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) { 3978 switch (TT.getOS()) { 3979 case llvm::Triple::Darwin: 3980 case llvm::Triple::MacOSX: 3981 return llvm::MachO::PLATFORM_MACOS; 3982 case llvm::Triple::IOS: 3983 return llvm::MachO::PLATFORM_IOS; 3984 case llvm::Triple::TvOS: 3985 return llvm::MachO::PLATFORM_TVOS; 3986 case llvm::Triple::WatchOS: 3987 return llvm::MachO::PLATFORM_WATCHOS; 3988 case llvm::Triple::XROS: 3989 return llvm::MachO::PLATFORM_XROS; 3990 case llvm::Triple::DriverKit: 3991 return llvm::MachO::PLATFORM_DRIVERKIT; 3992 default: 3993 return llvm::MachO::PLATFORM_UNKNOWN; 3994 } 3995 } 3996 3997 static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF, 3998 const VersionTuple &Version) { 3999 CodeGenModule &CGM = CGF.CGM; 4000 // Note: we intend to support multi-platform version checks, so reserve 4001 // the room for a dual platform checking invocation that will be 4002 // implemented in the future. 
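// For illustration (a sketch): "if (@available(macOS 10.15, *))" becomes
//   %ok = call i32 @__isPlatformVersionAtLeast(i32 1, i32 10, i32 15, i32 0)
//   %cond = icmp ne i32 %ok, 0
// where the leading argument is the base Mach-O platform ID
// (PLATFORM_MACOS == 1).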
4003 llvm::SmallVector<llvm::Value *, 8> Args; 4004 4005 auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) { 4006 std::optional<unsigned> Min = Version.getMinor(), 4007 SMin = Version.getSubminor(); 4008 Args.push_back( 4009 llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT))); 4010 Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor())); 4011 Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0))); 4012 Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))); 4013 }; 4014 4015 assert(!Version.empty() && "unexpected empty version"); 4016 EmitArgs(Version, CGM.getTarget().getTriple()); 4017 4018 if (!CGM.IsPlatformVersionAtLeastFn) { 4019 llvm::FunctionType *FTy = llvm::FunctionType::get( 4020 CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty}, 4021 false); 4022 CGM.IsPlatformVersionAtLeastFn = 4023 CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast"); 4024 } 4025 4026 llvm::Value *Check = 4027 CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args); 4028 return CGF.Builder.CreateICmpNE(Check, 4029 llvm::Constant::getNullValue(CGM.Int32Ty)); 4030 } 4031 4032 llvm::Value * 4033 CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) { 4034 // Darwin uses the new __isPlatformVersionAtLeast family of routines. 4035 if (CGM.getTarget().getTriple().isOSDarwin()) 4036 return emitIsPlatformVersionAtLeast(*this, Version); 4037 4038 if (!CGM.IsOSVersionAtLeastFn) { 4039 llvm::FunctionType *FTy = 4040 llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false); 4041 CGM.IsOSVersionAtLeastFn = 4042 CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast"); 4043 } 4044 4045 std::optional<unsigned> Min = Version.getMinor(), 4046 SMin = Version.getSubminor(); 4047 llvm::Value *Args[] = { 4048 llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()), 4049 llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)), 4050 llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))}; 4051 4052 llvm::Value *CallRes = 4053 EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args); 4054 4055 return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty)); 4056 } 4057 4058 static bool isFoundationNeededForDarwinAvailabilityCheck( 4059 const llvm::Triple &TT, const VersionTuple &TargetVersion) { 4060 VersionTuple FoundationDroppedInVersion; 4061 switch (TT.getOS()) { 4062 case llvm::Triple::IOS: 4063 case llvm::Triple::TvOS: 4064 FoundationDroppedInVersion = VersionTuple(/*Major=*/13); 4065 break; 4066 case llvm::Triple::WatchOS: 4067 FoundationDroppedInVersion = VersionTuple(/*Major=*/6); 4068 break; 4069 case llvm::Triple::Darwin: 4070 case llvm::Triple::MacOSX: 4071 FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15); 4072 break; 4073 case llvm::Triple::XROS: 4074 // XROS doesn't need Foundation. 4075 return false; 4076 case llvm::Triple::DriverKit: 4077 // DriverKit doesn't need Foundation. 4078 return false; 4079 default: 4080 llvm_unreachable("Unexpected OS"); 4081 } 4082 return TargetVersion < FoundationDroppedInVersion; 4083 } 4084 4085 void CodeGenModule::emitAtAvailableLinkGuard() { 4086 if (!IsPlatformVersionAtLeastFn) 4087 return; 4088 // @available requires CoreFoundation only on Darwin. 4089 if (!Target.getTriple().isOSDarwin()) 4090 return; 4091 // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or 4092 // watchOS 6+. 
4093 if (!isFoundationNeededForDarwinAvailabilityCheck( 4094 Target.getTriple(), Target.getPlatformMinVersion())) 4095 return; 4096 // Add -framework CoreFoundation to the linker commands. We still want to 4097 // emit the core foundation reference down below because otherwise if 4098 // CoreFoundation is not used in the code, the linker won't link the 4099 // framework. 4100 auto &Context = getLLVMContext(); 4101 llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"), 4102 llvm::MDString::get(Context, "CoreFoundation")}; 4103 LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args)); 4104 // Emit a reference to a symbol from CoreFoundation to ensure that 4105 // CoreFoundation is linked into the final binary. 4106 llvm::FunctionType *FTy = 4107 llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false); 4108 llvm::FunctionCallee CFFunc = 4109 CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber"); 4110 4111 llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false); 4112 llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction( 4113 CheckFTy, "__clang_at_available_requires_core_foundation_framework", 4114 llvm::AttributeList(), /*Local=*/true); 4115 llvm::Function *CFLinkCheckFunc = 4116 cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts()); 4117 if (CFLinkCheckFunc->empty()) { 4118 CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage); 4119 CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility); 4120 CodeGenFunction CGF(*this); 4121 CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc)); 4122 CGF.EmitNounwindRuntimeCall(CFFunc, 4123 llvm::Constant::getNullValue(VoidPtrTy)); 4124 CGF.Builder.CreateUnreachable(); 4125 addCompilerUsedGlobal(CFLinkCheckFunc); 4126 } 4127 } 4128 4129 CGObjCRuntime::~CGObjCRuntime() {} 4130