//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();

  if (E->isExpressibleAsConstantInitializer()) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, E->getType());
  }

  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
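  // For example, for the boxed expression @(42), BoxingMethod is
  // +[NSNumber numberWithInt:], so the receiver emitted below is the
  // NSNumber class object and the message send carries 42 as its argument.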
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    llvm::Value *BitCast =
        Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT));
    Args.add(RValue::get(BitCast), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        llvm::LLVMContext::MD_invariant_load,
        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements, nullptr,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
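  // For @[a, b] only the "objects" temporary is needed; for @{k: v} a
  // parallel "keys" array of the same size is allocated as well, and both
  // are passed to the runtime method by pointer.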
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
    (getLangOpts().ObjCAutoRefCount &&
     CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                 ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
                                    ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                      ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
    llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
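  // (EmitARCIntrinsicUse emits a call to the clang.arc.use marker
  // intrinsic, which keeps the listed values formally "used" past the
  // message send and is later stripped by the ARC optimizer.)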
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
                                            const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// std::nullopt and the caller can generate a msgSend instead.
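///
/// The entrypoints currently handled here are objc_alloc,
/// objc_allocWithZone, objc_autorelease, objc_retain, and objc_release;
/// see the switch over the selector's method family below.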
static std::optional<llvm::Value *> tryGenerateSpecializedMessageSend(
    CodeGenFunction &CGF, QualType ResultType, llvm::Value *Receiver,
    const CallArgList &Args, Selector Sel, const ObjCMethodDecl *method,
    bool isClassMessage) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return std::nullopt;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
      // [Foo alloc] -> objc_alloc(Foo) or
      // [self alloc] -> objc_alloc(self)
      if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
        return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
      // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
      // [self allocWithZone:nil] -> objc_allocWithZone(self)
      if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
          Args.size() == 1 && Args.front().getType()->isPointerType() &&
          Sel.getNameForSlot(0) == "allocWithZone") {
        const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
        if (isa<llvm::ConstantPointerNull>(arg))
          return CGF.EmitObjCAllocWithZone(Receiver,
                                           CGF.ConvertType(ResultType));
        return std::nullopt;
      }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
      return nullptr;
    }
    break;

  default:
    break;
  }
  return std::nullopt;
}

CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
    CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType,
    Selector Sel, llvm::Value *Receiver, const CallArgList &Args,
    const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method,
    bool isClassMessage) {
  if (std::optional<llvm::Value *> SpecializedResult =
          tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
                                            Sel, Method, isClassMessage)) {
    return RValue::get(*SpecializedResult);
  }
  return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID,
                             Method);
}

static void AppendFirstImpliedRuntimeProtocols(
    const ObjCProtocolDecl *PD,
    llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) {
  if (!PD->isNonRuntimeProtocol()) {
    const auto *Can = PD->getCanonicalDecl();
    PDs.insert(Can);
    return;
  }

  for (const auto *ParentPD : PD->protocols())
    AppendFirstImpliedRuntimeProtocols(ParentPD, PDs);
}

std::vector<const ObjCProtocolDecl *>
CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
                                      ObjCProtocolDecl::protocol_iterator end) {
  std::vector<const ObjCProtocolDecl *> RuntimePds;
  llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs;

  for (; begin != end; ++begin) {
    const auto *It = *begin;
    const auto *Can = It->getCanonicalDecl();
    if (Can->isNonRuntimeProtocol())
      NonRuntimePDs.insert(Can);
    else
      RuntimePds.push_back(Can);
  }

  // If there are no non-runtime protocols then we can just stop now.
  if (NonRuntimePDs.empty())
    return RuntimePds;

  // Else we have to search through each non-runtime protocol's inheritance
  // hierarchy DAG, stopping whenever a branch either finds a runtime protocol
  // or a non-runtime protocol without any parents. These are the
  // "first-implied" protocols from a non-runtime protocol.
  llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos;
  for (const auto *PD : NonRuntimePDs)
    AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos);

  // Walk the Runtime list to get all protocols implied via the inclusion of
  // this protocol, e.g. all protocols it inherits from including itself.
  llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols;
  for (const auto *PD : RuntimePds) {
    const auto *Can = PD->getCanonicalDecl();
    AllImpliedProtocols.insert(Can);
    Can->getImpliedProtocols(AllImpliedProtocols);
  }

  // Similar to above, walk the list of first-implied protocols to find the
  // set of all protocols implied, excluding the listed protocols themselves
  // since they are not yet a part of the `RuntimePds` list.
  for (const auto *PD : FirstImpliedProtos) {
    PD->getImpliedProtocols(AllImpliedProtocols);
  }

  // From the first-implied list we have to finish building the final protocol
  // list. If a protocol in the first-implied list was already implied via
  // some inheritance path through some other protocols then it would be
  // redundant to add it here and so we skip over it.
  for (const auto *PD : FirstImpliedProtos) {
    if (!AllImpliedProtocols.contains(PD)) {
      RuntimePds.push_back(PD);
    }
  }

  return RuntimePds;
}

/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
static std::optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return std::nullopt;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(0) != "init")
    return std::nullopt;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'
  // with 'cls' a Class.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return std::nullopt;
  Selector SubSel = SubOME->getSelector();

  if (!SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
    return std::nullopt;

  llvm::Value *Receiver = nullptr;
  switch (SubOME->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType())
      return std::nullopt;
    Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
    break;
  }
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    return std::nullopt;
  }

  return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  if (std::optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
    return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    isClassMessage = ReceiverType->isObjCClassType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    OID = ReceiverType->castAs<ObjCObjectType>()->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
    result = Runtime.GeneratePossiblySpecializedMessageSend(
        *this, Return, ResultType, E->getSelector(), Receiver, Args, OID,
        method, isClassMessage);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  if (OMD->isDirectMethod()) {
    Fn->setVisibility(llvm::Function::HiddenVisibility);
    CGM.SetLLVMFunctionAttributes(OMD, FI, Fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn);
  } else {
    CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
  }

  args.push_back(OMD->getSelfDecl());
  if (!OMD->isDirectMethod())
    args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  if (OMD->isDirectMethod()) {
    // This function is a direct call, it has to implement a nil check
    // on entry.
    //
    // TODO: possibly have several entry points to elide the check
    CGM.getObjCRuntime().GenerateDirectMethodPrologue(*this, Fn, OMD, CD);
  }

  // In ARC, certain methods get an extra cleanup.
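  // Specifically, -dealloc gets a cleanup that messages [super dealloc]
  // on exit (see FinishARCDealloc above).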
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  llvm::Value *src =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  llvm::Value *dest =
      CGF.Builder.CreateBitCast(CGF.ReturnValue.getPointer(), CGF.VoidPtrTy);
  args.add(RValue::get(dest), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
  IvarSize = TInfo.Width;
  IvarAlignment = TInfo.Align;

  // If we have a copy property, we always have to use setProperty.
  // If the property is atomic we need to use getProperty, but in
  // the nonatomic case we can just use expression.
  if (IsCopy) {
    Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction(OMD->getEndLoc());
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

// emitCmdValueForGetterSetterBody - Handle emitting the load necessary for
// the `_cmd` selector argument for getter/setter bodies. For direct methods,
// this returns an undefined/poison value; this matches behavior prior to
// `_cmd` being removed from the direct method ABI as the getter/setter caller
// would never load one. For non-direct methods, this emits a load of the
// implicit `_cmd` storage.
static llvm::Value *emitCmdValueForGetterSetterBody(CodeGenFunction &CGF,
                                                    ObjCMethodDecl *MD) {
  if (MD->isDirectMethod()) {
    // Direct methods do not have a `_cmd` argument. Emit an undefined/poison
    // value. This will be passed to objc_getProperty/objc_setProperty, which
    // has not appeared bothered by the `_cmd` argument being undefined before.
    llvm::Type *selType = CGF.ConvertType(CGF.getContext().getObjCSelType());
    return llvm::PoisonValue::get(selType);
  }

  return CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(MD->getCmdDecl()), "cmd");
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    if (!AtomicHelperFn) {
      LValue Src =
          EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
      LValue Dst = MakeAddrLValue(ReturnValue, ivar->getType());
      callCStructCopyConstructor(Dst, Src);
    } else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar,
                                    AtomicHelperFn);
    }
    return;
  }

  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress(*this);
    ivarAddr = ivarAddr.withElementType(bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, bitcastType);
    }
    Builder.CreateStore(ivarVal, ReturnValue.withElementType(bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::FunctionCallee getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, getterMethod);
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::CallBase *CallInstruction;
    RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
                             getContext().getObjCIdType(), args),
                         callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, getOverlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress(*this).getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress(*this));
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
      CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}


static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl();

  if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    ParmVarDecl *PVD = *setterMethod->param_begin();
    if (!AtomicHelperFn) {
      // Call the move assignment operator instead of calling the copy
      // assignment operator and destructor.
      LValue Dst = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar,
                                     /*quals*/ 0);
      LValue Src = MakeAddrLValue(GetAddrOfLocalVar(PVD), ivar->getType());
      callCStructMoveAssignmentOperator(Dst, Src);
    } else {
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, AtomicHelperFn);
    }
    // Deactivate the destructor for the setter parameter.
    DeactivateCleanupBlock(CalleeDestructedParamCleanups[PVD], AllocaInsertPt);
    return;
  }

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
        EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress(*this);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *castType = llvm::Type::getIntNTy(
        getLLVMContext(), getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = argAddr.withElementType(castType);
    ivarAddr = ivarAddr.withElementType(castType);

    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
1497 llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
1498 store->setAtomic(llvm::AtomicOrdering::Unordered);
1499 return;
1500 }
1501
1502 case PropertyImplStrategy::GetSetProperty:
1503 case PropertyImplStrategy::SetPropertyAndExpressionGet: {
1504
1505 llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
1506 llvm::FunctionCallee setPropertyFn = nullptr;
1507 if (UseOptimizedSetter(CGM)) {
1508 // Targeting OS X 10.8 / iOS 6.0 or later, with GC off, so the
1509 // optimized setter entry points are available.
1510 setOptimizedPropertyFn =
1511 CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
1512 strategy.isAtomic(), strategy.isCopy());
1513 if (!setOptimizedPropertyFn) {
1514 CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
1515 return;
1516 }
1517 }
1518 else {
1519 setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
1520 if (!setPropertyFn) {
1521 CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
1522 return;
1523 }
1524 }
1525
1526 // Emit objc_setProperty((id) self, _cmd, offset, arg,
1527 // <is-atomic>, <is-copy>).
1528 llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, setterMethod);
1529 llvm::Value *self =
1530 Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
1531 llvm::Value *ivarOffset =
1532 EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar);
1533 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
1534 llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
1535 arg = Builder.CreateBitCast(arg, VoidPtrTy);
1536
1537 CallArgList args;
1538 args.add(RValue::get(self), getContext().getObjCIdType());
1539 args.add(RValue::get(cmd), getContext().getObjCSelType());
1540 if (setOptimizedPropertyFn) {
1541 args.add(RValue::get(arg), getContext().getObjCIdType());
1542 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1543 CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
1544 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
1545 callee, ReturnValueSlot(), args);
1546 } else {
1547 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1548 args.add(RValue::get(arg), getContext().getObjCIdType());
1549 args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
1550 getContext().BoolTy);
1551 args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
1552 getContext().BoolTy);
1553 // FIXME: We shouldn't need to get the function info here, the runtime
1554 // already should have computed it to build the function.
1555 CGCallee callee = CGCallee::forDirect(setPropertyFn);
1556 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
1557 callee, ReturnValueSlot(), args);
1558 }
1559
1560 return;
1561 }
1562
1563 case PropertyImplStrategy::CopyStruct:
1564 emitStructSetterCall(*this, setterMethod, ivar);
1565 return;
1566
1567 case PropertyImplStrategy::Expression:
1568 break;
1569 }
1570
1571 // Otherwise, fake up some ASTs and emit a normal assignment.
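// Conceptually, for "@synthesize prop = _ivar;" the code below builds the
// AST for
//   self->_ivar = arg;
// on the stack, inserting an implicit cast on the RHS whenever the ivar
// and parameter types differ (see the CastKind selection below).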
1571 ValueDecl *selfDecl = setterMethod->getSelfDecl();
1572 DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
1573 VK_LValue, SourceLocation());
1574 ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(),
1575 CK_LValueToRValue, &self, VK_PRValue,
1576 FPOptionsOverride());
1577 ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
1578 SourceLocation(), SourceLocation(),
1579 &selfLoad, true, true);
1580
1581 ParmVarDecl *argDecl = *setterMethod->param_begin();
1582 QualType argType = argDecl->getType().getNonReferenceType();
1583 DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
1584 SourceLocation());
1585 ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
1586 argType.getUnqualifiedType(), CK_LValueToRValue,
1587 &arg, VK_PRValue, FPOptionsOverride());
1588
1589 // The property type can differ from the ivar type in some situations with
1590 // Objective-C pointer types; in those cases we can always bit-cast the RHS.
1591 // The following absurdity is just to ensure well-formed IR.
1592 CastKind argCK = CK_NoOp;
1593 if (ivarRef.getType()->isObjCObjectPointerType()) {
1594 if (argLoad.getType()->isObjCObjectPointerType())
1595 argCK = CK_BitCast;
1596 else if (argLoad.getType()->isBlockPointerType())
1597 argCK = CK_BlockPointerToObjCPointerCast;
1598 else
1599 argCK = CK_CPointerToObjCPointerCast;
1600 } else if (ivarRef.getType()->isBlockPointerType()) {
1601 if (argLoad.getType()->isBlockPointerType())
1602 argCK = CK_BitCast;
1603 else
1604 argCK = CK_AnyPointerToBlockPointerCast;
1605 } else if (ivarRef.getType()->isPointerType()) {
1606 argCK = CK_BitCast;
1607 } else if (argLoad.getType()->isAtomicType() &&
1608 !ivarRef.getType()->isAtomicType()) {
1609 argCK = CK_AtomicToNonAtomic;
1610 } else if (!argLoad.getType()->isAtomicType() &&
1611 ivarRef.getType()->isAtomicType()) {
1612 argCK = CK_NonAtomicToAtomic;
1613 }
1614 ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK,
1615 &argLoad, VK_PRValue, FPOptionsOverride());
1616 Expr *finalArg = &argLoad;
1617 if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
1618 argLoad.getType()))
1619 finalArg = &argCast;
1620
1621 BinaryOperator *assign = BinaryOperator::Create(
1622 getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(),
1623 VK_PRValue, OK_Ordinary, SourceLocation(), FPOptionsOverride());
1624 EmitStmt(assign);
1625 }
1626
1627 /// Generate an Objective-C property setter function.
1628 ///
1629 /// The given Decl must be an ObjCImplementationDecl. \@synthesize
1630 /// is illegal within a category.
1631 void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, 1632 const ObjCPropertyImplDecl *PID) { 1633 llvm::Constant *AtomicHelperFn = 1634 CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID); 1635 ObjCMethodDecl *OMD = PID->getSetterMethodDecl(); 1636 assert(OMD && "Invalid call to generate setter (empty method)"); 1637 StartObjCMethod(OMD, IMP->getClassInterface()); 1638 1639 generateObjCSetterBody(IMP, PID, AtomicHelperFn); 1640 1641 FinishFunction(OMD->getEndLoc()); 1642 } 1643 1644 namespace { 1645 struct DestroyIvar final : EHScopeStack::Cleanup { 1646 private: 1647 llvm::Value *addr; 1648 const ObjCIvarDecl *ivar; 1649 CodeGenFunction::Destroyer *destroyer; 1650 bool useEHCleanupForArray; 1651 public: 1652 DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar, 1653 CodeGenFunction::Destroyer *destroyer, 1654 bool useEHCleanupForArray) 1655 : addr(addr), ivar(ivar), destroyer(destroyer), 1656 useEHCleanupForArray(useEHCleanupForArray) {} 1657 1658 void Emit(CodeGenFunction &CGF, Flags flags) override { 1659 LValue lvalue 1660 = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0); 1661 CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer, 1662 flags.isForNormalCleanup() && useEHCleanupForArray); 1663 } 1664 }; 1665 } 1666 1667 /// Like CodeGenFunction::destroyARCStrong, but do it with a call. 1668 static void destroyARCStrongWithStore(CodeGenFunction &CGF, 1669 Address addr, 1670 QualType type) { 1671 llvm::Value *null = getNullForVariable(addr); 1672 CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 1673 } 1674 1675 static void emitCXXDestructMethod(CodeGenFunction &CGF, 1676 ObjCImplementationDecl *impl) { 1677 CodeGenFunction::RunCleanupsScope scope(CGF); 1678 1679 llvm::Value *self = CGF.LoadObjCSelf(); 1680 1681 const ObjCInterfaceDecl *iface = impl->getClassInterface(); 1682 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); 1683 ivar; ivar = ivar->getNextIvar()) { 1684 QualType type = ivar->getType(); 1685 1686 // Check whether the ivar is a destructible type. 1687 QualType::DestructionKind dtorKind = type.isDestructedType(); 1688 if (!dtorKind) continue; 1689 1690 CodeGenFunction::Destroyer *destroyer = nullptr; 1691 1692 // Use a call to objc_storeStrong to destroy strong ivars, for the 1693 // general benefit of the tools. 1694 if (dtorKind == QualType::DK_objc_strong_lifetime) { 1695 destroyer = destroyARCStrongWithStore; 1696 1697 // Otherwise use the default for the destruction kind. 1698 } else { 1699 destroyer = CGF.getDestroyer(dtorKind); 1700 } 1701 1702 CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind); 1703 1704 CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer, 1705 cleanupKind & EHCleanup); 1706 } 1707 1708 assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?"); 1709 } 1710 1711 void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, 1712 ObjCMethodDecl *MD, 1713 bool ctor) { 1714 MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface()); 1715 StartObjCMethod(MD, IMP->getClassInterface()); 1716 1717 // Emit .cxx_construct. 1718 if (ctor) { 1719 // Suppress the final autorelease in ARC. 
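// (.cxx_construct conceptually returns self at +0, so the usual ARC
// retain/autorelease of the return value must not be emitted here.)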
1720 AutoreleaseResult = false; 1721 1722 for (const auto *IvarInit : IMP->inits()) { 1723 FieldDecl *Field = IvarInit->getAnyMember(); 1724 ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field); 1725 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), 1726 LoadObjCSelf(), Ivar, 0); 1727 EmitAggExpr(IvarInit->getInit(), 1728 AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed, 1729 AggValueSlot::DoesNotNeedGCBarriers, 1730 AggValueSlot::IsNotAliased, 1731 AggValueSlot::DoesNotOverlap)); 1732 } 1733 // constructor returns 'self'. 1734 CodeGenTypes &Types = CGM.getTypes(); 1735 QualType IdTy(CGM.getContext().getObjCIdType()); 1736 llvm::Value *SelfAsId = 1737 Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); 1738 EmitReturnOfRValue(RValue::get(SelfAsId), IdTy); 1739 1740 // Emit .cxx_destruct. 1741 } else { 1742 emitCXXDestructMethod(*this, IMP); 1743 } 1744 FinishFunction(); 1745 } 1746 1747 llvm::Value *CodeGenFunction::LoadObjCSelf() { 1748 VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl(); 1749 DeclRefExpr DRE(getContext(), Self, 1750 /*is enclosing local*/ (CurFuncDecl != CurCodeDecl), 1751 Self->getType(), VK_LValue, SourceLocation()); 1752 return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation()); 1753 } 1754 1755 QualType CodeGenFunction::TypeOfSelfObject() { 1756 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); 1757 ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); 1758 const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>( 1759 getContext().getCanonicalType(selfDecl->getType())); 1760 return PTy->getPointeeType(); 1761 } 1762 1763 void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ 1764 llvm::FunctionCallee EnumerationMutationFnPtr = 1765 CGM.getObjCRuntime().EnumerationMutationFunction(); 1766 if (!EnumerationMutationFnPtr) { 1767 CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime"); 1768 return; 1769 } 1770 CGCallee EnumerationMutationFn = 1771 CGCallee::forDirect(EnumerationMutationFnPtr); 1772 1773 CGDebugInfo *DI = getDebugInfo(); 1774 if (DI) 1775 DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); 1776 1777 RunCleanupsScope ForScope(*this); 1778 1779 // The local variable comes into scope immediately. 1780 AutoVarEmission variable = AutoVarEmission::invalid(); 1781 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) 1782 variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl())); 1783 1784 JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end"); 1785 1786 // Fast enumeration state. 1787 QualType StateTy = CGM.getObjCFastEnumerationStateType(); 1788 Address StatePtr = CreateMemTemp(StateTy, "state.ptr"); 1789 EmitNullInitialization(StatePtr, StateTy); 1790 1791 // Number of elements in the items array. 1792 static const unsigned NumItems = 16; 1793 1794 // Fetch the countByEnumeratingWithState:objects:count: selector. 1795 IdentifierInfo *II[] = { 1796 &CGM.getContext().Idents.get("countByEnumeratingWithState"), 1797 &CGM.getContext().Idents.get("objects"), 1798 &CGM.getContext().Idents.get("count") 1799 }; 1800 Selector FastEnumSel = 1801 CGM.getContext().Selectors.getSelector(std::size(II), &II[0]); 1802 1803 QualType ItemsTy = 1804 getContext().getConstantArrayType(getContext().getObjCIdType(), 1805 llvm::APInt(32, NumItems), nullptr, 1806 ArrayType::Normal, 0); 1807 Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr"); 1808 1809 // Emit the collection pointer. In ARC, we do a retain. 
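// (Under ARC the collection is retained here and released by the cleanup
// entered just below, keeping it alive for the whole loop; without ARC it
// is simply evaluated.)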
1810 llvm::Value *Collection; 1811 if (getLangOpts().ObjCAutoRefCount) { 1812 Collection = EmitARCRetainScalarExpr(S.getCollection()); 1813 1814 // Enter a cleanup to do the release. 1815 EmitObjCConsumeObject(S.getCollection()->getType(), Collection); 1816 } else { 1817 Collection = EmitScalarExpr(S.getCollection()); 1818 } 1819 1820 // The 'continue' label needs to appear within the cleanup for the 1821 // collection object. 1822 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); 1823 1824 // Send it our message: 1825 CallArgList Args; 1826 1827 // The first argument is a temporary of the enumeration-state type. 1828 Args.add(RValue::get(StatePtr.getPointer()), 1829 getContext().getPointerType(StateTy)); 1830 1831 // The second argument is a temporary array with space for NumItems 1832 // pointers. We'll actually be loading elements from the array 1833 // pointer written into the control state; this buffer is so that 1834 // collections that *aren't* backed by arrays can still queue up 1835 // batches of elements. 1836 Args.add(RValue::get(ItemsPtr.getPointer()), 1837 getContext().getPointerType(ItemsTy)); 1838 1839 // The third argument is the capacity of that temporary array. 1840 llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType()); 1841 llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems); 1842 Args.add(RValue::get(Count), getContext().getNSUIntegerType()); 1843 1844 // Start the enumeration. 1845 RValue CountRV = 1846 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1847 getContext().getNSUIntegerType(), 1848 FastEnumSel, Collection, Args); 1849 1850 // The initial number of objects that were returned in the buffer. 1851 llvm::Value *initialBufferLimit = CountRV.getScalarVal(); 1852 1853 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); 1854 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); 1855 1856 llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy); 1857 1858 // If the limit pointer was zero to begin with, the collection is 1859 // empty; skip all this. Set the branch weight assuming this has the same 1860 // probability of exiting the loop as any other loop exit. 1861 uint64_t EntryCount = getCurrentProfileCount(); 1862 Builder.CreateCondBr( 1863 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, 1864 LoopInitBB, 1865 createProfileWeights(EntryCount, getProfileCount(S.getBody()))); 1866 1867 // Otherwise, initialize the loop. 1868 EmitBlock(LoopInitBB); 1869 1870 // Save the initial mutations value. This is the value at an 1871 // address that was written into the state object by 1872 // countByEnumeratingWithState:objects:count:. 1873 Address StateMutationsPtrPtr = 1874 Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr"); 1875 llvm::Value *StateMutationsPtr 1876 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1877 1878 llvm::Type *UnsignedLongTy = ConvertType(getContext().UnsignedLongTy); 1879 llvm::Value *initialMutations = 1880 Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, 1881 getPointerAlign(), "forcoll.initial-mutations"); 1882 1883 // Start looping. This is the point we return to whenever we have a 1884 // fresh, non-empty batch of objects. 1885 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); 1886 EmitBlock(LoopBodyBB); 1887 1888 // The current index into the buffer. 
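// (Both PHIs below have three incoming edges: the initial entry from
// forcoll.loopinit, the back edge from forcoll.next, and the edge taken
// after a refetch, where the index resets to zero and the count becomes
// the refetched batch size.)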
1889 llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index"); 1890 index->addIncoming(zero, LoopInitBB); 1891 1892 // The current buffer size. 1893 llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count"); 1894 count->addIncoming(initialBufferLimit, LoopInitBB); 1895 1896 incrementProfileCounter(&S); 1897 1898 // Check whether the mutations value has changed from where it was 1899 // at start. StateMutationsPtr should actually be invariant between 1900 // refreshes. 1901 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1902 llvm::Value *currentMutations 1903 = Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, 1904 getPointerAlign(), "statemutations"); 1905 1906 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); 1907 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); 1908 1909 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), 1910 WasNotMutatedBB, WasMutatedBB); 1911 1912 // If so, call the enumeration-mutation function. 1913 EmitBlock(WasMutatedBB); 1914 llvm::Type *ObjCIdType = ConvertType(getContext().getObjCIdType()); 1915 llvm::Value *V = 1916 Builder.CreateBitCast(Collection, ObjCIdType); 1917 CallArgList Args2; 1918 Args2.add(RValue::get(V), getContext().getObjCIdType()); 1919 // FIXME: We shouldn't need to get the function info here, the runtime already 1920 // should have computed it to build the function. 1921 EmitCall( 1922 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), 1923 EnumerationMutationFn, ReturnValueSlot(), Args2); 1924 1925 // Otherwise, or if the mutation function returns, just continue. 1926 EmitBlock(WasNotMutatedBB); 1927 1928 // Initialize the element variable. 1929 RunCleanupsScope elementVariableScope(*this); 1930 bool elementIsVariable; 1931 LValue elementLValue; 1932 QualType elementType; 1933 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { 1934 // Initialize the variable, in case it's a __block variable or something. 1935 EmitAutoVarInit(variable); 1936 1937 const VarDecl *D = cast<VarDecl>(SD->getSingleDecl()); 1938 DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false, 1939 D->getType(), VK_LValue, SourceLocation()); 1940 elementLValue = EmitLValue(&tempDRE); 1941 elementType = D->getType(); 1942 elementIsVariable = true; 1943 1944 if (D->isARCPseudoStrong()) 1945 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); 1946 } else { 1947 elementLValue = LValue(); // suppress warning 1948 elementType = cast<Expr>(S.getElement())->getType(); 1949 elementIsVariable = false; 1950 } 1951 llvm::Type *convertedElementType = ConvertType(elementType); 1952 1953 // Fetch the buffer out of the enumeration state. 1954 // TODO: this pointer should actually be invariant between 1955 // refreshes, which would help us do certain loop optimizations. 1956 Address StateItemsPtr = 1957 Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr"); 1958 llvm::Value *EnumStateItems = 1959 Builder.CreateLoad(StateItemsPtr, "stateitems"); 1960 1961 // Fetch the value at the current index from the buffer. 
1962 llvm::Value *CurrentItemPtr = Builder.CreateGEP( 1963 ObjCIdType, EnumStateItems, index, "currentitem.ptr"); 1964 llvm::Value *CurrentItem = 1965 Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign()); 1966 1967 if (SanOpts.has(SanitizerKind::ObjCCast)) { 1968 // Before using an item from the collection, check that the implicit cast 1969 // from id to the element type is valid. This is done with instrumentation 1970 // roughly corresponding to: 1971 // 1972 // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ } 1973 const ObjCObjectPointerType *ObjPtrTy = 1974 elementType->getAsObjCInterfacePointerType(); 1975 const ObjCInterfaceType *InterfaceTy = 1976 ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr; 1977 if (InterfaceTy) { 1978 SanitizerScope SanScope(this); 1979 auto &C = CGM.getContext(); 1980 assert(InterfaceTy->getDecl() && "No decl for ObjC interface type"); 1981 Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C); 1982 CallArgList IsKindOfClassArgs; 1983 llvm::Value *Cls = 1984 CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl()); 1985 IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType()); 1986 llvm::Value *IsClass = 1987 CGM.getObjCRuntime() 1988 .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy, 1989 IsKindOfClassSel, CurrentItem, 1990 IsKindOfClassArgs) 1991 .getScalarVal(); 1992 llvm::Constant *StaticData[] = { 1993 EmitCheckSourceLocation(S.getBeginLoc()), 1994 EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))}; 1995 EmitCheck({{IsClass, SanitizerKind::ObjCCast}}, 1996 SanitizerHandler::InvalidObjCCast, 1997 ArrayRef<llvm::Constant *>(StaticData), CurrentItem); 1998 } 1999 } 2000 2001 // Cast that value to the right type. 2002 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, 2003 "currentitem"); 2004 2005 // Make sure we have an l-value. Yes, this gets evaluated every 2006 // time through the loop. 2007 if (!elementIsVariable) { 2008 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 2009 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); 2010 } else { 2011 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, 2012 /*isInit*/ true); 2013 } 2014 2015 // If we do have an element variable, this assignment is the end of 2016 // its initialization. 2017 if (elementIsVariable) 2018 EmitAutoVarCleanups(variable); 2019 2020 // Perform the loop body, setting up break and continue labels. 2021 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); 2022 { 2023 RunCleanupsScope Scope(*this); 2024 EmitStmt(S.getBody()); 2025 } 2026 BreakContinueStack.pop_back(); 2027 2028 // Destroy the element variable now. 2029 elementVariableScope.ForceCleanup(); 2030 2031 // Check whether there are more elements. 2032 EmitBlock(AfterBody.getBlock()); 2033 2034 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); 2035 2036 // First we check in the local buffer. 2037 llvm::Value *indexPlusOne = 2038 Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1)); 2039 2040 // If we haven't overrun the buffer yet, we can continue. 2041 // Set the branch weights based on the simplifying assumption that this is 2042 // like a while-loop, i.e., ignoring that the false branch fetches more 2043 // elements and then returns to the loop. 
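// As an illustrative sketch (assuming NSUInteger lowers to i64), the loop
// latch emitted here looks like:
//   %next = add i64 %index, 1
//   %more = icmp ult i64 %next, %count
//   br i1 %more, label %forcoll.loopbody, label %forcoll.refetch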
2044 Builder.CreateCondBr( 2045 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, 2046 createProfileWeights(getProfileCount(S.getBody()), EntryCount)); 2047 2048 index->addIncoming(indexPlusOne, AfterBody.getBlock()); 2049 count->addIncoming(count, AfterBody.getBlock()); 2050 2051 // Otherwise, we have to fetch more elements. 2052 EmitBlock(FetchMoreBB); 2053 2054 CountRV = 2055 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2056 getContext().getNSUIntegerType(), 2057 FastEnumSel, Collection, Args); 2058 2059 // If we got a zero count, we're done. 2060 llvm::Value *refetchCount = CountRV.getScalarVal(); 2061 2062 // (note that the message send might split FetchMoreBB) 2063 index->addIncoming(zero, Builder.GetInsertBlock()); 2064 count->addIncoming(refetchCount, Builder.GetInsertBlock()); 2065 2066 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), 2067 EmptyBB, LoopBodyBB); 2068 2069 // No more elements. 2070 EmitBlock(EmptyBB); 2071 2072 if (!elementIsVariable) { 2073 // If the element was not a declaration, set it to be null. 2074 2075 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType); 2076 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 2077 EmitStoreThroughLValue(RValue::get(null), elementLValue); 2078 } 2079 2080 if (DI) 2081 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); 2082 2083 ForScope.ForceCleanup(); 2084 EmitBlock(LoopEnd.getBlock()); 2085 } 2086 2087 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { 2088 CGM.getObjCRuntime().EmitTryStmt(*this, S); 2089 } 2090 2091 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { 2092 CGM.getObjCRuntime().EmitThrowStmt(*this, S); 2093 } 2094 2095 void CodeGenFunction::EmitObjCAtSynchronizedStmt( 2096 const ObjCAtSynchronizedStmt &S) { 2097 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); 2098 } 2099 2100 namespace { 2101 struct CallObjCRelease final : EHScopeStack::Cleanup { 2102 CallObjCRelease(llvm::Value *object) : object(object) {} 2103 llvm::Value *object; 2104 2105 void Emit(CodeGenFunction &CGF, Flags flags) override { 2106 // Releases at the end of the full-expression are imprecise. 2107 CGF.EmitARCRelease(object, ARCImpreciseLifetime); 2108 } 2109 }; 2110 } 2111 2112 /// Produce the code for a CK_ARCConsumeObject. Does a primitive 2113 /// release at the end of the full-expression. 2114 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, 2115 llvm::Value *object) { 2116 // If we're in a conditional branch, we need to make the cleanup 2117 // conditional. 2118 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); 2119 return object; 2120 } 2121 2122 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, 2123 llvm::Value *value) { 2124 return EmitARCRetainAutorelease(type, value); 2125 } 2126 2127 /// Given a number of pointers, inform the optimizer that they're 2128 /// being intrinsically used up until this point in the program. 2129 void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { 2130 llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use; 2131 if (!fn) 2132 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use); 2133 2134 // This isn't really a "runtime" function, but as an intrinsic it 2135 // doesn't really matter as long as we align things up. 
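// For example, a call shaped like
//   call void @llvm.objc.clang.arc.use(i8* %x)
// keeps %x live up to this point at no runtime cost; the ARC optimizer
// deletes the intrinsic once it has served its purpose.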
2136 EmitNounwindRuntimeCall(fn, values); 2137 } 2138 2139 /// Emit a call to "clang.arc.noop.use", which consumes the result of a call 2140 /// that has operand bundle "clang.arc.attachedcall". 2141 void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) { 2142 llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use; 2143 if (!fn) 2144 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use); 2145 EmitNounwindRuntimeCall(fn, values); 2146 } 2147 2148 static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) { 2149 if (auto *F = dyn_cast<llvm::Function>(RTF)) { 2150 // If the target runtime doesn't naturally support ARC, emit weak 2151 // references to the runtime support library. We don't really 2152 // permit this to fail, but we need a particular relocation style. 2153 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && 2154 !CGM.getTriple().isOSBinFormatCOFF()) { 2155 F->setLinkage(llvm::Function::ExternalWeakLinkage); 2156 } 2157 } 2158 } 2159 2160 static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, 2161 llvm::FunctionCallee RTF) { 2162 setARCRuntimeFunctionLinkage(CGM, RTF.getCallee()); 2163 } 2164 2165 static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID, 2166 CodeGenModule &CGM) { 2167 llvm::Function *fn = CGM.getIntrinsic(IntID); 2168 setARCRuntimeFunctionLinkage(CGM, fn); 2169 return fn; 2170 } 2171 2172 /// Perform an operation having the signature 2173 /// i8* (i8*) 2174 /// where a null input causes a no-op and returns null. 2175 static llvm::Value *emitARCValueOperation( 2176 CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType, 2177 llvm::Function *&fn, llvm::Intrinsic::ID IntID, 2178 llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) { 2179 if (isa<llvm::ConstantPointerNull>(value)) 2180 return value; 2181 2182 if (!fn) 2183 fn = getARCIntrinsic(IntID, CGF.CGM); 2184 2185 // Cast the argument to 'id'. 2186 llvm::Type *origType = returnType ? returnType : value->getType(); 2187 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 2188 2189 // Call the function. 2190 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); 2191 call->setTailCallKind(tailKind); 2192 2193 // Cast the result back to the original type. 
2194 return CGF.Builder.CreateBitCast(call, origType); 2195 } 2196 2197 /// Perform an operation having the following signature: 2198 /// i8* (i8**) 2199 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr, 2200 llvm::Function *&fn, 2201 llvm::Intrinsic::ID IntID) { 2202 if (!fn) 2203 fn = getARCIntrinsic(IntID, CGF.CGM); 2204 2205 return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); 2206 } 2207 2208 /// Perform an operation having the following signature: 2209 /// i8* (i8**, i8*) 2210 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr, 2211 llvm::Value *value, 2212 llvm::Function *&fn, 2213 llvm::Intrinsic::ID IntID, 2214 bool ignored) { 2215 assert(addr.getElementType() == value->getType()); 2216 2217 if (!fn) 2218 fn = getARCIntrinsic(IntID, CGF.CGM); 2219 2220 llvm::Type *origType = value->getType(); 2221 2222 llvm::Value *args[] = { 2223 CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), 2224 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) 2225 }; 2226 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); 2227 2228 if (ignored) return nullptr; 2229 2230 return CGF.Builder.CreateBitCast(result, origType); 2231 } 2232 2233 /// Perform an operation having the following signature: 2234 /// void (i8**, i8**) 2235 static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src, 2236 llvm::Function *&fn, 2237 llvm::Intrinsic::ID IntID) { 2238 assert(dst.getType() == src.getType()); 2239 2240 if (!fn) 2241 fn = getARCIntrinsic(IntID, CGF.CGM); 2242 2243 llvm::Value *args[] = { 2244 CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), 2245 CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) 2246 }; 2247 CGF.EmitNounwindRuntimeCall(fn, args); 2248 } 2249 2250 /// Perform an operation having the signature 2251 /// i8* (i8*) 2252 /// where a null input causes a no-op and returns null. 2253 static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF, 2254 llvm::Value *value, 2255 llvm::Type *returnType, 2256 llvm::FunctionCallee &fn, 2257 StringRef fnName) { 2258 if (isa<llvm::ConstantPointerNull>(value)) 2259 return value; 2260 2261 if (!fn) { 2262 llvm::FunctionType *fnType = 2263 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); 2264 fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName); 2265 2266 // We have Native ARC, so set nonlazybind attribute for performance 2267 if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) 2268 if (fnName == "objc_retain") 2269 f->addFnAttr(llvm::Attribute::NonLazyBind); 2270 } 2271 2272 // Cast the argument to 'id'. 2273 llvm::Type *origType = returnType ? returnType : value->getType(); 2274 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 2275 2276 // Call the function. 2277 llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value); 2278 2279 // Mark calls to objc_autorelease as tail on the assumption that methods 2280 // overriding autorelease do not touch anything on the stack. 2281 if (fnName == "objc_autorelease") 2282 if (auto *Call = dyn_cast<llvm::CallInst>(Inst)) 2283 Call->setTailCall(); 2284 2285 // Cast the result back to the original type. 2286 return CGF.Builder.CreateBitCast(Inst, origType); 2287 } 2288 2289 /// Produce the code to do a retain. 
Based on the type, calls one of: 2290 /// call i8* \@objc_retain(i8* %value) 2291 /// call i8* \@objc_retainBlock(i8* %value) 2292 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { 2293 if (type->isBlockPointerType()) 2294 return EmitARCRetainBlock(value, /*mandatory*/ false); 2295 else 2296 return EmitARCRetainNonBlock(value); 2297 } 2298 2299 /// Retain the given object, with normal retain semantics. 2300 /// call i8* \@objc_retain(i8* %value) 2301 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { 2302 return emitARCValueOperation(*this, value, nullptr, 2303 CGM.getObjCEntrypoints().objc_retain, 2304 llvm::Intrinsic::objc_retain); 2305 } 2306 2307 /// Retain the given block, with _Block_copy semantics. 2308 /// call i8* \@objc_retainBlock(i8* %value) 2309 /// 2310 /// \param mandatory - If false, emit the call with metadata 2311 /// indicating that it's okay for the optimizer to eliminate this call 2312 /// if it can prove that the block never escapes except down the stack. 2313 llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, 2314 bool mandatory) { 2315 llvm::Value *result 2316 = emitARCValueOperation(*this, value, nullptr, 2317 CGM.getObjCEntrypoints().objc_retainBlock, 2318 llvm::Intrinsic::objc_retainBlock); 2319 2320 // If the copy isn't mandatory, add !clang.arc.copy_on_escape to 2321 // tell the optimizer that it doesn't need to do this copy if the 2322 // block doesn't escape, where being passed as an argument doesn't 2323 // count as escaping. 2324 if (!mandatory && isa<llvm::Instruction>(result)) { 2325 llvm::CallInst *call 2326 = cast<llvm::CallInst>(result->stripPointerCasts()); 2327 assert(call->getCalledOperand() == 2328 CGM.getObjCEntrypoints().objc_retainBlock); 2329 2330 call->setMetadata("clang.arc.copy_on_escape", 2331 llvm::MDNode::get(Builder.getContext(), std::nullopt)); 2332 } 2333 2334 return result; 2335 } 2336 2337 static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) { 2338 // Fetch the void(void) inline asm which marks that we're going to 2339 // do something with the autoreleased return value. 2340 llvm::InlineAsm *&marker 2341 = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker; 2342 if (!marker) { 2343 StringRef assembly 2344 = CGF.CGM.getTargetCodeGenInfo() 2345 .getARCRetainAutoreleasedReturnValueMarker(); 2346 2347 // If we have an empty assembly string, there's nothing to do. 2348 if (assembly.empty()) { 2349 2350 // Otherwise, at -O0, build an inline asm that we're going to call 2351 // in a moment. 2352 } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { 2353 llvm::FunctionType *type = 2354 llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false); 2355 2356 marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true); 2357 2358 // If we're at -O1 and above, we don't want to litter the code 2359 // with this marker yet, so leave a breadcrumb for the ARC 2360 // optimizer to pick up. 2361 } else { 2362 const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr(); 2363 if (!CGF.CGM.getModule().getModuleFlag(retainRVMarkerKey)) { 2364 auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly); 2365 CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, 2366 retainRVMarkerKey, str); 2367 } 2368 } 2369 } 2370 2371 // Call the marker asm if we made one, which we do only at -O0. 
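// (For instance, on AArch64 the marker asm is the no-op "mov x29, x29";
// the exact string comes from getARCRetainAutoreleasedReturnValueMarker()
// and varies by target.)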
2372 if (marker) 2373 CGF.Builder.CreateCall(marker, std::nullopt, 2374 CGF.getBundlesForFunclet(marker)); 2375 } 2376 2377 static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value, 2378 bool IsRetainRV, 2379 CodeGenFunction &CGF) { 2380 emitAutoreleasedReturnValueMarker(CGF); 2381 2382 // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting 2383 // retainRV or claimRV calls in the IR. We currently do this only when the 2384 // optimization level isn't -O0 since global-isel, which is currently run at 2385 // -O0, doesn't know about the operand bundle. 2386 ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints(); 2387 llvm::Function *&EP = IsRetainRV 2388 ? EPs.objc_retainAutoreleasedReturnValue 2389 : EPs.objc_unsafeClaimAutoreleasedReturnValue; 2390 llvm::Intrinsic::ID IID = 2391 IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue 2392 : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue; 2393 EP = getARCIntrinsic(IID, CGF.CGM); 2394 2395 llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch(); 2396 2397 // FIXME: Do this on all targets and at -O0 too. This can be enabled only if 2398 // the target backend knows how to handle the operand bundle. 2399 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 && 2400 (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::x86_64)) { 2401 llvm::Value *bundleArgs[] = {EP}; 2402 llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs); 2403 auto *oldCall = cast<llvm::CallBase>(value); 2404 llvm::CallBase *newCall = llvm::CallBase::addOperandBundle( 2405 oldCall, llvm::LLVMContext::OB_clang_arc_attachedcall, OB, oldCall); 2406 newCall->copyMetadata(*oldCall); 2407 oldCall->replaceAllUsesWith(newCall); 2408 oldCall->eraseFromParent(); 2409 CGF.EmitARCNoopIntrinsicUse(newCall); 2410 return newCall; 2411 } 2412 2413 bool isNoTail = 2414 CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail(); 2415 llvm::CallInst::TailCallKind tailKind = 2416 isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None; 2417 return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind); 2418 } 2419 2420 /// Retain the given object which is the result of a function call. 2421 /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value) 2422 /// 2423 /// Yes, this function name is one character away from a different 2424 /// call with completely different semantics. 2425 llvm::Value * 2426 CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { 2427 return emitOptimizedARCReturnCall(value, true, *this); 2428 } 2429 2430 /// Claim a possibly-autoreleased return value at +0. This is only 2431 /// valid to do in contexts which do not rely on the retain to keep 2432 /// the object valid for all of its uses; for example, when 2433 /// the value is ignored, or when it is being assigned to an 2434 /// __unsafe_unretained variable. 2435 /// 2436 /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) 2437 llvm::Value * 2438 CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { 2439 return emitOptimizedARCReturnCall(value, false, *this); 2440 } 2441 2442 /// Release the given object. 
2443 /// call void \@objc_release(i8* %value) 2444 void CodeGenFunction::EmitARCRelease(llvm::Value *value, 2445 ARCPreciseLifetime_t precise) { 2446 if (isa<llvm::ConstantPointerNull>(value)) return; 2447 2448 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release; 2449 if (!fn) 2450 fn = getARCIntrinsic(llvm::Intrinsic::objc_release, CGM); 2451 2452 // Cast the argument to 'id'. 2453 value = Builder.CreateBitCast(value, Int8PtrTy); 2454 2455 // Call objc_release. 2456 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); 2457 2458 if (precise == ARCImpreciseLifetime) { 2459 call->setMetadata("clang.imprecise_release", 2460 llvm::MDNode::get(Builder.getContext(), std::nullopt)); 2461 } 2462 } 2463 2464 /// Destroy a __strong variable. 2465 /// 2466 /// At -O0, emit a call to store 'null' into the address; 2467 /// instrumenting tools prefer this because the address is exposed, 2468 /// but it's relatively cumbersome to optimize. 2469 /// 2470 /// At -O1 and above, just load and call objc_release. 2471 /// 2472 /// call void \@objc_storeStrong(i8** %addr, i8* null) 2473 void CodeGenFunction::EmitARCDestroyStrong(Address addr, 2474 ARCPreciseLifetime_t precise) { 2475 if (CGM.getCodeGenOpts().OptimizationLevel == 0) { 2476 llvm::Value *null = getNullForVariable(addr); 2477 EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 2478 return; 2479 } 2480 2481 llvm::Value *value = Builder.CreateLoad(addr); 2482 EmitARCRelease(value, precise); 2483 } 2484 2485 /// Store into a strong object. Always calls this: 2486 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2487 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, 2488 llvm::Value *value, 2489 bool ignored) { 2490 assert(addr.getElementType() == value->getType()); 2491 2492 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong; 2493 if (!fn) 2494 fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM); 2495 2496 llvm::Value *args[] = { 2497 Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), 2498 Builder.CreateBitCast(value, Int8PtrTy) 2499 }; 2500 EmitNounwindRuntimeCall(fn, args); 2501 2502 if (ignored) return nullptr; 2503 return value; 2504 } 2505 2506 /// Store into a strong object. Sometimes calls this: 2507 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2508 /// Other times, breaks it down into components. 2509 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, 2510 llvm::Value *newValue, 2511 bool ignored) { 2512 QualType type = dst.getType(); 2513 bool isBlock = type->isBlockPointerType(); 2514 2515 // Use a store barrier at -O0 unless this is a block type or the 2516 // lvalue is inadequately aligned. 2517 if (shouldUseFusedARCCalls() && 2518 !isBlock && 2519 (dst.getAlignment().isZero() || 2520 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { 2521 return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored); 2522 } 2523 2524 // Otherwise, split it out. 2525 2526 // Retain the new value. 2527 newValue = EmitARCRetain(type, newValue); 2528 2529 // Read the old value. 2530 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); 2531 2532 // Store. We do this before the release so that any deallocs won't 2533 // see the old value. 2534 EmitStoreOfScalar(newValue, dst); 2535 2536 // Finally, release the old value. 2537 EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); 2538 2539 return newValue; 2540 } 2541 2542 /// Autorelease the given object. 
2543 /// call i8* \@objc_autorelease(i8* %value) 2544 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { 2545 return emitARCValueOperation(*this, value, nullptr, 2546 CGM.getObjCEntrypoints().objc_autorelease, 2547 llvm::Intrinsic::objc_autorelease); 2548 } 2549 2550 /// Autorelease the given object. 2551 /// call i8* \@objc_autoreleaseReturnValue(i8* %value) 2552 llvm::Value * 2553 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { 2554 return emitARCValueOperation(*this, value, nullptr, 2555 CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, 2556 llvm::Intrinsic::objc_autoreleaseReturnValue, 2557 llvm::CallInst::TCK_Tail); 2558 } 2559 2560 /// Do a fused retain/autorelease of the given object. 2561 /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) 2562 llvm::Value * 2563 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { 2564 return emitARCValueOperation(*this, value, nullptr, 2565 CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, 2566 llvm::Intrinsic::objc_retainAutoreleaseReturnValue, 2567 llvm::CallInst::TCK_Tail); 2568 } 2569 2570 /// Do a fused retain/autorelease of the given object. 2571 /// call i8* \@objc_retainAutorelease(i8* %value) 2572 /// or 2573 /// %retain = call i8* \@objc_retainBlock(i8* %value) 2574 /// call i8* \@objc_autorelease(i8* %retain) 2575 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, 2576 llvm::Value *value) { 2577 if (!type->isBlockPointerType()) 2578 return EmitARCRetainAutoreleaseNonBlock(value); 2579 2580 if (isa<llvm::ConstantPointerNull>(value)) return value; 2581 2582 llvm::Type *origType = value->getType(); 2583 value = Builder.CreateBitCast(value, Int8PtrTy); 2584 value = EmitARCRetainBlock(value, /*mandatory*/ true); 2585 value = EmitARCAutorelease(value); 2586 return Builder.CreateBitCast(value, origType); 2587 } 2588 2589 /// Do a fused retain/autorelease of the given object. 2590 /// call i8* \@objc_retainAutorelease(i8* %value) 2591 llvm::Value * 2592 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { 2593 return emitARCValueOperation(*this, value, nullptr, 2594 CGM.getObjCEntrypoints().objc_retainAutorelease, 2595 llvm::Intrinsic::objc_retainAutorelease); 2596 } 2597 2598 /// i8* \@objc_loadWeak(i8** %addr) 2599 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). 2600 llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) { 2601 return emitARCLoadOperation(*this, addr, 2602 CGM.getObjCEntrypoints().objc_loadWeak, 2603 llvm::Intrinsic::objc_loadWeak); 2604 } 2605 2606 /// i8* \@objc_loadWeakRetained(i8** %addr) 2607 llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) { 2608 return emitARCLoadOperation(*this, addr, 2609 CGM.getObjCEntrypoints().objc_loadWeakRetained, 2610 llvm::Intrinsic::objc_loadWeakRetained); 2611 } 2612 2613 /// i8* \@objc_storeWeak(i8** %addr, i8* %value) 2614 /// Returns %value. 2615 llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr, 2616 llvm::Value *value, 2617 bool ignored) { 2618 return emitARCStoreOperation(*this, addr, value, 2619 CGM.getObjCEntrypoints().objc_storeWeak, 2620 llvm::Intrinsic::objc_storeWeak, ignored); 2621 } 2622 2623 /// i8* \@objc_initWeak(i8** %addr, i8* %value) 2624 /// Returns %value. %addr is known to not have a current weak entry. 
2625 /// Essentially equivalent to: 2626 /// *addr = nil; objc_storeWeak(addr, value); 2627 void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) { 2628 // If we're initializing to null, just write null to memory; no need 2629 // to get the runtime involved. But don't do this if optimization 2630 // is enabled, because accounting for this would make the optimizer 2631 // much more complicated. 2632 if (isa<llvm::ConstantPointerNull>(value) && 2633 CGM.getCodeGenOpts().OptimizationLevel == 0) { 2634 Builder.CreateStore(value, addr); 2635 return; 2636 } 2637 2638 emitARCStoreOperation(*this, addr, value, 2639 CGM.getObjCEntrypoints().objc_initWeak, 2640 llvm::Intrinsic::objc_initWeak, /*ignored*/ true); 2641 } 2642 2643 /// void \@objc_destroyWeak(i8** %addr) 2644 /// Essentially objc_storeWeak(addr, nil). 2645 void CodeGenFunction::EmitARCDestroyWeak(Address addr) { 2646 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak; 2647 if (!fn) 2648 fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM); 2649 2650 EmitNounwindRuntimeCall(fn, addr.getPointer()); 2651 } 2652 2653 /// void \@objc_moveWeak(i8** %dest, i8** %src) 2654 /// Disregards the current value in %dest. Leaves %src pointing to nothing. 2655 /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)). 2656 void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) { 2657 emitARCCopyOperation(*this, dst, src, 2658 CGM.getObjCEntrypoints().objc_moveWeak, 2659 llvm::Intrinsic::objc_moveWeak); 2660 } 2661 2662 /// void \@objc_copyWeak(i8** %dest, i8** %src) 2663 /// Disregards the current value in %dest. Essentially 2664 /// objc_release(objc_initWeak(dest, objc_readWeakRetained(src))) 2665 void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) { 2666 emitARCCopyOperation(*this, dst, src, 2667 CGM.getObjCEntrypoints().objc_copyWeak, 2668 llvm::Intrinsic::objc_copyWeak); 2669 } 2670 2671 void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr, 2672 Address SrcAddr) { 2673 llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); 2674 Object = EmitObjCConsumeObject(Ty, Object); 2675 EmitARCStoreWeak(DstAddr, Object, false); 2676 } 2677 2678 void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr, 2679 Address SrcAddr) { 2680 llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); 2681 Object = EmitObjCConsumeObject(Ty, Object); 2682 EmitARCStoreWeak(DstAddr, Object, false); 2683 EmitARCDestroyWeak(SrcAddr); 2684 } 2685 2686 /// Produce the code to do a objc_autoreleasepool_push. 2687 /// call i8* \@objc_autoreleasePoolPush(void) 2688 llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() { 2689 llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush; 2690 if (!fn) 2691 fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush, CGM); 2692 2693 return EmitNounwindRuntimeCall(fn); 2694 } 2695 2696 /// Produce the code to do a primitive release. 
2697 /// call void \@objc_autoreleasePoolPop(i8* %ptr)
2698 void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
2699 assert(value->getType() == Int8PtrTy);
2700
2701 if (getInvokeDest()) {
2702 // Call the runtime function, not the intrinsic, if we are handling exceptions.
2703 llvm::FunctionCallee &fn =
2704 CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
2705 if (!fn) {
2706 llvm::FunctionType *fnType =
2707 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
2708 fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
2709 setARCRuntimeFunctionLinkage(CGM, fn);
2710 }
2711
2712 // objc_autoreleasePoolPop can throw.
2713 EmitRuntimeCallOrInvoke(fn, value);
2714 } else {
2715 llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
2716 if (!fn)
2717 fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop, CGM);
2718
2719 EmitRuntimeCall(fn, value);
2720 }
2721 }
2722
2723 /// Produce the code to do the MRR version of objc_autoreleasepool_push,
2724 /// which is: [[NSAutoreleasePool alloc] init];
2725 /// where alloc is declared as + (id)alloc in the NSAutoreleasePool class
2726 /// and init as - (id)init in its NSObject superclass.
2727 ///
2728 llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
2729 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
2730 llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
2731 // [NSAutoreleasePool alloc]
2732 IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
2733 Selector AllocSel = getContext().Selectors.getSelector(0, &II);
2734 CallArgList Args;
2735 RValue AllocRV =
2736 Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2737 getContext().getObjCIdType(),
2738 AllocSel, Receiver, Args);
2739
2740 // [Receiver init]
2741 Receiver = AllocRV.getScalarVal();
2742 II = &CGM.getContext().Idents.get("init");
2743 Selector InitSel = getContext().Selectors.getSelector(0, &II);
2744 RValue InitRV =
2745 Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2746 getContext().getObjCIdType(),
2747 InitSel, Receiver, Args);
2748 return InitRV.getScalarVal();
2749 }
2750
2751 /// Allocate the given objc object.
2752 /// call i8* \@objc_alloc(i8* %value)
2753 llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
2754 llvm::Type *resultType) {
2755 return emitObjCValueOperation(*this, value, resultType,
2756 CGM.getObjCEntrypoints().objc_alloc,
2757 "objc_alloc");
2758 }
2759
2760 /// Allocate the given objc object.
2761 /// call i8* \@objc_allocWithZone(i8* %value)
2762 llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
2763 llvm::Type *resultType) {
2764 return emitObjCValueOperation(*this, value, resultType,
2765 CGM.getObjCEntrypoints().objc_allocWithZone,
2766 "objc_allocWithZone");
2767 }
2768
2769 llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
2770 llvm::Type *resultType) {
2771 return emitObjCValueOperation(*this, value, resultType,
2772 CGM.getObjCEntrypoints().objc_alloc_init,
2773 "objc_alloc_init");
2774 }
2775
2776 /// Produce the code to do a primitive release.
2777 /// [tmp drain]; 2778 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { 2779 IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); 2780 Selector DrainSel = getContext().Selectors.getSelector(0, &II); 2781 CallArgList Args; 2782 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2783 getContext().VoidTy, DrainSel, Arg, Args); 2784 } 2785 2786 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, 2787 Address addr, 2788 QualType type) { 2789 CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); 2790 } 2791 2792 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, 2793 Address addr, 2794 QualType type) { 2795 CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); 2796 } 2797 2798 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, 2799 Address addr, 2800 QualType type) { 2801 CGF.EmitARCDestroyWeak(addr); 2802 } 2803 2804 void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr, 2805 QualType type) { 2806 llvm::Value *value = CGF.Builder.CreateLoad(addr); 2807 CGF.EmitARCIntrinsicUse(value); 2808 } 2809 2810 /// Autorelease the given object. 2811 /// call i8* \@objc_autorelease(i8* %value) 2812 llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value, 2813 llvm::Type *returnType) { 2814 return emitObjCValueOperation( 2815 *this, value, returnType, 2816 CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction, 2817 "objc_autorelease"); 2818 } 2819 2820 /// Retain the given object, with normal retain semantics. 2821 /// call i8* \@objc_retain(i8* %value) 2822 llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value, 2823 llvm::Type *returnType) { 2824 return emitObjCValueOperation( 2825 *this, value, returnType, 2826 CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain"); 2827 } 2828 2829 /// Release the given object. 2830 /// call void \@objc_release(i8* %value) 2831 void CodeGenFunction::EmitObjCRelease(llvm::Value *value, 2832 ARCPreciseLifetime_t precise) { 2833 if (isa<llvm::ConstantPointerNull>(value)) return; 2834 2835 llvm::FunctionCallee &fn = 2836 CGM.getObjCEntrypoints().objc_releaseRuntimeFunction; 2837 if (!fn) { 2838 llvm::FunctionType *fnType = 2839 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2840 fn = CGM.CreateRuntimeFunction(fnType, "objc_release"); 2841 setARCRuntimeFunctionLinkage(CGM, fn); 2842 // We have Native ARC, so set nonlazybind attribute for performance 2843 if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) 2844 f->addFnAttr(llvm::Attribute::NonLazyBind); 2845 } 2846 2847 // Cast the argument to 'id'. 2848 value = Builder.CreateBitCast(value, Int8PtrTy); 2849 2850 // Call objc_release. 
2851 llvm::CallBase *call = EmitCallOrInvoke(fn, value); 2852 2853 if (precise == ARCImpreciseLifetime) { 2854 call->setMetadata("clang.imprecise_release", 2855 llvm::MDNode::get(Builder.getContext(), std::nullopt)); 2856 } 2857 } 2858 2859 namespace { 2860 struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { 2861 llvm::Value *Token; 2862 2863 CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2864 2865 void Emit(CodeGenFunction &CGF, Flags flags) override { 2866 CGF.EmitObjCAutoreleasePoolPop(Token); 2867 } 2868 }; 2869 struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { 2870 llvm::Value *Token; 2871 2872 CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2873 2874 void Emit(CodeGenFunction &CGF, Flags flags) override { 2875 CGF.EmitObjCMRRAutoreleasePoolPop(Token); 2876 } 2877 }; 2878 } 2879 2880 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { 2881 if (CGM.getLangOpts().ObjCAutoRefCount) 2882 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr); 2883 else 2884 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr); 2885 } 2886 2887 static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) { 2888 switch (lifetime) { 2889 case Qualifiers::OCL_None: 2890 case Qualifiers::OCL_ExplicitNone: 2891 case Qualifiers::OCL_Strong: 2892 case Qualifiers::OCL_Autoreleasing: 2893 return true; 2894 2895 case Qualifiers::OCL_Weak: 2896 return false; 2897 } 2898 2899 llvm_unreachable("impossible lifetime!"); 2900 } 2901 2902 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2903 LValue lvalue, 2904 QualType type) { 2905 llvm::Value *result; 2906 bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime()); 2907 if (shouldRetain) { 2908 result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal(); 2909 } else { 2910 assert(type.getObjCLifetime() == Qualifiers::OCL_Weak); 2911 result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF)); 2912 } 2913 return TryEmitResult(result, !shouldRetain); 2914 } 2915 2916 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2917 const Expr *e) { 2918 e = e->IgnoreParens(); 2919 QualType type = e->getType(); 2920 2921 // If we're loading retained from a __strong xvalue, we can avoid 2922 // an extra retain/release pair by zeroing out the source of this 2923 // "move" operation. 2924 if (e->isXValue() && 2925 !type.isConstQualified() && 2926 type.getObjCLifetime() == Qualifiers::OCL_Strong) { 2927 // Emit the lvalue. 2928 LValue lv = CGF.EmitLValue(e); 2929 2930 // Load the object pointer. 2931 llvm::Value *result = CGF.EmitLoadOfLValue(lv, 2932 SourceLocation()).getScalarVal(); 2933 2934 // Set the source pointer to NULL. 2935 CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv); 2936 2937 return TryEmitResult(result, true); 2938 } 2939 2940 // As a very special optimization, in ARC++, if the l-value is the 2941 // result of a non-volatile assignment, do a simple retain of the 2942 // result of the call to objc_storeWeak instead of reloading. 
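// e.g. for "id strong = (weakVar = tmp);" the assignment expression
// already yields the value passed to objc_storeWeak, so retaining that
// result avoids an objc_loadWeakRetained of the slot just written.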
2943 if (CGF.getLangOpts().CPlusPlus &&
2944 !type.isVolatileQualified() &&
2945 type.getObjCLifetime() == Qualifiers::OCL_Weak &&
2946 isa<BinaryOperator>(e) &&
2947 cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
2948 return TryEmitResult(CGF.EmitScalarExpr(e), false);
2949
2950 // Try to emit code for a scalar constant instead of emitting an LValue and
2951 // loading it, because we are not guaranteed to have an l-value. One such
2952 // case is a DeclRefExpr referencing a non-odr-used constant-evaluated variable.
2953 if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
2954 auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
2955 if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
2956 return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
2957 !shouldRetainObjCLifetime(type.getObjCLifetime()));
2958 }
2959
2960 return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
2961 }
2962
2963 typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
2964 llvm::Value *value)>
2965 ValueTransform;
2966
2967 /// Insert code immediately after a call.
2968
2969 // FIXME: We should find a way to emit the runtime call immediately
2970 // after the call is emitted to eliminate the need for this function.
2971 static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
2972 llvm::Value *value,
2973 ValueTransform doAfterCall,
2974 ValueTransform doFallback) {
2975 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2976 auto *callBase = dyn_cast<llvm::CallBase>(value);
2977
2978 if (callBase && llvm::objcarc::hasAttachedCallOpBundle(callBase)) {
2979 // Fall back if the call base has operand bundle "clang.arc.attachedcall".
2980 value = doFallback(CGF, value);
2981 } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
2982 // Place the retain immediately following the call.
2983 CGF.Builder.SetInsertPoint(call->getParent(),
2984 ++llvm::BasicBlock::iterator(call));
2985 value = doAfterCall(CGF, value);
2986 } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
2987 // Place the retain at the beginning of the normal destination block.
2988 llvm::BasicBlock *BB = invoke->getNormalDest();
2989 CGF.Builder.SetInsertPoint(BB, BB->begin());
2990 value = doAfterCall(CGF, value);
2991
2992 // Bitcasts can arise because of related-result returns. Rewrite
2993 // the operand.
2994 } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
2995 // Change the insert point to avoid emitting the fall-back call after the
2996 // bitcast.
2997 CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator());
2998 llvm::Value *operand = bitcast->getOperand(0);
2999 operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
3000 bitcast->setOperand(0, operand);
3001 value = bitcast;
3002 } else {
3003 auto *phi = dyn_cast<llvm::PHINode>(value);
3004 if (phi && phi->getNumIncomingValues() == 2 &&
3005 isa<llvm::ConstantPointerNull>(phi->getIncomingValue(1)) &&
3006 isa<llvm::CallBase>(phi->getIncomingValue(0))) {
3007 // Handle phi instructions that are generated when it's necessary to check
3008 // whether the receiver of a message is null.
3009 llvm::Value *inVal = phi->getIncomingValue(0);
3010 inVal = emitARCOperationAfterCall(CGF, inVal, doAfterCall, doFallback);
3011 phi->setIncomingValue(0, inVal);
3012 value = phi;
3013 } else {
3014 // Generic fall-back case.
3015 // Retain using the non-block variant: we never need to do a copy
3016 // of a block that's been returned to us.
      value = doFallback(CGF, value);
    }
  }

  CGF.Builder.restoreIP(ip);
  return value;
}

/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
                                            const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainNonBlock(value);
           });
}

/// Given that the given expression is some sort of call (which does
/// not return retained), perform an unsafeClaim following it.
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
                                                 const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return value;
           });
}

llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
                                                bool allowUnsafeClaim) {
  if (allowUnsafeClaim &&
      CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
    return emitARCUnsafeClaimCallResult(*this, E);
  } else {
    llvm::Value *value = emitARCRetainCallResult(*this, E);
    return EmitObjCConsumeObject(E->getType(), value);
  }
}

/// Determine whether it might be important to emit a separate
/// objc_retain_block on the result of the given expression, or
/// whether it's okay to just emit it in a +1 context.
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
  assert(e->getType()->isBlockPointerType());
  e = e->IgnoreParens();

  // For future goodness, emit block expressions directly in +1
  // contexts if we can.
  if (isa<BlockExpr>(e))
    return false;

  if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
    switch (cast->getCastKind()) {
    // Emitting these operations in +1 contexts is goodness.
    case CK_LValueToRValue:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCConsumeObject:
    case CK_ARCProduceObject:
      return false;

    // These operations preserve a block type.
    case CK_NoOp:
    case CK_BitCast:
      return shouldEmitSeparateBlockRetain(cast->getSubExpr());

    // These operations are known to be bad (or haven't been considered).
    case CK_AnyPointerToBlockPointerCast:
    default:
      return true;
    }
  }

  return true;
}

namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
template <typename Impl, typename Result> class ARCExprEmitter {
protected:
  CodeGenFunction &CGF;
  Impl &asImpl() { return *static_cast<Impl*>(this); }

  ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}

public:
  Result visit(const Expr *e);
  Result visitCastExpr(const CastExpr *e);
  Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
  Result visitBlockExpr(const BlockExpr *e);
  Result visitBinaryOperator(const BinaryOperator *e);
  Result visitBinAssign(const BinaryOperator *e);
  Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
  Result visitBinAssignAutoreleasing(const BinaryOperator *e);
  Result visitBinAssignWeak(const BinaryOperator *e);
  Result visitBinAssignStrong(const BinaryOperator *e);

  // Minimal implementation:
  //   Result visitLValueToRValue(const Expr *e)
  //   Result visitConsumeObject(const Expr *e)
  //   Result visitExtendBlockObject(const Expr *e)
  //   Result visitReclaimReturnedObject(const Expr *e)
  //   Result visitCall(const Expr *e)
  //   Result visitExpr(const Expr *e)
  //
  //   Result emitBitCast(Result result, llvm::Type *resultType)
  //   llvm::Value *getValueOfResult(Result result)
};
}

/// Try to emit a PseudoObjectExpr under special ARC rules.
///
/// This massively duplicates emitPseudoObjectRValue.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression.
  const Expr *resultExpr = E->getResultExpr();
  assert(resultExpr);
  Result result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;

      // If this semantic is the result of the pseudo-object
      // expression, try to evaluate the source as +1.
      if (ov == resultExpr) {
        assert(!OVMA::shouldBindAsLValue(ov));
        result = asImpl().visit(ov->getSourceExpr());
        opaqueData = OVMA::bind(CGF, ov,
                                RValue::get(asImpl().getValueOfResult(result)));

      // Otherwise, just bind it.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
      }
      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      result = asImpl().visit(semantic);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
  // The default implementation just forwards the expression to visitExpr.
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    CGF.EmitIgnoredExpr(e->getLHS());
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
  switch (e->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_ExplicitNone:
    return asImpl().visitBinAssignUnsafeUnretained(e);

  case Qualifiers::OCL_Weak:
    return asImpl().visitBinAssignWeak(e);

  case Qualifiers::OCL_Autoreleasing:
    return asImpl().visitBinAssignAutoreleasing(e);

  case Qualifiers::OCL_Strong:
    return asImpl().visitBinAssignStrong(e);

  case Qualifiers::OCL_None:
    return asImpl().visitExpr(e);
  }
  llvm_unreachable("bad ObjC ownership qualifier");
}

/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
                    visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
    CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
                             lvalue);

  return result;
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// The general expression-emission logic.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression. This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(e) ||
             (isa<ObjCMessageExpr>(e) &&
              !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  } else if (auto *be = dyn_cast<BlockExpr>(e))
    return asImpl().visitBlockExpr(be);

  return asImpl().visitExpr(e);
}

namespace {

/// An emitter for +1 results.
struct ARCRetainExprEmitter :
  public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(value, resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, true);
  }

  TryEmitResult visitBlockExpr(const BlockExpr *e) {
    TryEmitResult result = visitExpr(e);
    // Avoid the block-retain if this is a block literal that doesn't need
    // to be copied to the heap.
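    // E.g. (hypothetical caller) a block literal that provably never
    // escapes, as in
    //   use(^{ work(); });
    // can stay on the stack under ObjCAvoidHeapifyLocalBlocks, so marking
    // the result as already retained lets the caller skip the
    // objc_retainBlock copy.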
    if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks &&
        e->getBlockDecl()->canAvoidCopyToHeap())
      result.setInt(true);
    return result;
  }

  /// Block extends are net +0. Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}

static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue,
                                              QualType type) {
  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = CGF.EmitARCRetain(type, value);
  return value;
}

/// EmitARCRetainScalarExpr - Semantically equivalent to
/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
/// best-effort attempt to peephole expressions that naturally produce
/// retained objects.
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return EmitARCRetainScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = EmitARCRetain(e->getType(), value);
  return value;
}

llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
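  // (Illustrative: if 'e' is wrapped in an ExprWithCleanups because its
  //  subexpression creates a C++ temporary, the recursion below first
  //  enters a RunCleanupsScope, so the retain is emitted before the
  //  temporary's destructor runs at the end of the full-expression.)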
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}

llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}

llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission. The
  // exception machinery doesn't do anything special with the
  // exception like retaining it, so there's no safety associated with
  // only running cleanups after the throw has started, and when it
  // matters it tends to be substantially inferior code.
  return EmitScalarExpr(expr);
}

namespace {

/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions. (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}

static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}

/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first. If we're ignoring the result, assume
  // that we can emit at an unsafe +0.
  llvm::Value *value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());

  // Perform the store.
  LValue lvalue =
    EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  EmitStoreThroughLValue(RValue::get(value), lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
    const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
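  // Rough shape of the IR for "@autoreleasepool { ... }" under a native-ARC
  // runtime (illustrative only, names simplified):
  //   %token = call ptr @objc_autoreleasePoolPush()
  //   ; ...body statements...
  //   call void @objc_autoreleasePoolPop(ptr %token)  ; emitted as a cleanup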
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an inline assembly.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
                                                   /* assembly */ "",
                                                   /* constraints */ "r",
                                                   /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}

/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type
/// with a non-trivial copy assignment function, produce the following
/// helper function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
    const ObjCPropertyImplDecl *PID) {
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))
    return nullptr;

  QualType Ty = PID->getPropertyIvarDecl()->getType();
  ASTContext &C = getContext();

  if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    // Call the move assignment operator instead of calling the copy
    // assignment operator and destructor.
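    // E.g. (hypothetical ivar type):
    //   struct S { __strong id obj; };
    // is PCK_Struct; the generated move-assignment transfers the __strong
    // member into the destination (releasing the old value) without an
    // extra retain, so no separate copy-assign + destructor pair is needed.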
    CharUnits Alignment = C.getTypeAlignInChars(Ty);
    llvm::Constant *Fn = getNonTrivialCStructMoveAssignmentOperator(
        CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty);
    return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  }

  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  if (!Ty->isRecordType())
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false, false);

  FunctionArgList args;
  ParmVarDecl *Params[2];
  ParmVarDecl *DstDecl = ParmVarDecl::Create(
      C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy,
      C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Params[0] = DstDecl);
  ParmVarDecl *SrcDecl = ParmVarDecl::Create(
      C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy,
      C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Params[1] = SrcDecl);
  FD->setParams(Params);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
      llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                             "__assign_helper_atomic_property_",
                             &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation());
  UnaryOperator *DST = UnaryOperator::Create(
      C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation());
  UnaryOperator *SRC = UnaryOperator::Create(
      C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  Expr *Args[2] = {DST, SRC};
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
      VK_LValue, SourceLocation(), FPOptionsOverride());

  EmitStmt(TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
    const ObjCPropertyImplDecl *PID) {
  const ObjCPropertyDecl *PD
      = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))
    return nullptr;

  QualType Ty = PD->getType();
  ASTContext &C = getContext();

  if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    CharUnits Alignment = C.getTypeAlignInChars(Ty);
    llvm::Constant *Fn = getNonTrivialCStructCopyConstructor(
        CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty);
    return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  }

  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  if (!Ty->isRecordType())
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  IdentifierInfo *II =
      &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false, false);

  FunctionArgList args;
  ParmVarDecl *Params[2];
  ParmVarDecl *DstDecl = ParmVarDecl::Create(
      C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy,
      C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Params[0] = DstDecl);
  ParmVarDecl *SrcDecl = ParmVarDecl::Create(
      C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy,
      C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Params[1] = SrcDecl);
  FD->setParams(Params);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn = llvm::Function::Create(
      LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
      &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue,
                      SourceLocation());

  UnaryOperator *SRC = UnaryOperator::Create(
      C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  CXXConstructExpr *CXXConstExpr =
      cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
      CXXConstructExpr::Create(C, Ty, SourceLocation(),
                               CXXConstExpr->getConstructor(),
                               CXXConstExpr->isElidable(),
                               ConstructorArgs,
                               CXXConstExpr->hadMultipleCandidates(),
                               CXXConstExpr->isListInitialization(),
                               CXXConstExpr->isStdInitListInitialization(),
                               CXXConstExpr->requiresZeroInitialization(),
                               CXXConstExpr->getConstructionKind(),
                               SourceRange());

  DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue,
                      SourceLocation());

  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment =
      getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(
                  Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment),
                  Qualifiers(), AggValueSlot::IsDestructed,
                  AggValueSlot::DoesNotNeedGCBarriers,
                  AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}

static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) {
  switch (TT.getOS()) {
  case llvm::Triple::Darwin:
  case llvm::Triple::MacOSX:
    return llvm::MachO::PLATFORM_MACOS;
  case llvm::Triple::IOS:
    return llvm::MachO::PLATFORM_IOS;
  case llvm::Triple::TvOS:
    return llvm::MachO::PLATFORM_TVOS;
  case llvm::Triple::WatchOS:
    return llvm::MachO::PLATFORM_WATCHOS;
  case llvm::Triple::DriverKit:
    return llvm::MachO::PLATFORM_DRIVERKIT;
  default:
    return /*Unknown platform*/ 0;
  }
}

static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
                                                 const VersionTuple &Version) {
  CodeGenModule &CGM = CGF.CGM;
  // Note: we intend to support multi-platform version checks, so reserve
  // the room for a dual platform checking invocation that will be
  // implemented in the future.
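  // E.g. "if (@available(macOS 10.15, *))" lowers to roughly
  //   __isPlatformVersionAtLeast(PLATFORM_MACOS, 10, 15, 0) != 0
  // (illustrative; the arguments are platform ID, major, minor, subminor).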
  llvm::SmallVector<llvm::Value *, 8> Args;

  auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) {
    std::optional<unsigned> Min = Version.getMinor(),
                            SMin = Version.getSubminor();
    Args.push_back(
        llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
    Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
    Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)));
    Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0)));
  };

  assert(!Version.empty() && "unexpected empty version");
  EmitArgs(Version, CGM.getTarget().getTriple());

  if (!CGM.IsPlatformVersionAtLeastFn) {
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty},
        false);
    CGM.IsPlatformVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast");
  }

  llvm::Value *Check =
      CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args);
  return CGF.Builder.CreateICmpNE(Check,
                                  llvm::Constant::getNullValue(CGM.Int32Ty));
}

llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
  // Darwin uses the new __isPlatformVersionAtLeast family of routines.
  if (CGM.getTarget().getTriple().isOSDarwin())
    return emitIsPlatformVersionAtLeast(*this, Version);

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  std::optional<unsigned> Min = Version.getMinor(),
                          SMin = Version.getSubminor();
  llvm::Value *Args[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
      llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)),
      llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))};

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}

static bool isFoundationNeededForDarwinAvailabilityCheck(
    const llvm::Triple &TT, const VersionTuple &TargetVersion) {
  VersionTuple FoundationDroppedInVersion;
  switch (TT.getOS()) {
  case llvm::Triple::IOS:
  case llvm::Triple::TvOS:
    FoundationDroppedInVersion = VersionTuple(/*Major=*/13);
    break;
  case llvm::Triple::WatchOS:
    FoundationDroppedInVersion = VersionTuple(/*Major=*/6);
    break;
  case llvm::Triple::Darwin:
  case llvm::Triple::MacOSX:
    FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15);
    break;
  case llvm::Triple::DriverKit:
    // DriverKit doesn't need Foundation.
    return false;
  default:
    llvm_unreachable("Unexpected OS");
  }
  return TargetVersion < FoundationDroppedInVersion;
}

void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsPlatformVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or
  // watchOS 6+.
  if (!isFoundationNeededForDarwinAvailabilityCheck(
          Target.getTriple(), Target.getPlatformMinVersion()))
    return;
  // Add -framework CoreFoundation to the linker commands.
  // We still want to emit the core foundation reference down below because
  // otherwise if CoreFoundation is not used in the code, the linker won't
  // link the framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::FunctionCallee CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework",
      llvm::AttributeList(), /*Local=*/true);
  llvm::Function *CFLinkCheckFunc =
      cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
  if (CFLinkCheckFunc->empty()) {
    CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
    CodeGenFunction CGF(*this);
    CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
    CGF.EmitNounwindRuntimeCall(CFFunc,
                                llvm::Constant::getNullValue(VoidPtrTy));
    CGF.Builder.CreateUnreachable();
    addCompilerUsedGlobal(CFLinkCheckFunc);
  }
}

CGObjCRuntime::~CGObjCRuntime() {}