//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
  case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  }
}
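// For example (illustrative, target permitting): a declaration such as
//   void f(void) __attribute__((stdcall));
// maps CC_X86StdCall to llvm::CallingConv::X86_StdCall, which prints in the
// IR as the 'x86_stdcallcc' calling convention.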
/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes the ABI code a little simpler if it can assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, std::nullopt,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}
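// For example (illustrative): a parameter declared as
//   void fill(char *buf __attribute__((pass_object_size(0))));
// is lowered as two arguments: the pointer itself plus an implicit size_t
// carrying the value of __builtin_object_size(buf, 0) at the call site.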
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<AArch64SVEPcsAttr>())
    return CC_AArch64SVEPCS;

  if (D->hasAttr<AMDGPUKernelCallAttr>())
    return CC_AMDGPUKernelCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
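// For example (illustrative): a method
//   int S::get(int x) const;
// is arranged below as if it were a free function
//   int get(S *this, int x);
// with the 'const' on 'this' dropped, per DeriveThisType above.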
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
  argTypes.push_back(DeriveThisType(ThisType, MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }
  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD) ? argTypes.front()
                           : TheCXXABI.hasMostDerivedReturn(GD)
                               ? CGM.getContext().VoidPtrTy
                               : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD) ? ArgTypes.front()
                           : TheCXXABI.hasMostDerivedReturn(GD)
                               ? CGM.getContext().VoidPtrTy
                               : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}
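// Note (illustrative): on ABIs where constructors and destructors return
// 'this' (e.g. the ARM C++ ABI), HasThisReturn above substitutes the 'this'
// pointer type for the usual void result.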
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, std::nullopt, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}
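// For example (illustrative): the method
//   - (void)setX:(int)x;
// is arranged below as (self, _cmd, x); a direct method omits the implicit
// _cmd argument.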
/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
      MD->isDirectMethod() ? 1 : 2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  if (!MD->isDirectMethod())
    argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}
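// Note on RequiredArgs (illustrative): RequiredArgs::All marks every argument
// as fixed, while RequiredArgs(N) treats arguments beyond the first N as
// variadic; e.g. for a call to printf only the format string is required.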
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}
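// Note (illustrative): the extra argument counted as '1' in the two block
// arrangements above is the implicit block-literal pointer that every block
// invocation receives as its first parameter.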
const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      std::nullopt, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}
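// For example (illustrative): given a variadic signature arranged from
//   int printf(const char *, ...)
// and a call with two arguments, arrangeCall above produces a CGFunctionInfo
// covering both argument types while preserving the original RequiredArgs(1).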
namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
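// Note (illustrative): CGFunctionInfo uses trailing storage in which slot 0 of
// the ArgInfo buffer holds the return value and slots [1, NumArgs] hold the
// arguments, which is why create() below writes the result type to index 0.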
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace
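// For example (illustrative): under these rules
//   struct P { int a; float f[2]; };
// expands to three leaves (int, float, float); a union contributes only its
// largest field, and _Complex T contributes its real and imaginary parts.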
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
  llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
        BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltTy, EltAlign));
  }
}
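// Note (illustrative): ExpandTypeFromArgs below consumes one IR argument per
// expansion leaf; e.g. a _Complex double parameter consumes two IR arguments
// (real, then imaginary).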
void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;
    if (LV.isBitField()) {
      EmitStoreThroughLValue(RValue::get(Arg), LV);
    } else {
      // TODO: currently some places are inconsistent in what LLVM pointer type
      // they use (see D118744). Once clang uses opaque pointers all LLVM
      // pointer types will be the same and we can remove this check.
      if (Arg->getType()->isPointerTy()) {
        Address Addr = LV.getAddress(*this);
        Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
      }
      EmitStoreOfScalar(Arg, LV);
    }
  }
}
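// Note (illustrative): ExpandTypeToArgs below is the call-site counterpart of
// ExpandTypeFromArgs above: it flattens a CallArg into consecutive IR call
// arguments following the same expansion order.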
void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
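// For example (illustrative): given a source of type { i32, i32 } and a
// 4-byte destination, the access dives to the first i32 element.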
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
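// For example (illustrative): truncating an i64 to i32 on a big-endian target
// emits 'lshr i64 %v, 32' followed by 'trunc', so the value keeps the same
// bytes it would have kept had it been stored to memory and reloaded.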
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedValue(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = Src.withElementType(Ty);
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.vector.insert intrinsic to perform the
  // conversion.
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
      // vector, use a vector insert and bitcast the result.
      bool NeedsBitcast = false;
      auto PredType =
          llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
      llvm::Type *OrigType = Ty;
      if (ScalableDst == PredType &&
          FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
        ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
        NeedsBitcast = true;
      }
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        llvm::Value *Result = CGF.Builder.CreateInsertVector(
            ScalableDst, UndefVec, Load, Zero, "cast.scalable");
        if (NeedsBitcast)
          Result = CGF.Builder.CreateBitCast(Result, OrigType);
        return Result;
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
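// For example (illustrative): storing a value of type { i32, i64 } emits two
// scalar stores (one per element) rather than a single aggregate store.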
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedValue(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
    Dst = Dst.withElementType(SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = addr.withElementType(CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = addr.withElementType(info.getCoerceToType());
  }
  return addr;
}
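// For example (illustrative): with a direct offset of 4, emitAddressAtOffset
// rebases the address 4 bytes into the object before reinterpreting it as the
// coercion type.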
namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // Ignore and InAlloca don't have matching LLVM parameters.
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;
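  // Illustration (editorial note): the Indirect case below is why, roughly,
  //
  //   struct Big { char bytes[32]; };
  //   struct Big make(void);
  //
  // typically becomes an IR function with a void result; the storage for the
  // return value travels as the sret pointer parameter added further down.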
  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(getLLVMContext(), AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
        llvm::PointerType::getUnqual(getLLVMContext());

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect:
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is alloca addr
      // space.
      ArgTypes[FirstIRArg] = llvm::PointerType::get(
          getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
      break;
    case ABIArgInfo::IndirectAliased:
      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(
          getLLVMContext(), ArgInfo.getIndirectAddrSpace());
      break;
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
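      // For example (editorial illustration): an argument coerced to the
      // struct type { i64, double } would, when flattening is allowed, be
      // passed as two separate IR arguments i64 and double rather than as
      // one first-class aggregate value.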
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  return GetFunctionType(GD);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow())
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
                                     const Decl *Callee) {
  if (!Callee)
    return;

  SmallVector<StringRef, 4> Attrs;

  for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
    AA->getAssumption().split(Attrs, ",");

  if (!Attrs.empty())
    FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                           llvm::join(Attrs.begin(), Attrs.end(), ","));
}

bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
                                          QualType ReturnType) const {
  // We can't just discard the return value for a record type with a
  // complex destructor or a non-trivially copyable type.
  if (const RecordType *RT =
          ReturnType.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->hasTrivialDestructor();
  }
  return ReturnType.isTriviallyCopyableType(Context);
}

static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy,
                            const Decl *TargetDecl) {
  // As-is msan cannot tolerate noundef mismatch between caller and
  // implementation. Mismatch is possible for e.g. indirect calls from C-caller
  // into C++. Such mismatches lead to confusing false reports. To avoid
  // expensive workaround on msan we enforce initialization even in uncommon
  // cases where it's allowed.
  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
    return true;
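  // Illustration (editorial note): the mismatch above can arise when a C
  // translation unit declares `int f(void);` and calls it through a function
  // pointer while the definition lives in C++ and is emitted with a noundef
  // return; forcing strict returns whenever msan is enabled keeps the two
  // sides consistent.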
  // C++ explicitly makes returning undefined values UB. C's rule only applies
  // to used values, so we never mark them noundef for now.
  if (!Module.getLangOpts().CPlusPlus)
    return false;
  if (TargetDecl) {
    if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
      if (FDecl->isExternC())
        return false;
    } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
      // Function pointer.
      if (VDecl->isExternC())
        return false;
    }
  }

  // We don't want to be too aggressive with the return checking, unless
  // it's explicitly requested in the codegen opts or we're using an
  // appropriate sanitizer. Try to respect what the programmer intended.
  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
}

/// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
/// requested denormal behavior, accounting for the overriding behavior of the
/// -f32 case.
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode,
                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());

  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
}

/// Add default attributes to a function, which have merge semantics under
/// -mlink-builtin-bitcode and should not simply overwrite any existing
/// attributes in the linked library.
static void
addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts,
                                     llvm::AttrBuilder &FuncAttrs) {
  addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode,
                       FuncAttrs);
}

static void getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
    const LangOptions &LangOpts, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
    // the -fno-builtin-foo list.
    if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    switch (CodeGenOpts.getFramePointer()) {
    case CodeGenOptions::FramePointerKind::None:
      // This is the default behavior.
      break;
    case CodeGenOptions::FramePointerKind::NonLeaf:
    case CodeGenOptions::FramePointerKind::All:
      FuncAttrs.addAttribute("frame-pointer",
                             CodeGenOptions::getFramePointerKindName(
                                 CodeGenOpts.getFramePointer()));
    }

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

    if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
      FuncAttrs.addAttribute("no-trapping-math", "true");

    // TODO: Are these all needed?
    // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==
             LangOptions::FPModeKind::FPM_Fast ||
         LangOpts.getDefaultFPContractMode() ==
             LangOptions::FPModeKind::FPM_FastHonorPragmas))
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    // TODO: Reciprocal estimate codegen options should apply to instructions?
    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

    if (!CodeGenOpts.PreferVectorWidth.empty() &&
        CodeGenOpts.PreferVectorWidth != "none")
      FuncAttrs.addAttribute("prefer-vector-width",
                             CodeGenOpts.PreferVectorWidth);

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    // Add zero-call-used-regs attribute.
    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");
      break;
    }
  }

  if (LangOpts.assumeFunctionsAreConvergent()) {
    // Conservatively, mark all functions and calls in CUDA and OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as __syncthreads() / barrier(), and so can't have certain optimizations
    // applied around them). LLVM will remove this attribute where it safely
    // can.
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);
  }

  // TODO: NoUnwind attribute should be added for other GPU modes HIP,
  // OpenMP offload. AFAIK, neither of them supports exceptions in device code.
  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  }

  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
    StringRef Var, Value;
    std::tie(Var, Value) = Attr.split('=');
    FuncAttrs.addAttribute(Var, Value);
  }
}

/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
/// though we had emitted it ourselves. We remove any attributes on F that
/// conflict with the attributes we add here.
static void mergeDefaultFunctionDefinitionAttributes(
    llvm::Function &F, const CodeGenOptions CodeGenOpts,
    const LangOptions &LangOpts, const TargetOptions &TargetOpts,
    bool WillInternalize) {

  llvm::AttrBuilder FuncAttrs(F.getContext());
  // Here we only extract the options that are relevant compared to the version
  // from GetCPUAndFeaturesAttributes.
  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);

  ::getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
                                        CodeGenOpts, LangOpts,
                                        /*AttrOnCallSite=*/false, FuncAttrs);
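  // Illustration (editorial note): if this translation unit is built with a
  // "preserve-sign" denormal mode and a linked builtin-bitcode function
  // carries "denormal-fp-math"="dynamic", the merging logic below resolves
  // the callee's mode against ours instead of clobbering it, so the linked
  // copy ends up with a mode that is correct for both sides.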
  if (!WillInternalize && F.isInterposable()) {
    // Do not promote "dynamic" denormal-fp-math to this translation unit's
    // setting for weak functions that won't be internalized. The user has no
    // real control over how builtin bitcode is linked, so we shouldn't assume
    // later copies will use a consistent mode.
    F.addFnAttrs(FuncAttrs);
    return;
  }

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =
      CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
  llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;

  if (DenormModeToMergeF32.isValid()) {
    MergedF32 =
        CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
  }

  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {
    // Overwrite existing attribute
    FuncAttrs.addAttribute("denormal-fp-math",
                           CodeGenOpts.FPDenormalMode.str());
  }

  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {
    // Overwrite existing attribute
    FuncAttrs.addAttribute("denormal-fp-math-f32",
                           CodeGenOpts.FP32DenormalMode.str());
  }

  F.removeFnAttrs(AttrsToRemove);
  addDenormalModeAttrs(Merged, MergedF32, FuncAttrs);
  F.addFnAttrs(FuncAttrs);
}

void clang::CodeGen::mergeDefaultFunctionDefinitionAttributes(
    llvm::Function &F, const CodeGenOptions CodeGenOpts,
    const LangOptions &LangOpts, const TargetOptions &TargetOpts,
    bool WillInternalize) {

  ::mergeDefaultFunctionDefinitionAttributes(F, CodeGenOpts, LangOpts,
                                             TargetOpts, WillInternalize);
}

void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),
                                        getLangOpts(), AttrOnCallSite,
                                        FuncAttrs);
}

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool HasOptnone,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
                                      FuncAttrs);
  // If we're just getting the default, get the default values for mergeable
  // attributes.
  if (!AttrOnCallSite)
    addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs);
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
  llvm::AttrBuilder FuncAttrs(F.getContext());
  getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
                               /* AttrOnCallSite = */ false, FuncAttrs);
  // TODO: call GetCPUAndFeaturesAttributes?
  F.addFnAttrs(FuncAttrs);
}

/// Apply default attributes to \p F, accounting for merge semantics of
/// attributes that should not overwrite existing attributes.
void CodeGenModule::mergeDefaultFunctionDefinitionAttributes(
    llvm::Function &F, bool WillInternalize) {
  ::mergeDefaultFunctionDefinitionAttributes(F, getCodeGenOpts(), getLangOpts(),
                                             getTarget().getTargetOpts(),
                                             WillInternalize);
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(
    llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
                               /*for call*/ false, attrs);
  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
}

static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
                                   const LangOptions &LangOpts,
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
    SmallString<32> AttributeName;
    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);
  };

  // First, handle the language options passed through -fno-builtin.
  if (LangOpts.NoBuiltin) {
    // -fno-builtin disables them all.
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // Then, add attributes for builtins specified through -fno-builtin-<name>.
  llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);

  // Now, let's check the __attribute__((no_builtin("..."))) attribute added to
  // the source.
  if (!NBA)
    return;

  // If there is a wildcard in the builtin names specified through the
  // attribute, disable them all.
  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // And last, add the rest of the builtin names.
  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
}

static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
                             const llvm::DataLayout &DL, const ABIArgInfo &AI,
                             bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
  if (AI.getKind() == ABIArgInfo::Indirect)
    return true;
  if (AI.getKind() == ABIArgInfo::Extend)
    return true;
  if (!DL.typeSizeEqualsStoreSize(Ty))
    // TODO: This will result in a modest amount of values not marked noundef
    // when they could be. We care about values that *invisibly* contain undef
    // bits from the perspective of LLVM IR.
    return false;
  if (CheckCoerce && AI.canHaveCoerceToType()) {
    llvm::Type *CoerceTy = AI.getCoerceToType();
    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))
      // If we're coercing to a type with a greater size than the canonical
      // one, we're introducing new undef bits.
      // Coercing to a type of smaller or equal size is ok, as we know that
      // there's no internal padding (typeSizeEqualsStoreSize).
      return false;
  }
  if (QTy->isBitIntType())
    return true;
  if (QTy->isReferenceType())
    return true;
  if (QTy->isNullPtrType())
    return false;
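  // Illustration (editorial note): under this policy an `int` or `int &`
  // parameter is typically marked noundef, while `std::nullptr_t`, member
  // pointers (whose layout is ABI-dependent), and most structs are left
  // unmarked, because their in-memory representation may legitimately carry
  // padding or otherwise unspecified bits.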
  if (QTy->isMemberPointerType())
    // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
    // now, never mark them.
    return false;
  if (QTy->isScalarType()) {
    if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
      return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
    return true;
  }
  if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
    return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
    return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
    return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);

  // TODO: Some structs may be `noundef`, in specific situations.
  return false;
}

/// Check if the argument of a function has maybe_undef attribute.
static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
                                 unsigned NumRequiredArgs, unsigned ArgNo) {
  const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
  if (!FD)
    return false;

  // Assume variadic arguments do not have maybe_undef attribute.
  if (ArgNo >= NumRequiredArgs)
    return false;

  // Check if argument has maybe_undef attribute.
  if (ArgNo < FD->getNumParams()) {
    const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
    if (Param && Param->hasAttr<MaybeUndefAttr>())
      return true;
  }

  return false;
}

/// Test if it's legal to apply nofpclass for the given parameter type and its
/// lowered IR type.
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType,
                              bool IsReturn) {
  // Should only apply to FP types in the source, not ABI promoted.
  if (!ParamType->hasFloatingRepresentation())
    return false;

  // The promoted-to IR type also needs to support nofpclass.
  llvm::Type *IRTy = AI.getCoerceToType();
  if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
    return true;

  if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
    return !IsReturn && AI.getCanBeFlattened() &&
           llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
             return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
           });
  }

  return false;
}

/// Return the nofpclass mask that can be applied to floating-point parameters.
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
  llvm::FPClassTest Mask = llvm::fcNone;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
  return Mask;
}
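// Illustration (editorial note): compiling with -ffinite-math-only (both
// NoHonorInfs and NoHonorNaNs set) yields the mask fcInf | fcNan, so a
// `double` parameter would typically be emitted along the lines of
//
//   define double @f(double nofpclass(nan inf) %x)
//
// telling LLVM the caller never passes a NaN or an infinity.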
/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
///
///   - getDefaultFunctionAttributes is for attributes that are essentially
///     part of the global target configuration (but perhaps can be
///     overridden on a per-function basis). Adding attributes there
///     will cause them to also be set in frontends that build on Clang's
///     target-configuration logic, as well as for code defined in library
///     modules such as CUDA's libdevice.
///
///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
///     and adds declaration-specific, convention-specific, and
///     frontend-specific logic. The last is of particular importance:
///     attributes that restrict how the frontend generates code must be
///     added here rather than getDefaultFunctionAttributes.
///
void CodeGenModule::ConstructAttributeList(StringRef Name,
                                           const CGFunctionInfo &FI,
                                           CGCalleeInfo CalleeInfo,
                                           llvm::AttributeList &AttrList,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite, bool IsThunk) {
  llvm::AttrBuilder FuncAttrs(getLLVMContext());
  llvm::AttrBuilder RetAttrs(getLLVMContext());

  // Collect function IR attributes from the CC lowering.
  // We'll collect the parameter and result attributes later.
  CallingConv = FI.getEffectiveCallingConvention();
  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
  if (FI.isCmseNSCall())
    FuncAttrs.addAttribute("cmse_nonsecure_call");

  // Collect function IR attributes from the callee prototype if we have one.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();

  // Attach assumption attributes to the declaration. If this is a call
  // site, attach assumptions from the caller to the call as well.
  AddAttributesFromAssumes(FuncAttrs, TargetDecl);

  bool HasOptnone = false;
  // The NoBuiltinAttr attached to the target FunctionDecl.
  const NoBuiltinAttr *NBA = nullptr;

  // Some ABIs may result in additional accesses to arguments that may
  // otherwise not be present.
  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
    if (A.isValid())
      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());
  };

  // Collect function IR attributes based on declaration-specific
  // information.
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
        // A sane operator new returns a non-aliasing pointer.
        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
        if (getCodeGenOpts().AssumeSaneOperatorNew &&
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);
      }
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      const bool IsVirtualCall = MD && MD->isVirtual();
      // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
      // virtual function. These attributes are not inherited by overriders.
      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();
      }
    }

    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
      // Only place nomerge attribute on call sites, never functions. This
      // allows it to work on indirect virtual function calls.
      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // gcc specifies that 'const' functions have greater restrictions than
      // 'pure' functions, so they also cannot have infinite loops.
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // gcc specifies that 'pure' functions cannot have infinite loops.
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::argMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
                                 NumElemsParam);
    }

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
      if (getLangOpts().OpenCLVersion <= 120) {
        // OpenCL v1.2 work groups are always uniform.
        FuncAttrs.addAttribute("uniform-work-group-size", "true");
      } else {
        // OpenCL v2.0 work groups may or may not be uniform.
        // The '-cl-uniform-work-group-size' compile option gives a hint
        // to the compiler that the global work-size is a multiple of
        // the work-group size specified to clEnqueueNDRangeKernel
        // (i.e. work groups are uniform).
        FuncAttrs.addAttribute("uniform-work-group-size",
                               llvm::toStringRef(CodeGenOpts.UniformWGSize));
      }
    }
  }
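  // Illustration (editorial note): a declaration such as
  //
  //   __attribute__((const)) int sq(int x);
  //
  // picks up memory(none), nounwind, and willreturn above, which is what lets
  // LLVM freely hoist, merge, or delete calls to it; 'pure' gets the weaker
  // memory(read) form instead.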
  // Attach "no-builtins" attributes to:
  // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
  // * definitions: "no-builtins" or "no-builtin-<name>" only.
  // The attributes can come from:
  // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
  // * FunctionDecl attributes: __attribute__((no_builtin(...)))
  addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);

  // Collect function IR attributes based on global settings.
  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

  // Override some default IR attributes based on declaration-specific
  // information.
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
      // A function "__attribute__((...))" overrides the command-line flag.
      auto Kind =
          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
    }

    // Add NonLazyBind attribute to function declarations when -fno-plt
    // is used.
    // FIXME: what if we just haven't processed the function definition
    // yet, or if it's an external definition like C99 inline?
    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
        }
      }
    }
  }

  // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
  // functions with -funique-internal-linkage-names.
  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
                               "selected");
    }
  }

  // Collect non-call-site function IR attributes from declaration-specific
  // information.
  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    // Whether tail calls are enabled.
    auto shouldDisableTailCalls = [&] {
      // Should this be honored in getDefaultFunctionAttributes?
      if (CodeGenOpts.DisableTailCalls)
        return true;

      if (!TargetDecl)
        return false;

      if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
          TargetDecl->hasAttr<AnyX86InterruptAttr>())
        return true;

      if (CodeGenOpts.NoEscapingBlockTailCalls) {
        if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
          if (!BD->doesNotEscape())
            return true;
      }

      return false;
    };
    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
    // handles these separately to set them based on the global defaults.
    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
  }

  // Collect attributes from arguments and return values.
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  const llvm::DataLayout &DL = getDataLayout();

  // Determine if the return type could be partially undef
  if (CodeGenOpts.EnableNoundefAttrs &&
      HasStrictReturn(*this, RetTy, TargetDecl)) {
    if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
        DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
      RetAttrs.addAttribute(llvm::Attribute::NoUndef);
  }

  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetAI.isSignExt())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    [[fallthrough]];
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);

    if (canApplyNoFPClass(RetAI, RetTy, true))
      RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));

    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    AddPotentialArgAccess();
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (!IsThunk) {
    // FIXME: fix this properly, https://reviews.llvm.org/D100388
    if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        RetAttrs.addDereferenceableAttr(
            getMinimumObjectSize(PTy).getQuantity());
      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);
      if (PTy->isObjectType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
        RetAttrs.addAlignmentAttr(Alignment);
      }
    }
  }

  bool hasUsedSRet = false;
  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs(getLLVMContext());
    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    hasUsedSRet = true;
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs(getLLVMContext());
    Attrs.addInAllocaAttr(FI.getArgStruct());
    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }
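  // Illustration (editorial note): for the indirect return of a large struct,
  // the sret handling above typically yields a parameter along the lines of
  //
  //   define void @make(ptr sret(%struct.Big) align 8 %agg.result)
  //
  // where the alignment comes from RetAI.getIndirectAlign().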
  // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this`
  // argument, unless this is a thunk function.
  // FIXME: fix this properly, https://reviews.llvm.org/D100388
  if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
      !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    llvm::AttrBuilder Attrs(getLLVMContext());

    QualType ThisTy =
        FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();

    if (!CodeGenOpts.NullPointerIsValid &&
        getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
      Attrs.addAttribute(llvm::Attribute::NonNull);
      Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
    } else {
      // FIXME dereferenceable should be correct here, regardless of
      // NullPointerIsValid. However, dereferenceable currently does not always
      // respect NullPointerIsValid and may imply nonnull and break the program.
      // See https://reviews.llvm.org/D66618 for discussions.
      Attrs.addDereferenceableOrNullAttr(
          getMinimumObjectSize(
              FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
              .getQuantity());
    }

    llvm::Align Alignment =
        getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
                                /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
            .getAsAlign();
    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs(getLLVMContext());

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg()) {
        ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
            llvm::AttributeSet::get(
                getLLVMContext(),
                llvm::AttrBuilder(getLLVMContext())
                    .addAttribute(llvm::Attribute::InReg));
      }
    }

    // Decide whether the argument we're handling could be partially undef
    if (CodeGenOpts.EnableNoundefAttrs &&
        DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
      Attrs.addAttribute(llvm::Attribute::NoUndef);
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
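    // Illustration (editorial note): taken together, the pieces above and the
    // switch below commonly produce parameter attribute sets such as
    //
    //   define void @f(i32 noundef %x, ptr noundef nonnull align 4
    //                  dereferenceable(4) %r)
    //
    // for `void f(int x, int &r)` in C++ with noundef attributes enabled.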
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (AI.isSignExt())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else
        Attrs.addAttribute(llvm::Attribute::ZExt);
      [[fallthrough]];
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

      if (canApplyNoFPClass(AI, ParamType, false))
        Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
      break;
    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      auto *Decl = ParamType->getAsRecordDecl();
      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
        // When calling the function, the pointer passed in will be the only
        // reference to the underlying object. Mark it accordingly.
        Attrs.addAttribute(llvm::Attribute::NoAlias);

      // TODO: We could add the byref attribute if not byval, but it would
      // require updating many testcases.

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      AddPotentialArgAccess();
      break;
    }
    case ABIArgInfo::IndirectAliased: {
      CharUnits Align = AI.getIndirectAlign();
      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
      Attrs.addAlignmentAttr(Align.getQuantity());
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;
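    // Illustration (editorial note): the Indirect case above is what turns a
    // large by-value aggregate like `void f(struct Big b)` into roughly
    //
    //   define void @f(ptr noundef byval(%struct.Big) align 8 %b)
    //
    // so LLVM knows it owns a caller-created stack copy with that alignment.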
    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      AddPotentialArgAccess();
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(
            getMinimumObjectSize(PTy).getQuantity());
      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        Attrs.addAttribute(llvm::Attribute::NonNull);
      if (PTy->isObjectType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
        Attrs.addAlignmentAttr(Alignment);
      }
    }

    // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
    // > For arguments to a __kernel function declared to be a pointer to a
    // > data type, the OpenCL compiler can assume that the pointee is always
    // > appropriately aligned as required by the data type.
    if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
        ParamType->isPointerType()) {
      QualType PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
        Attrs.addAlignmentAttr(Alignment);
      }
    }

    switch (FI.getExtParameterInfo(ArgNo).getABI()) {
    case ParameterABI::Ordinary:
      break;

    case ParameterABI::SwiftIndirectResult: {
      // Add 'sret' if we haven't already used it for something, but
      // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);

      // Add 'dereferenceable' and 'alignment'.
      auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;

    case ParameterABI::SwiftAsyncContext:
      Attrs.addAttribute(llvm::Attribute::SwiftAsync);
      break;
    }

    if (FI.getExtParameterInfo(ArgNo).isNoEscape())
      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
            getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  AttrList = llvm::AttributeList::get(
      getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
      llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
}
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
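// Illustration (editorial note): a K&R-style definition such as
//
//   void f(x) float x; { ... }
//
// is called with the argument promoted to double, so the prolog receives a
// double and the helper above narrows it back to float (an fptrunc);
// integer promotions are undone with a trunc.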
/// Returns the attribute (either parameter attribute or function
/// attribute) that declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

namespace {
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  Address Temp;
  Address Arg;
  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
    CGF.Builder.CreateStore(errorValue, Arg);
  }
};
} // namespace

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
                        FI.getArgStruct(), FI.getArgStructAlignment());

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
  // it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
    // We are converting from ABIArgInfo type to VarDecl type directly, unless
    // the parameter is promoted. In this case we convert to
    // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
    QualType Ty = isPromoted ? info_it->type : Arg->getType();
    assert(hasScalarEvaluationKind(Ty) ==
           hasScalarEvaluationKind(Arg->getType()));

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      Address V =
          Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
      if (ArgI.getInAllocaIndirect())
        V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
                    getContext().getTypeAlignInChars(Ty));
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
                                  ArgI.getIndirectAlign(), KnownNonNull);

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested. Also, if the address
        // may be aliased, copy it to ensure that the parameter variable is
        // mutable and has a unique address, as C requires.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          Builder.CreateMemCpy(
              AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
              ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
              llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
            EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      auto AI = Fn->getArg(FirstIRArg);
      llvm::Type *LTy = ConvertType(Arg->getType());

      // Prepare parameter attributes. So far, only attributes for pointer
      // parameters are prepared. See
      // http://llvm.org/docs/LangRef.html#paramattrs.
      if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
          ArgI.getCoerceToType()->isPointerTy()) {
        assert(NumIRArgs == 1);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          // Set `nonnull` attribute if any.
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()) &&
              !CGM.getCodeGenOpts().NullPointerIsValid)
            AI->addAttr(llvm::Attribute::NonNull);

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
                  getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
                  CGM.getNaturalTypeAlignment(ETy).getAsAlign();
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
                               .addAlignmentAttr(Alignment));
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs(getLLVMContext());
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                    ArrSize);
                AI->addAttrs(Attrs);
              } else if (getContext().getTargetInfo().getNullPointerValue(
                             ETy.getAddressSpace()) == 0 &&
                         !CGM.getCodeGenOpts().NullPointerIsValid) {
                AI->addAttr(llvm::Attribute::NonNull);
              }
            }
          } else if (const auto *ArrTy =
                         getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
                  CGM.getNaturalTypeAlignment(ETy).getAsAlign();
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
                               .addAlignmentAttr(Alignment));
              if (!getTypes().getTargetAddressSpace(ETy) &&
                  !CGM.getCodeGenOpts().NullPointerIsValid)
                AI->addAttr(llvm::Attribute::NonNull);
            }
          }
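          // Illustration (editorial note): for a parameter declared as
          //
          //   void f(int a[static 4]);
          //
          // the constant-array branch above adds align 4 and
          // dereferenceable(16) (4 elements * 4 bytes) to the incoming
          // pointer, since C99 `static` promises at least that many valid
          // elements.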
          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = OTy->getAs<TypedefType>())
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
            // If the alignment-assumption sanitizer is enabled, we do *not*
            // add the alignment attribute here; instead we emit a normal
            // alignment assumption so that the UBSan check can do its job.
            llvm::ConstantInt *AlignmentCI =
                cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
            uint64_t AlignmentInt =
                AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
            if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
              AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
                  llvm::Align(AlignmentInt)));
            }
          }
        }

        // Set 'noalias' if an argument type has the `restrict` qualifier.
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);
      }

      // Prepare the argument value. If we have the trivial case, handle it
      // with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);

        // LLVM expects swifterror parameters to be used in very restricted
        // ways. Copy the value into a less-restricted temporary.
        llvm::Value *V = AI;
        if (FI.getExtParameterInfo(ArgNo).getABI() ==
            ParameterABI::SwiftErrorResult) {
          QualType pointeeTy = Ty->getPointeeType();
          assert(pointeeTy->isPointerType());
          Address temp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          Address arg(V, ConvertTypeForMem(pointeeTy),
                      getContext().getTypeAlignInChars(pointeeTy));
          llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
          Builder.CreateStore(incomingErrorValue, temp);
          V = temp.getPointer();

          // Push a cleanup to copy the value back at the end of the function.
          // The convention does not guarantee that the value will be written
          // back if the function exits with an unwind exception.
          EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
        }

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are generating code for the
        // callee here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ParamValue::forDirect(V));
        break;
      }

      // VLST arguments are coerced to VLATs at the function boundary for
      // ABI consistency. If this is a VLST that was coerced to
      // a VLAT at the function boundary and the types match up, use
      // llvm.vector.extract to convert back to the original VLST.
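      // For example (illustrative), under -msve-vector-bits=512 a fixed-length
      // SVE vector such as a 16 x i32 VLST arrives here as a scalable
      // <vscale x 4 x i32> argument and is converted back with
      // llvm.vector.extract.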
      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *Coerced = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
          // If we are casting a scalable 16 x i1 predicate vector to a fixed
          // i8 vector, bitcast the source and use a vector extract.
          auto PredType =
              llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
          if (VecTyFrom == PredType &&
              VecTyTo->getElementType() == Builder.getInt8Ty()) {
            VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
            Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
          }
          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);

            assert(NumIRArgs == 1);
            Coerced->setName(Arg->getName() + ".coerce");
            ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
                VecTyTo, Coerced, Zero, "cast.fixed")));
            break;
          }
        }
      }

      Address Alloca =
          CreateMemTemp(Ty, getContext().getDeclAlign(Arg), Arg->getName());

      // Pointer to store into.
      Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::TypeSize PtrElementSize =
            CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "
                 "type");
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with "
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            LoadedStructValue =
                Builder.CreateInsertValue(LoadedStructValue, AI, i);
          }

          Builder.CreateStore(LoadedStructValue, Ptr);
        } else {
          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          Address AddrToStoreInto = Address::invalid();
          if (SrcSize <= DstSize) {
            AddrToStoreInto = Ptr.withElementType(STy);
          } else {
            AddrToStoreInto =
                CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
          }

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
            Builder.CreateStore(AI, EltPtr);
          }

          if (SrcSize > DstSize) {
            Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
          }
        }
      } else {
        // Simple case, just do a coerced store of the argument into the
        // alloca.
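        // For example (illustrative), on x86-64 an argument of type
        // 'struct { float x, y; }' can arrive as a single <2 x float>, which
        // CreateCoercedStore then writes into the parameter's alloca.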
        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
            EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      // Reconstruct into a temporary.
      Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      ArgVals.push_back(ParamValue::forIndirect(alloca));

      auto coercionType = ArgI.getCoerceAndExpandType();
      alloca = alloca.withElementType(coercionType);

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;

        auto eltAddr = Builder.CreateStructGEP(alloca, i);
        auto elt = Fn->getArg(argIndex++);
        Builder.CreateStore(elt, eltAddr);
      }
      assert(argIndex == FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      LValue LV = MakeAddrLValue(Alloca, Ty);
      ArgVals.push_back(ParamValue::forIndirect(Alloca));

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction emitted in the current block; we
  // must be positioned immediately after it.
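  // A sketch of the rewrite this performs (assuming no intervening bitcasts):
  //   %result = call i8* @objc_retain(i8* %opnd)
  // becomes a single fused call
  //   %result = call i8* @objc_retainAutoreleaseReturnValue(i8* %opnd)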
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction *, 4> InstsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    InstsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call. If we can't find it, we can't do this
    // optimization. But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
      InstsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  // Keep killing bitcasts, for sanity. Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (auto *I : InstsToKill)
    I->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
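  // For example, for 'return self;' in such a method, the retain emitted as
  // part of the return sequence would only pair with the autorelease we are
  // about to emit; removing the retain (and skipping the autorelease) is a
  // heuristically safe net no-op.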
  const ObjCMethodDecl *method =
      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
      dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall || retainCall->getCalledOperand() !=
                         CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
    return nullptr;

  // Okay! Burn it all down. This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain. This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // Check whether a User is a store whose pointer operand is the ReturnValue.
  // We are looking for stores to the ReturnValue, not for stores of the
  // ReturnValue to some other location.
  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
        SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
      return nullptr;
    // These aren't actually possible for non-coerced returns, and we
    // only care about non-coerced returns on this code path.
    assert(!SI->isAtomic() && !SI->isVolatile());
    return SI;
  };
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP. Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
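  // For example, 'if (c) return x; return y;' leaves one store to the slot on
  // each path, so the slot has multiple uses even though exactly one store
  // precedes any given return.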
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;

    // Look at directly preceding instruction, skipping bitcasts and lifetime
    // markers.
    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))
        continue;
      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
          continue;

      return GetStoreIfValid(&I);
    }
    return nullptr;
  }

  llvm::StoreInst *store =
      GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  if (!store) return nullptr;

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs;
  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

// Helper functions for EmitCMSEClearRecord

// Set the bits corresponding to a field having width `BitWidth` and located at
// offset `BitOffset` (from the least significant bit) within a storage unit of
// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
// Use little-endian layout, i.e. `Bits[0]` is the LSB.
static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  int Pos = 0;
  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;
  }

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
    BitOffset = 0;
  }

  while (BitWidth >= CharWidth) {
    Bits[Pos++] = Used;
    BitWidth -= CharWidth;
  }

  if (BitWidth > 0)
    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
}

// Set the bits corresponding to a field having width `BitWidth` and located at
// offset `BitOffset` (from the least significant bit) within a storage unit of
// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
// `Bits` corresponds to one target byte. Use target endian layout.
static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  SmallVector<uint64_t, 8> TmpBits(StorageSize);
  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

  if (BigEndian)
    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;
}

static void setUsedBits(CodeGenModule &, QualType, int,
                        SmallVectorImpl<uint64_t> &);

// Set the bits in `Bits` that correspond to the value representations of
// the actual members of the record type `RTy`.
// Note that this function does not handle base classes, virtual tables, etc.,
// since they cannot occur in CMSE function arguments or return values. The
// bit mask corresponds to the target memory layout, i.e. it is
// endian-dependent.
static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
                        SmallVectorImpl<uint64_t> &Bits) {
  ASTContext &Context = CGM.getContext();
  int CharWidth = Context.getCharWidth();
  const RecordDecl *RD = RTy->getDecl()->getDefinition();
  const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
  const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);

  int Idx = 0;
  for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
    const FieldDecl *F = *I;

    if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
        F->getType()->isIncompleteArrayType())
      continue;

    if (F->isBitField()) {
      const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
      setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
                  BFI.StorageSize / CharWidth, BFI.Offset,
                  BFI.Size, CharWidth,
                  CGM.getDataLayout().isBigEndian());
      continue;
    }

    setUsedBits(CGM, F->getType(),
                Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
  }
}

// Set the bits in `Bits` that correspond to the value representations of
// the elements of an array type `ATy`.
static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
                        int Offset, SmallVectorImpl<uint64_t> &Bits) {
  const ASTContext &Context = CGM.getContext();

  QualType ETy = Context.getBaseElementType(ATy);
  int Size = Context.getTypeSizeInChars(ETy).getQuantity();
  SmallVector<uint64_t, 4> TmpBits(Size);
  setUsedBits(CGM, ETy, 0, TmpBits);

  for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
    auto Src = TmpBits.begin();
    auto Dst = Bits.begin() + Offset + I * Size;
    for (int J = 0; J < Size; ++J)
      *Dst++ |= *Src++;
  }
}

// Set the bits in `Bits` that correspond to the value representation of
// the type `QTy`.
static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
                        SmallVectorImpl<uint64_t> &Bits) {
  if (const auto *RTy = QTy->getAs<RecordType>())
    return setUsedBits(CGM, RTy, Offset, Bits);

  ASTContext &Context = CGM.getContext();
  if (const auto *ATy = Context.getAsConstantArrayType(QTy))
    return setUsedBits(CGM, ATy, Offset, Bits);

  int Size = Context.getTypeSizeInChars(QTy).getQuantity();
  if (Size <= 0)
    return;

  std::fill_n(Bits.begin() + Offset, Size,
              (uint64_t(1) << Context.getCharWidth()) - 1);
}

static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
                                   int Pos, int Size, int CharWidth,
                                   bool BigEndian) {
  assert(Size > 0);
  uint64_t Mask = 0;
  if (BigEndian) {
    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
         ++P)
      Mask = (Mask << CharWidth) | *P;
  } else {
    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
    do
      Mask = (Mask << CharWidth) | *--P;
    while (P != End);
  }
  return Mask;
}

// Emit code to clear the bits in a record that aren't part of any
// user-declared member, when the record is a function return value.
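// For example (illustrative), returning 'struct S { char c; int i; };' as an
// i64 on a little-endian target ANDs the value with 0xFFFFFFFF000000FF,
// clearing the three padding bytes between 'c' and 'i'.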
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::IntegerType *ITy,
                                                  QualType QTy) {
  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ITy);
  SmallVector<uint64_t, 4> Bits(Size);
  setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);

  int CharWidth = CGM.getContext().getCharWidth();
  uint64_t Mask =
      buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());

  return Builder.CreateAnd(Src, Mask, "cmse.clear");
}

// Emit code to clear the bits in a record that aren't part of any
// user-declared member, when the record is a function argument.
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::ArrayType *ATy,
                                                  QualType QTy) {
  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ATy);
  SmallVector<uint64_t, 16> Bits(Size);
  setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);

  // Clear each element of the LLVM array.
  int CharWidth = CGM.getContext().getCharWidth();
  int CharsPerElt =
      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
  int MaskIndex = 0;
  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
    uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
                                       DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
  }

  return R;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (FI.isNoReturn()) {
    // Noreturn functions don't return.
    EmitUnreachable(EndLoc);
    return;
  }

  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
    // need to return the sret value in a register, though.
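    // For example (illustrative), on 32-bit Windows a function returning a
    // non-trivial struct by value both writes the result through the sret
    // pointer stored in the inalloca pack and returns that pointer in a
    // register.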
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
      llvm::Type *Ty =
          cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
      RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar: {
      LValueBaseInfo BaseInfo;
      TBAAAccessInfo TBAAInfo;
      CharUnits Alignment =
          CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
      Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
      LValue ArgVal =
          LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
      EmitStoreOfScalar(
          Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
      break;
    }
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return-value temp always has pointer-to-return-type
      // type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
#ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of its typedefs, so we cannot use RetTy here. Get the
      // original return type from the FunctionDecl, ObjCMethodDecl, or
      // BlockDecl via CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");

      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RT->isObjCRetainableType());
#endif
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::CoerceAndExpand: {
    auto coercionType = RetAI.getCoerceAndExpandType();

    // Load all of the coerced elements out into results.
    llvm::SmallVector<llvm::Value*, 4> results;
    Address addr = ReturnValue.withElementType(coercionType);
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
      auto coercedEltType = coercionType->getElementType(i);
      if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
        continue;

      auto eltAddr = Builder.CreateStructGEP(addr, i);
      auto elt = Builder.CreateLoad(eltAddr);
      results.push_back(elt);
    }

    // If we have one result, it's the single direct result type.
    if (results.size() == 1) {
      RV = results[0];

      // Otherwise, we need to make a first-class aggregate.
    } else {
      // Construct a return type that lacks padding elements.
      llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();

      RV = llvm::PoisonValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);
      }
    }
    break;
  }
  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
      // For certain return types, clear padding bits, as they may reveal
      // sensitive information.
      // Small struct/union types are passed as integers.
      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
      if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
        RV = EmitCMSEClearRecord(RV, ITy, RetTy);
    }
    EmitReturnValueCheck(RV);
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
  // A current decl may not be available when emitting vtable thunks.
  if (!CurCodeDecl)
    return;

  // If the return block isn't reachable, neither is this check, so don't emit
  // it.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
    return;

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
    RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
    return;

  // Prefer the returns_nonnull attribute if it's present.
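  // For example, if a function carries __attribute__((returns_nonnull)), the
  // attribute-based check is the one emitted, rather than a check based on a
  // _Nonnull return annotation.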
  SourceLocation AttrLoc;
  SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (RetNNAttr) {
    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;
  } else {
    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())
        if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
          AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;
  }

  SanitizerScope SanScope(this);

  // Make sure the "return" source location is valid. If we're checking a
  // nullability annotation, make sure the preconditions for the check are met.
  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())
    CanNullCheck =
        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
  EmitBlock(Check);

  // Now do the null check.
  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);

  EmitBlock(NoCheck);

#ifndef NDEBUG
  // The return location should not be used after the check has been emitted.
  ReturnLocation = Address::invalid();
#endif
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
                                          QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);

  // FIXME: When we generate this IR in one pass, we shouldn't need
  // this win32-specific alignment hack.
  CharUnits Align = CharUnits::fromQuantity(4);
  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);

  return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased,
                               AggValueSlot::DoesNotOverlap);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca. We need to turn that into an r-value suitable
  // for EmitCall.
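  // For example, a delegating constructor forwards its already-lowered
  // parameters through here instead of re-evaluating the argument expressions.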
  Address local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
    CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
  }

  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
  // but the argument needs to be the original pointer.
  if (type->isReferenceType()) {
    args.add(RValue::get(Builder.CreateLoad(local)), type);

    // In ARC, move out of consumed arguments so that the release cleanup
    // entered by StartFunction doesn't cause an over-release. This isn't
    // optimal -O0 code generation, but it should get cleaned up when
    // optimization is enabled. This also assumes that delegate calls are
    // performed exactly once for a set of arguments, but that should be safe.
  } else if (getLangOpts().ObjCAutoRefCount &&
             param->hasAttr<NSConsumedAttr>() &&
             type->isObjCRetainableType()) {
    llvm::Value *ptr = Builder.CreateLoad(local);
    auto null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
    Builder.CreateStore(null, local);
    args.add(RValue::get(ptr), type);

    // For the most part, we just need to load the alloca, except that
    // aggregate r-values are actually pointers to temporaries.
  } else {
    args.add(convertTempToRValue(local, type, loc), type);
  }

  // Deactivate the cleanup for the callee-destructed param that was pushed.
  if (type->isRecordType() && !CurFuncIsThunk &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
      param->needsDestruction(getContext())) {
    EHScopeStack::stable_iterator cleanup =
        CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
    assert(cleanup.isValid() &&
           "cleanup for callee-destructed param not recorded");
    // This unreachable is a temporary marker which will be removed later.
    llvm::Instruction *isActive = Builder.CreateUnreachable();
    args.addArgCleanupDeactivation(cleanup, isActive);
  }
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  Address srcAddr = srcLV.getAddress(CGF);
  assert(!isProvablyNull(srcAddr.getPointer()) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
                                              CGF.CGM.getDataLayout());
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull =
        CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of. This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value. No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

    // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (const auto &I : llvm::reverse(Cleanups)) {
    CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
    I.IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

    // Otherwise, just emit it as a scalar.
  } else {
    Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());

    QualType srcAddrType =
        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
  }
  Address srcAddr = srcLV.getAddress(CGF);

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.
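  // For example (illustrative), passing '&strongFoo' (a 'Foo *__strong *')
  // where the parameter is declared 'id __autoreleasing *' gives a source
  // element type of 'Foo *' but a destination element type of 'id'.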

  llvm::PointerType *destType =
      cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
  llvm::Type *destElemType =
      CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr.getPointer())) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  Address temp =
      CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
                                              CGF.CGM.getDataLayout());
  if (provablyNonNull) {
    finalArgument = temp.getPointer();
  } else {
    llvm::Value *isNull =
        CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");

    finalArgument =
        CGF.Builder.CreateSelect(isNull,
                                 llvm::ConstantPointerNull::get(destType),
                                 temp.getPointer(), "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase);

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    // Restore the stack after the call.
    llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          AbstractCallee AC,
                                          unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
                         SanOpts.has(SanitizerKind::NullabilityArg)))
    return;

  // The param decl may be missing in a variadic function.
  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  // Prefer the nonnull attribute if it's present.
  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))
    NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&
                          *Nullability == NullabilityKind::NonNull &&
                          PVD->getTypeSourceInfo();
  }

  if (!NNAttr && !CanCheckNullability)
    return;

  SourceLocation AttrLoc;
  SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (NNAttr) {
    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;
  } else {
    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;
  }

  SanitizerScope SanScope(this);
  llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
}

// Check if the call is going to use the inalloca convention. This needs to
// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
// later, so we can't check it directly.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
                            ArrayRef<QualType> ArgTypes) {
  // The Swift calling conventions don't go through the target-specific
  // argument classification, so they never use inalloca.
  // TODO: Consider limiting inalloca use to only calling conventions supported
  // by MSVC.
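  // For example (illustrative), passing a class with a user-provided copy
  // constructor by value on 32-bit Windows classifies the argument as
  // RAA_DirectInMemory, so the call uses inalloca.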
  if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
    return false;
  if (!CGM.getTarget().getCXXABI().isMicrosoft())
    return false;
  return llvm::any_of(ArgTypes, [&](QualType Ty) {
    return isInAllocaArgument(CGM.getCXXABI(), Ty);
  });
}

#ifndef NDEBUG
// Determine whether the given callee is an Objective-C method
// that may have type parameters in its signature.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
  const DeclContext *dc = method->getDeclContext();
  if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
    return classDecl->getTypeParamListAsWritten();
  }

  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
    return catDecl->getTypeParamList();
  }

  return false;
}
#endif

/// EmitCallArgs - Emit call arguments for a function.
void CodeGenFunction::EmitCallArgs(
    CallArgList &Args, PrototypeWrapper Prototype,
    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
    AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
  SmallVector<QualType, 16> ArgTypes;

  assert((ParamsToSkip == 0 || Prototype.P) &&
         "Can't skip parameters if type info is not provided");

  // This variable only captures *explicitly* written conventions, not those
  // applied by default via command line flags or target defaults, such as
  // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
  // require knowing if this is a C++ instance method or being able to see
  // unprototyped FunctionTypes.
  CallingConv ExplicitCC = CC_C;

  // First, if a prototype was provided, use those argument types.
  bool IsVariadic = false;
  if (Prototype.P) {
    const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
    if (MD) {
      IsVariadic = MD->isVariadic();
      ExplicitCC = getCallingConventionForDecl(
          MD, CGM.getTarget().getTriple().isOSWindows());
      ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
                      MD->param_type_end());
    } else {
      const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
      IsVariadic = FPT->isVariadic();
      ExplicitCC = FPT->getExtInfo().getCC();
      ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
                      FPT->param_type_end());
    }

#ifndef NDEBUG
    // Check that the prototyped types match the argument expression types.
    bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
    CallExpr::const_arg_iterator Arg = ArgRange.begin();
    for (QualType Ty : ArgTypes) {
      assert(Arg != ArgRange.end() && "Running over edge of argument list!");
      assert(
          (isGenericMethod || Ty->isVariablyModifiedType() ||
           Ty.getNonReferenceType()->isObjCRetainableType() ||
           getContext()
                   .getCanonicalType(Ty.getNonReferenceType())
                   .getTypePtr() ==
               getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
          "type mismatch in call argument!");
      ++Arg;
    }

    // Either we've emitted all the call args, or we have a call to a variadic
    // function.
    assert((Arg == ArgRange.end() || IsVariadic) &&
           "Extra arguments in non-variadic function!");
#endif
  }

  // If we still have any arguments, emit them using the type of the argument.
  for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
    ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));

  // We must evaluate arguments from right to left in the MS C++ ABI, because
  // arguments are destroyed left to right in the callee. As a special case,
  // there are certain language constructs that require left-to-right
  // evaluation, and in those cases we consider the evaluation order requirement
  // to trump the "destruction order is reverse construction order" guarantee.
  bool LeftToRight =
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
          ? Order == EvaluationOrder::ForceLeftToRight
          : Order != EvaluationOrder::ForceRightToLeft;

  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
                                         RValue EmittedArg) {
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
      return;
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                     EmittedArg.getScalarVal(),
                                                     PS->isDynamic());
    Args.add(RValue::get(V), SizeTy);
    // If we're emitting args in reverse, be sure to do so with
    // pass_object_size, as well.
    if (!LeftToRight)
      std::swap(Args.back(), *(&Args.back() - 1));
  };

  // Insert a stack save if we're going to need any inalloca args.
  if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
           "inalloca only supported on x86");
    Args.allocateArgumentMemory(*this);
  }

  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    unsigned InitialArgSize = Args.size();
    // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types
    // of the argument and parameter match or the objc method is parameterized.
    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
                                                ArgTypes[Idx]) ||
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
           "Argument and parameter types don't match");
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    // In particular, we depend on it being the last arg in Args, and the
    // objectsize bits depend on there only being one arg if !LeftToRight.
    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;
    // Since pointer arguments are never emitted as an LValue, it is safe to
    // emit the non-null argument check for r-values only.
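    // For example (illustrative), for a parameter annotated with both
    // _Nonnull and pass_object_size(0), the block below first emits the
    // non-null check on the emitted pointer and then appends the implicit
    // @llvm.objectsize argument right after it.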
    if (!Args.back().hasLValue()) {
      RValue RVArg = Args.back().getKnownRValue();
      EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
                          ParamsToSkip + Idx);
      // @llvm.objectsize should never have side-effects and shouldn't need
      // destruction/cleanups, so we can safely "emit" it after its arg,
      // regardless of right-to-leftness.
      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
    }
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}

namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    QualType::DestructionKind DtorKind = Ty.isDestructedType();
    if (DtorKind == QualType::DK_cxx_destructor) {
      const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
      assert(!Dtor->isTrivial());
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                                /*Delegating=*/false, Addr, Ty);
    } else {
      CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
    }
  }
};

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace

RValue CallArg::getRValue(CodeGenFunction &CGF) const {
  if (!HasLV)
    return RV;
  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
                        LV.isVolatile());
  IsUsed = true;
  return RValue::getAggregate(Copy.getAddress(CGF));
}

void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
  if (!HasLV && RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
  else if (!HasLV && RV.isComplex())
    CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
  else {
    auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
    LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
    // We assume that call args are never copied into subobjects.
    CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
                          HasLV ? LV.isVolatileQualified()
  // Insert a stack save if we're going to need any inalloca args.
  if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
           "inalloca only supported on x86");
    Args.allocateArgumentMemory(*this);
  }

  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    unsigned InitialArgSize = Args.size();
    // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types
    // of the argument and parameter match or the objc method is
    // parameterized.
    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
                                                ArgTypes[Idx]) ||
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
           "Argument and parameter types don't match");
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    // In particular, we depend on it being the last arg in Args, and the
    // objectsize bits depend on there only being one arg if !LeftToRight.
    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;
    // Since pointer arguments are never emitted as LValues, it is safe to
    // emit the non-null argument check only for r-values.
    if (!Args.back().hasLValue()) {
      RValue RVArg = Args.back().getKnownRValue();
      EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
                          ParamsToSkip + Idx);
      // @llvm.objectsize should never have side-effects and shouldn't need
      // destruction/cleanups, so we can safely "emit" it after its arg,
      // regardless of right-to-leftness.
      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
    }
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}

namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    QualType::DestructionKind DtorKind = Ty.isDestructedType();
    if (DtorKind == QualType::DK_cxx_destructor) {
      const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
      assert(!Dtor->isTrivial());
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr, Ty);
    } else {
      CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
    }
  }
};

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace

RValue CallArg::getRValue(CodeGenFunction &CGF) const {
  if (!HasLV)
    return RV;
  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
                        LV.isVolatile());
  IsUsed = true;
  return RValue::getAggregate(Copy.getAddress(CGF));
}

void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
  if (!HasLV && RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
  else if (!HasLV && RV.isComplex())
    CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
  else {
    auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
    LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
    // We assume that call args are never copied into subobjects.
    CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
                          HasLV ? LV.isVolatileQualified()
                                : RV.isVolatileQualified());
  }
  IsUsed = true;
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (type->isRecordType() &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot = args.isUsingInAlloca()
                            ? createPlaceholderSlot(*this, type)
                            : CreateAggTemp(type, "agg.tmp");

    bool DestroyedInCallee = true, NeedsEHCleanup = true;
    if (const auto *RD = type->getAsCXXRecordDecl())
      DestroyedInCallee = RD->hasNonTrivialDestructor();
    else
      NeedsEHCleanup = needsEHCleanup(type.isDestructedType());

    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee && NeedsEHCleanup) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
                                              type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.addUncopiedAggregate(L, type);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}
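
// For illustration (hypothetical code, not from this file): on a Microsoft
// C++ ABI target,
//
//   struct T { ~T(); };   // non-trivial destructor
//   void g(T);
//   g(T{});
//
// passes T by value in callee-destroyed memory; the EH-only cleanup pushed
// above destroys the temporary only if an exception unwinds before the call
// is actually made.
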
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(LangAS::Default) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}
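
// For illustration (hypothetical call, not from this file): on Win64, where
// NULL is plain 0 (an int),
//
//   printf("%p", NULL);
//
// would otherwise pass a 32-bit zero to a varargs slot; the widening above
// passes a pointer-sized integer zero instead, matching MSVC.
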
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, std::nullopt, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                         ArrayRef<llvm::Value *> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, std::nullopt, name);
}

// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
SmallVector<llvm::OperandBundleDef, 1>
CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
  // There is no need for a funclet operand bundle if we aren't inside a
  // funclet.
  if (!CurrentFuncletPad)
    return (SmallVector<llvm::OperandBundleDef, 1>());

  // Skip intrinsics which cannot throw (as long as they don't lower into
  // regular function calls in the course of IR transformations).
  if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
    if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
      auto IID = CalleeFn->getIntrinsicID();
      if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
        return (SmallVector<llvm::OperandBundleDef, 1>());
    }
  }

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  BundleList.emplace_back("funclet", CurrentFuncletPad);
  return BundleList;
}
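
// For illustration, the resulting IR for a call inside an MSVC cleanup
// funclet looks roughly like (names hypothetical):
//
//   %pad = cleanuppad within none []
//   call void @g() [ "funclet"(token %pad) ]
//
// so that EH preparation knows which funclet the potentially-throwing call
// belongs to.
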
/// Emits a simple call (never an invoke) to the given runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 ArrayRef<llvm::Value *> args,
                                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(
      callee, args, getBundlesForFunclet(callee.getCallee()), name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
    llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(callee.getCallee());

  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
        Builder.CreateInvoke(callee,
                             getUnreachableBlock(),
                             getInvokeDest(),
                             args,
                             BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime function.
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, std::nullopt, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                         ArrayRef<llvm::Value *> args,
                                         const Twine &name) {
  llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
                                                  ArrayRef<llvm::Value *> Args,
                                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(Callee.getCallee());

  llvm::CallBase *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(
      std::make_pair(llvm::WeakTrackingVH(Old), New));
}

namespace {

/// Specify the given \p NewAlign as the alignment of the return value
/// attribute. If such an attribute already exists, reset it to the larger of
/// the two alignments.
[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)
    return Attrs;
  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
      .addRetAttribute(Ctx, AlignAttr);
}

template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
protected:
  CodeGenFunction &CGF;

  /// We do nothing if this is, or becomes, nullptr.
  const AlignedAttrTy *AA = nullptr;

  llvm::Value *Alignment = nullptr;      // May or may not be a constant.
  llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.

  AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : CGF(CGF_) {
    if (!FuncDecl)
      return;
    AA = FuncDecl->getAttr<AlignedAttrTy>();
  }

public:
  /// If we can, materialize the alignment as an attribute on the return value.
  [[nodiscard]] llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
      return Attrs;
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI)
      return Attrs;
    // We may legitimately have non-power-of-2 alignment here.
    // If so, this is UB land, emit it via `@llvm.assume` instead.
    if (!AlignmentCI->getValue().isPowerOf2())
      return Attrs;
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        CGF.getLLVMContext(), Attrs,
        llvm::Align(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA = nullptr; // We're done. Disallow doing anything else.
    return NewAttrs;
  }

  /// Emit the alignment assumption.
  /// This is the general fallback that we take if either there is an offset,
  /// or the alignment is variable, or we are sanitizing for alignment.
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr; // We're done. Disallow doing anything else.
  }
};
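
// For illustration (hypothetical declaration, not from this file):
//
//   void *alloc_page(void) __attribute__((assume_aligned(4096)));
//
// lets the emitter above tag the call site's return value with `align 4096`
// when the alignment is a constant power of two; otherwise it falls back to
// an @llvm.assume alignment assumption.
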
/// Helper data structure to emit `AssumeAlignedAttr`.
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
        OffsetCI = nullptr;
    }
  }
};

/// Helper data structure to emit `AllocAlignAttr`.
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // Alignment may or may not be a constant, and that is okay.
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};

} // namespace
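
// For illustration (hypothetical declaration, not from this file):
//
//   void *aligned_alloc2(size_t align, size_t size)
//       __attribute__((alloc_align(1)));
//
// Here the alignment is whatever value was passed for the first parameter,
// so it is read back out of the emitted call arguments above.
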
static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
  if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
    return VT->getPrimitiveSizeInBits().getKnownMinValue();
  if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
    return getMaxVectorWidth(AT->getElementType());

  unsigned MaxVectorWidth = 0;
  if (auto *ST = dyn_cast<llvm::StructType>(Ty))
    for (auto *I : ST->elements())
      MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
  return MaxVectorWidth;
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::CallBase **callOrInvoke, bool IsMustTail,
                                 SourceLocation Loc) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes, so only
    // check when the callee has both always_inline and target attributes;
    // otherwise we could be making a conditional call after a check for the
    // proper cpu features (and it won't cause code generation issues due to
    // function-based code generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        (TargetDecl->hasAttr<TargetAttr>() ||
         (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, ArgStruct, Align);
  }
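
  // For illustration, the inalloca lowering on 32-bit x86 produces IR of
  // roughly this shape (names hypothetical):
  //
  //   %argmem = alloca inalloca <{ %struct.S }>
  //   ...initialize the %argmem fields in place...
  //   call void @g(ptr inalloca(<{ %struct.S }>) %argmem)
  //
  // i.e. the argument area itself is allocated on the stack up front.
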
  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        llvm::TypeSize size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }
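
  // For illustration (hypothetical code, not from this file): for
  //
  //   struct Big { int a[16]; };
  //   Big make(void);
  //
  // the call is lowered roughly as
  //
  //   call void @make(ptr sret(%struct.Big) %tmp)
  //
  // with the callee constructing its result directly into %tmp.
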
  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    bool ArgHasMaybeUndefAttr =
        IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Addr = Addr.withElementType(ConvertTypeForMem(I->Ty));
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");

        llvm::Value *Val = Addr.getPointer();
        if (ArgHasMaybeUndefAttr)
          Val = Builder.CreateFreeze(Addr.getPointer());
        IRCallArgs[FirstIRArg] = Val;

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL, even if RV is located in the default or alloca address
          // space, we don't want to perform an address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          llvm::Value *Val = AI.getPointer();
          if (ArgHasMaybeUndefAttr)
            Val = Builder.CreateFreeze(AI.getPointer());
          IRCallArgs[FirstIRArg] = Val;

          // Emit lifetime markers for the temporary alloca.
          llvm::TypeSize ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = llvm::PointerType::get(
              CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());

          llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
          if (ArgHasMaybeUndefAttr)
            Val = Builder.CreateFreeze(Val);
          IRCallArgs[FirstIRArg] = Val;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI() ==
            ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
                                  getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        if (ArgHasMaybeUndefAttr)
          V = Builder.CreateFreeze(V);
        IRCallArgs[FirstIRArg] = V;
        break;
      }
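
      // For illustration, a Swift call with an error result roughly becomes
      // (names hypothetical):
      //
      //   %swifterror.temp = alloca swifterror ptr
      //   store ptr %current.error, ptr %swifterror.temp
      //   call swiftcc void @f(ptr swifterror %swifterror.temp)
      //   ; the error value is loaded back out after the call
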
      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Src.withElementType(STy);
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          if (ArgHasMaybeUndefAttr)
            LI = Builder.CreateFreeze(LI);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }

        if (ArgHasMaybeUndefAttr)
          Load = Builder.CreateFreeze(Load);
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }
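
    // For illustration (hypothetical code, not from this file): on x86-64
    // SysV, a struct like
    //
    //   struct Pt { double x, y; };
    //
    // is coerced to { double, double } and, being flattenable, is passed as
    // two separate double arguments rather than one first-class aggregate.
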
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(
                std::max(layout->getAlignment(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = addr.withElementType(coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        if (ArgHasMaybeUndefAttr)
          elt = Builder.CreateFreeze(elt);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);
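
  // For illustration (hypothetical C code, not from this file): a K&R-style
  // declaration such as
  //
  //   void f();     // unprototyped in C
  //   f(1, 2);
  //
  // is lowered through a variadic function pointer type, so if f's real
  // definition is non-variadic, the call site sees a cast of it; the
  // simplification above strips that cast so f can still be inlined at -O0
  // when it is marked always_inline.
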
  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // An inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i)
    LargestVectorWidth = std::max(LargestVectorWidth,
                                  getMaxVectorWidth(IRCallArgs[i]->getType()));

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true,
                             /*IsThunk=*/false);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if it exists.
  if (InNoMergeAttributedStmt)
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);

  // Add the call-site noinline attribute if it exists.
  if (InNoInlineAttributedStmt)
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);

  // Add the call-site always_inline attribute if it exists.
  if (InAlwaysInlineAttributedStmt)
    Attrs =
        Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !InNoInlineAttributedStmt &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (SanOpts.has(SanitizerKind::KCFI) &&
      !isa_and_nonnull<FunctionDecl>(TargetDecl))
    EmitKCFIOperandBundle(ConcreteCallee, BundleList);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate
  // that Control Flow Guard checks should not be added, even if the call is
  // inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  LargestVectorWidth =
      std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Set the tail call kind if necessary.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
    else if (IsMustTail)
      Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // Add metadata if calling an __attribute__((error(""))) or warning fn.
  if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
    llvm::ConstantInt *Line =
        llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
    llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
    llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
    CI->setMetadata("srcloc", MDT);
  }

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the function since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeFnAttr(llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // If this is a musttail call, return immediately. We do not branch to the
  // epilogue in this case.
  if (IsMustTail) {
    for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
         ++it) {
      EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
      if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
        CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
    }
    if (CI->getType()->isVoidTy())
      Builder.CreateRetVoid();
    else
      Builder.CreateRet(CI);
    Builder.ClearInsertionPoint();
    EnsureInsertPoint();
    return GetUndefRValue(RetTy);
  }
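
  // For illustration (hypothetical code, not from this file):
  //
  //   [[clang::musttail]] return callee(x);
  //
  // reaches this point with IsMustTail set; the call was marked TCK_MustTail
  // above and we return straight away, since a musttail call must be
  // immediately followed by a return in the IR.
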
  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr.withElementType(coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      [[fallthrough]];
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      // If coercing a fixed vector from a scalable vector for ABI
      // compatibility, and the types match, use the llvm.vector.extract
      // intrinsic to perform the conversion.
      if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
        llvm::Value *V = CI;
        if (auto *ScalableSrc =
                dyn_cast<llvm::ScalableVectorType>(V->getType())) {
          if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
            V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
            return RValue::get(V);
          }
        }
      }
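
      // For illustration, under an ABI that returns fixed vectors in
      // scalable registers (e.g. AArch64 SVE), this emits roughly:
      //
      //   %cast.fixed = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32(
      //                     <vscale x 4 x float> %call, i64 0)
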
      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
    case ABIArgInfo::IndirectAliased:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}