Lines Matching +full:cold +full:- +full:temp

1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
77 // clang-format off in ClangCallConvToLLVMCallConv()
79 // clang-format on in ClangCallConvToLLVMCallConv()
91 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); in DeriveThisType()
96 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace()); in DeriveThisType()
102 return MD->getType()->getCanonicalTypeUnqualified() in GetFormalType()
106 /// Returns the "extra-canonicalized" return type, which discards
109 /// all parameter and return types are top-level unqualified.
111 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); in GetReturnType()
120 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), in arrangeFreeFunctionType()
122 FTNP->getExtInfo(), {}, RequiredArgs(0)); in arrangeFreeFunctionType()
130 assert(proto->hasExtParameterInfos()); in addExtParameterInfosForCall()
132 assert(proto->getNumParams() + prefixArgs <= totalArgs); in addExtParameterInfosForCall()
140 for (const auto &ParamInfo : proto->getExtParameterInfos()) { in addExtParameterInfosForCall()
160 if (!FPT->hasExtParameterInfos()) { in appendParameterTypes()
163 prefix.append(FPT->param_type_begin(), FPT->param_type_end()); in appendParameterTypes()
168 // In the vast majority of cases, we'll have precisely FPT->getNumParams() in appendParameterTypes()
171 prefix.reserve(prefix.size() + FPT->getNumParams()); in appendParameterTypes()
173 auto ExtInfos = FPT->getExtParameterInfos(); in appendParameterTypes()
174 assert(ExtInfos.size() == FPT->getNumParams()); in appendParameterTypes()
175 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { in appendParameterTypes()
176 prefix.push_back(FPT->getParamType(I)); in appendParameterTypes()
195 CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); in arrangeLLVMFunctionInfo()
200 FTP->getExtInfo(), paramInfos, Required); in arrangeLLVMFunctionInfo()
215 if (D->hasAttr<StdCallAttr>()) in getCallingConventionForDecl()
218 if (D->hasAttr<FastCallAttr>()) in getCallingConventionForDecl()
221 if (D->hasAttr<RegCallAttr>()) in getCallingConventionForDecl()
224 if (D->hasAttr<ThisCallAttr>()) in getCallingConventionForDecl()
227 if (D->hasAttr<VectorCallAttr>()) in getCallingConventionForDecl()
230 if (D->hasAttr<PascalAttr>()) in getCallingConventionForDecl()
233 if (PcsAttr *PCS = D->getAttr<PcsAttr>()) in getCallingConventionForDecl()
234 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); in getCallingConventionForDecl()
236 if (D->hasAttr<AArch64VectorPcsAttr>()) in getCallingConventionForDecl()
239 if (D->hasAttr<AArch64SVEPcsAttr>()) in getCallingConventionForDecl()
242 if (D->hasAttr<AMDGPUKernelCallAttr>()) in getCallingConventionForDecl()
245 if (D->hasAttr<IntelOclBiccAttr>()) in getCallingConventionForDecl()
248 if (D->hasAttr<MSABIAttr>()) in getCallingConventionForDecl()
251 if (D->hasAttr<SysVABIAttr>()) in getCallingConventionForDecl()
254 if (D->hasAttr<PreserveMostAttr>()) in getCallingConventionForDecl()
257 if (D->hasAttr<PreserveAllAttr>()) in getCallingConventionForDecl()
260 if (D->hasAttr<M68kRTDAttr>()) in getCallingConventionForDecl()
263 if (D->hasAttr<PreserveNoneAttr>()) in getCallingConventionForDecl()
266 if (D->hasAttr<RISCVVectorCCAttr>()) in getCallingConventionForDecl()
273 /// unknown C++ non-static member function of the given abstract type.
289 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); in arrangeCXXMethodType()
295 if (FD->hasAttr<CUDAGlobalAttr>()) { in setCUDAKernelCallingConvention()
296 const FunctionType *FT = FTy->getAs<FunctionType>(); in setCUDAKernelCallingConvention()
298 FTy = FT->getCanonicalTypeUnqualified(); in setCUDAKernelCallingConvention()
303 /// definition of the given C++ non-static member function. The
315 if (MD->isImplicitObjectMemberFunction()) { in arrangeCXXMethodDeclaration()
330 !Inherited.getShadowDecl()->constructsVirtualBase() || in inheritingCtorHasParams()
349 if (auto Inherited = CD->getInheritedConstructor()) in arrangeCXXStructorDeclaration()
372 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) in arrangeCXXStructorDeclaration()
375 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); in arrangeCXXStructorDeclaration()
396 argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); in getArgTypesForDeclaration()
404 if (proto->hasExtParameterInfos()) { in getExtParameterInfosForCall()
412 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
414 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
445 FunctionType::ExtInfo Info = FPT->getExtInfo(); in arrangeCXXConstructorCall()
447 // If the prototype args are elided, we should only have ABI-specific args, in arrangeCXXConstructorCall()
449 if (PassProtoArgs && FPT->hasExtParameterInfos()) { in arrangeCXXConstructorCall()
450 // ABI-specific suffix arguments are treated the same as variadic arguments. in arrangeCXXConstructorCall()
464 if (MD->isImplicitObjectMemberFunction()) in arrangeFunctionDeclaration()
467 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); in arrangeFunctionDeclaration()
473 // non-variadic type. in arrangeFunctionDeclaration()
475 return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None, in arrangeFunctionDeclaration()
476 std::nullopt, noProto->getExtInfo(), {}, in arrangeFunctionDeclaration()
484 /// definition of an Objective-C method.
489 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); in arrangeObjCMethodDeclaration()
493 /// through which to perform a send to the given Objective-C method,
495 /// the 'self' type of the method or even an Objective-C pointer type.
503 MD->isDirectMethod() ? 1 : 2); in arrangeObjCMessageSendSignature()
505 if (!MD->isDirectMethod()) in arrangeObjCMessageSendSignature()
508 for (const auto *I : MD->parameters()) { in arrangeObjCMessageSendSignature()
509 argTys.push_back(Context.getCanonicalParamType(I->getType())); in arrangeObjCMessageSendSignature()
511 I->hasAttr<NoEscapeAttr>()); in arrangeObjCMessageSendSignature()
520 MD->hasAttr<NSReturnsRetainedAttr>()) in arrangeObjCMessageSendSignature()
524 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); in arrangeObjCMessageSendSignature()
526 return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), in arrangeObjCMessageSendSignature()
560 assert(MD->isVirtual() && "only methods have thunks"); in arrangeUnprototypedMustTailThunk()
562 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)}; in arrangeUnprototypedMustTailThunk()
564 FTP->getExtInfo(), {}, RequiredArgs(1)); in arrangeUnprototypedMustTailThunk()
574 const CXXRecordDecl *RD = CD->getParent(); in arrangeMSCtorClosure()
577 ArgTys.push_back(*FTP->param_type_begin()); in arrangeMSCtorClosure()
578 if (RD->getNumVBases() > 0) in arrangeMSCtorClosure()
606 if (proto->isVariadic()) in arrangeFreeFunctionLikeCall()
609 if (proto->hasExtParameterInfos()) in arrangeFreeFunctionLikeCall()
628 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), in arrangeFreeFunctionLikeCall()
629 opts, argTypes, fnType->getExtInfo(), in arrangeFreeFunctionLikeCall()
636 /// target-dependent in crazy ways.
660 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), in arrangeBlockFunctionDeclaration()
662 proto->getExtInfo(), paramInfos, in arrangeBlockFunctionDeclaration()
698 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
715 FunctionType::ExtInfo info = proto->getExtInfo(); in arrangeCXXMethodCall()
716 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), in arrangeCXXMethodCall()
791 // Construct the function info. We co-allocate the ArgInfos. in arrangeLLVMFunctionInfo()
814 ABIArgInfo &retInfo = FI->getReturnInfo(); in arrangeLLVMFunctionInfo()
816 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); in arrangeLLVMFunctionInfo()
818 for (auto &I : FI->arguments()) in arrangeLLVMFunctionInfo()
844 FI->CallingConvention = llvmCC; in create()
845 FI->EffectiveCallingConvention = llvmCC; in create()
846 FI->ASTCallingConvention = info.getCC(); in create()
847 FI->InstanceMethod = instanceMethod; in create()
848 FI->ChainCall = chainCall; in create()
849 FI->DelegateCall = delegateCall; in create()
850 FI->CmseNSCall = info.getCmseNSCall(); in create()
851 FI->NoReturn = info.getNoReturn(); in create()
852 FI->ReturnsRetained = info.getProducesResult(); in create()
853 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); in create()
854 FI->NoCfCheck = info.getNoCfCheck(); in create()
855 FI->Required = required; in create()
856 FI->HasRegParm = info.getHasRegParm(); in create()
857 FI->RegParm = info.getRegParm(); in create()
858 FI->ArgStruct = nullptr; in create()
859 FI->ArgStructAlign = 0; in create()
860 FI->NumArgs = argTypes.size(); in create()
861 FI->HasExtParameterInfos = !paramInfos.empty(); in create()
862 FI->getArgsBuffer()[0].type = resultType; in create()
863 FI->MaxVectorWidth = 0; in create()
865 FI->getArgsBuffer()[i + 1].type = argTypes[i]; in create()
867 FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; in create()
903 return TE->Kind == TEK_ConstantArray; in classof()
917 return TE->Kind == TEK_Record; in classof()
926 return TE->Kind == TEK_Complex; in classof()
933 return TE->Kind == TEK_None; in classof()
941 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(), in getTypeExpansion()
942 AT->getZExtSize()); in getTypeExpansion()
944 if (const RecordType *RT = Ty->getAs<RecordType>()) { in getTypeExpansion()
947 const RecordDecl *RD = RT->getDecl(); in getTypeExpansion()
948 assert(!RD->hasFlexibleArrayMember() && in getTypeExpansion()
950 if (RD->isUnion()) { in getTypeExpansion()
951 // Unions can be here only in degenerative cases - all the fields are same in getTypeExpansion()
956 for (const auto *FD : RD->fields()) { in getTypeExpansion()
957 if (FD->isZeroLengthBitField(Context)) in getTypeExpansion()
959 assert(!FD->isBitField() && in getTypeExpansion()
960 "Cannot expand structure with bit-field members."); in getTypeExpansion()
961 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); in getTypeExpansion()
971 assert(!CXXRD->isDynamicClass() && in getTypeExpansion()
973 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases())); in getTypeExpansion()
976 for (const auto *FD : RD->fields()) { in getTypeExpansion()
977 if (FD->isZeroLengthBitField(Context)) in getTypeExpansion()
979 assert(!FD->isBitField() && in getTypeExpansion()
980 "Cannot expand structure with bit-field members."); in getTypeExpansion()
987 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { in getTypeExpansion()
988 return std::make_unique<ComplexExpansion>(CT->getElementType()); in getTypeExpansion()
996 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); in getExpansionSize()
1000 for (auto BS : RExp->Bases) in getExpansionSize()
1001 Res += getExpansionSize(BS->getType(), Context); in getExpansionSize()
1002 for (auto FD : RExp->Fields) in getExpansionSize()
1003 Res += getExpansionSize(FD->getType(), Context); in getExpansionSize()
1017 for (int i = 0, n = CAExp->NumElts; i < n; i++) { in getExpandedTypes()
1018 getExpandedTypes(CAExp->EltTy, TI); in getExpandedTypes()
1021 for (auto BS : RExp->Bases) in getExpandedTypes()
1022 getExpandedTypes(BS->getType(), TI); in getExpandedTypes()
1023 for (auto FD : RExp->Fields) in getExpandedTypes()
1024 getExpandedTypes(FD->getType(), TI); in getExpandedTypes()
1026 llvm::Type *EltTy = ConvertType(CExp->EltTy); in getExpandedTypes()
1039 for (int i = 0, n = CAE->NumElts; i < n; i++) { in forConstantArrayExpansion()
1048 "Unexpected non-simple lvalue during struct expansion."); in ExpandTypeFromArgs()
1054 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); in ExpandTypeFromArgs()
1055 ExpandTypeFromArgs(CAExp->EltTy, LV, AI); in ExpandTypeFromArgs()
1059 for (const CXXBaseSpecifier *BS : RExp->Bases) { in ExpandTypeFromArgs()
1060 // Perform a single step derived-to-base conversion. in ExpandTypeFromArgs()
1062 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, in ExpandTypeFromArgs()
1064 LValue SubLV = MakeAddrLValue(Base, BS->getType()); in ExpandTypeFromArgs()
1067 ExpandTypeFromArgs(BS->getType(), SubLV, AI); in ExpandTypeFromArgs()
1069 for (auto FD : RExp->Fields) { in ExpandTypeFromArgs()
1072 ExpandTypeFromArgs(FD->getType(), SubLV, AI); in ExpandTypeFromArgs()
1089 if (Arg->getType()->isPointerTy()) { in ExpandTypeFromArgs()
1108 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), in ExpandTypeToArgs()
1109 CAExp->EltTy); in ExpandTypeToArgs()
1110 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, in ExpandTypeToArgs()
1116 for (const CXXBaseSpecifier *BS : RExp->Bases) { in ExpandTypeToArgs()
1117 // Perform a single step derived-to-base conversion. in ExpandTypeToArgs()
1119 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, in ExpandTypeToArgs()
1121 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); in ExpandTypeToArgs()
1124 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, in ExpandTypeToArgs()
1129 for (auto FD : RExp->Fields) { in ExpandTypeToArgs()
1131 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); in ExpandTypeToArgs()
1132 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, in ExpandTypeToArgs()
1143 "Unexpected non-scalar rvalue during struct expansion."); in ExpandTypeToArgs()
1147 if (IRCallArgPos < IRFuncTy->getNumParams() && in ExpandTypeToArgs()
1148 V->getType() != IRFuncTy->getParamType(IRCallArgPos)) in ExpandTypeToArgs()
1149 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); in ExpandTypeToArgs()
1167 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1170 /// with an in-memory size smaller than DstSize.
1175 // We can't dive into a zero-element struct. in EnterStructPointerForCoercedAccess()
1176 if (SrcSTy->getNumElements() == 0) return SrcPtr; in EnterStructPointerForCoercedAccess()
1178 llvm::Type *FirstElt = SrcSTy->getElementType(0); in EnterStructPointerForCoercedAccess()
1201 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1205 /// This behaves as if the value were coerced through memory, so on big-endian
1206 /// targets the high bits are preserved in a truncation, while little-endian
1211 if (Val->getType() == Ty) in CoerceIntOrPtrToIntOrPtr()
1214 if (isa<llvm::PointerType>(Val->getType())) { in CoerceIntOrPtrToIntOrPtr()
1215 // If this is Pointer->Pointer avoid conversion to and from int. in CoerceIntOrPtrToIntOrPtr()
1227 if (Val->getType() != DestIntTy) { in CoerceIntOrPtrToIntOrPtr()
1230 // Preserve the high bits on big-endian targets. in CoerceIntOrPtrToIntOrPtr()
1232 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); in CoerceIntOrPtrToIntOrPtr()
1236 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); in CoerceIntOrPtrToIntOrPtr()
1240 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); in CoerceIntOrPtrToIntOrPtr()
1243 // Little-endian targets preserve the low bits. No shifts required. in CoerceIntOrPtrToIntOrPtr()
1255 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1295 // FIXME: Assert that we aren't truncating non-padding bits when have access in CreateCoercedLoad()
1308 if (ScalableDstTy->getElementType()->isIntegerTy(1) && in CreateCoercedLoad()
1309 ScalableDstTy->getElementCount().isKnownMultipleOf(8) && in CreateCoercedLoad()
1310 FixedSrcTy->getElementType()->isIntegerTy(8)) { in CreateCoercedLoad()
1312 FixedSrcTy->getElementType(), in CreateCoercedLoad()
1313 ScalableDstTy->getElementCount().getKnownMinValue() / 8); in CreateCoercedLoad()
1315 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) { in CreateCoercedLoad()
1344 llvm::Type *SrcTy = Src->getType(); in CreateCoercedStore()
1360 if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() && in CreateCoercedStore()
1366 dyn_cast<llvm::StructType>(Src->getType())) { in CreateCoercedStore()
1367 // Prefer scalar stores to first-class aggregate stores. in CreateCoercedStore()
1369 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in CreateCoercedStore()
1377 } else if (SrcTy->isIntegerTy()) { in CreateCoercedStore()
1390 // FIXME: Assert that we aren't truncating non-padding bits when have access in CreateCoercedStore()
1499 QualType ArgType = I->type; in construct()
1500 const ABIArgInfo &AI = I->info; in construct()
1513 IRArgs.NumberOfArgs = STy->getNumElements(); in construct()
1573 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { in ReturnTypeUsesFPRet()
1574 switch (BT->getKind()) { in ReturnTypeUsesFPRet()
1590 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { in ReturnTypeUsesFP2Ret()
1591 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { in ReturnTypeUsesFP2Ret()
1592 if (BT->getKind() == BuiltinType::LongDouble) in ReturnTypeUsesFP2Ret()
1666 const ABIArgInfo &ArgInfo = it->info; in GetFunctionType()
1695 // Fast-isel and the optimizer generally like scalar values better than in GetFunctionType()
1700 assert(NumIRArgs == st->getNumElements()); in GetFunctionType()
1701 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) in GetFunctionType()
1702 ArgTypes[FirstIRArg + i] = st->getElementType(i); in GetFunctionType()
1721 getExpandedTypes(it->type, ArgTypesIter); in GetFunctionType()
1735 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); in GetFunctionTypeForVTable()
1749 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && in AddAttributesFromFunctionProtoType()
1750 FPT->isNothrow()) in AddAttributesFromFunctionProtoType()
1753 unsigned SMEBits = FPT->getAArch64SMEAttributes(); in AddAttributesFromFunctionProtoType()
1787 for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>()) in AddAttributesFromOMPAssumes()
1788 AA->getAssumption().split(Attrs, ","); in AddAttributesFromOMPAssumes()
1798 // complex destructor or a non-trivially copyable type. in MayDropFunctionReturn()
1800 ReturnType.getCanonicalType()->getAs<RecordType>()) { in MayDropFunctionReturn()
1801 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) in MayDropFunctionReturn()
1802 return ClassDecl->hasTrivialDestructor(); in MayDropFunctionReturn()
1809 // As-is msan can not tolerate noundef mismatch between caller and in HasStrictReturn()
1810 // implementation. Mismatch is possible for e.g. indirect calls from C-caller in HasStrictReturn()
1822 if (FDecl->isExternC()) in HasStrictReturn()
1826 if (VDecl->isExternC()) in HasStrictReturn()
1839 /// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
1841 /// -f32 case.
1846 FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str()); in addDenormalModeAttrs()
1849 FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str()); in addDenormalModeAttrs()
1853 /// -mlink-builtin-bitcode and should not simply overwrite any existing
1866 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. in getTrivialDefaultFunctionAttributes()
1877 FuncAttrs.addAttribute("indirect-tls-seg-refs"); in getTrivialDefaultFunctionAttributes()
1883 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking in getTrivialDefaultFunctionAttributes()
1884 // the -fno-builtin-foo list. in getTrivialDefaultFunctionAttributes()
1888 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); in getTrivialDefaultFunctionAttributes()
1897 FuncAttrs.addAttribute("frame-pointer", in getTrivialDefaultFunctionAttributes()
1903 FuncAttrs.addAttribute("less-precise-fpmad", "true"); in getTrivialDefaultFunctionAttributes()
1909 FuncAttrs.addAttribute("no-trapping-math", "true"); in getTrivialDefaultFunctionAttributes()
1912 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. in getTrivialDefaultFunctionAttributes()
1914 FuncAttrs.addAttribute("no-infs-fp-math", "true"); in getTrivialDefaultFunctionAttributes()
1916 FuncAttrs.addAttribute("no-nans-fp-math", "true"); in getTrivialDefaultFunctionAttributes()
1918 FuncAttrs.addAttribute("approx-func-fp-math", "true"); in getTrivialDefaultFunctionAttributes()
1925 FuncAttrs.addAttribute("unsafe-fp-math", "true"); in getTrivialDefaultFunctionAttributes()
1927 FuncAttrs.addAttribute("use-soft-float", "true"); in getTrivialDefaultFunctionAttributes()
1928 FuncAttrs.addAttribute("stack-protector-buffer-size", in getTrivialDefaultFunctionAttributes()
1931 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true"); in getTrivialDefaultFunctionAttributes()
1936 FuncAttrs.addAttribute("reciprocal-estimates", in getTrivialDefaultFunctionAttributes()
1941 FuncAttrs.addAttribute("prefer-vector-width", in getTrivialDefaultFunctionAttributes()
1949 FuncAttrs.addAttribute("split-stack"); in getTrivialDefaultFunctionAttributes()
1954 // Add zero-call-used-regs attribute. in getTrivialDefaultFunctionAttributes()
1957 FuncAttrs.removeAttribute("zero-call-used-regs"); in getTrivialDefaultFunctionAttributes()
1960 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg"); in getTrivialDefaultFunctionAttributes()
1963 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr"); in getTrivialDefaultFunctionAttributes()
1966 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg"); in getTrivialDefaultFunctionAttributes()
1969 FuncAttrs.addAttribute("zero-call-used-regs", "used"); in getTrivialDefaultFunctionAttributes()
1972 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg"); in getTrivialDefaultFunctionAttributes()
1975 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr"); in getTrivialDefaultFunctionAttributes()
1978 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg"); in getTrivialDefaultFunctionAttributes()
1981 FuncAttrs.addAttribute("zero-call-used-regs", "all"); in getTrivialDefaultFunctionAttributes()
2012 /// Merges `target-features` from \TargetOpts and \F, and sets the result in
2021 auto FFeatures = F.getFnAttribute("target-features"); in overrideFunctionFeaturesWithTargetFeatures()
2031 assert(Feature[0] == '+' || Feature[0] == '-'); in overrideFunctionFeaturesWithTargetFeatures()
2045 FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ",")); in overrideFunctionFeaturesWithTargetFeatures()
2058 FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU); in mergeDefaultFunctionDefinitionAttributes()
2060 FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU); in mergeDefaultFunctionDefinitionAttributes()
2067 // Do not promote "dynamic" denormal-fp-math to this translation unit's in mergeDefaultFunctionDefinitionAttributes()
2089 AttrsToRemove.addAttribute("denormal-fp-math"); in mergeDefaultFunctionDefinitionAttributes()
2092 FuncAttrs.addAttribute("denormal-fp-math", in mergeDefaultFunctionDefinitionAttributes()
2097 AttrsToRemove.addAttribute("denormal-fp-math-f32"); in mergeDefaultFunctionDefinitionAttributes()
2100 FuncAttrs.addAttribute("denormal-fp-math-f32", in mergeDefaultFunctionDefinitionAttributes()
2144 AttributeName += "no-builtin-"; in addNoBuiltinAttributes()
2149 // First, handle the language options passed through -fno-builtin. in addNoBuiltinAttributes()
2151 // -fno-builtin disables them all. in addNoBuiltinAttributes()
2152 FuncAttrs.addAttribute("no-builtins"); in addNoBuiltinAttributes()
2156 // Then, add attributes for builtins specified through -fno-builtin-<name>. in addNoBuiltinAttributes()
2166 if (llvm::is_contained(NBA->builtinNames(), "*")) { in addNoBuiltinAttributes()
2167 FuncAttrs.addAttribute("no-builtins"); in addNoBuiltinAttributes()
2172 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); in addNoBuiltinAttributes()
2199 if (QTy->isBitIntType()) in DetermineNoUndef()
2201 if (QTy->isReferenceType()) in DetermineNoUndef()
2203 if (QTy->isNullPtrType()) in DetermineNoUndef()
2205 if (QTy->isMemberPointerType()) in DetermineNoUndef()
2209 if (QTy->isScalarType()) { in DetermineNoUndef()
2211 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false); in DetermineNoUndef()
2215 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false); in DetermineNoUndef()
2217 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false); in DetermineNoUndef()
2219 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false); in DetermineNoUndef()
2237 if (ArgNo < FD->getNumParams()) { in IsArgumentMaybeUndef()
2238 const ParmVarDecl *Param = FD->getParamDecl(ArgNo); in IsArgumentMaybeUndef()
2239 if (Param && Param->hasAttr<MaybeUndefAttr>()) in IsArgumentMaybeUndef()
2251 if (!ParamType->hasFloatingRepresentation()) in canApplyNoFPClass()
2254 // The promoted-to IR type also needs to support nofpclass. in canApplyNoFPClass()
2261 llvm::all_of(ST->elements(), [](llvm::Type *Ty) { in canApplyNoFPClass()
2269 /// Return the nofpclass mask that can be applied to floating-point parameters.
2294 /// - getDefaultFunctionAttributes is for attributes that are essentially
2296 /// overridden on a per-function basis). Adding attributes there
2298 /// target-configuration logic, as well as for code defined in library
2301 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2302 /// and adds declaration-specific, convention-specific, and
2303 /// frontend-specific logic. The last is of particular importance:
2347 // Collect function IR attributes based on declaration-specific in ConstructAttributeList()
2351 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) in ConstructAttributeList()
2353 if (TargetDecl->hasAttr<NoThrowAttr>()) in ConstructAttributeList()
2355 if (TargetDecl->hasAttr<NoReturnAttr>()) in ConstructAttributeList()
2357 if (TargetDecl->hasAttr<ColdAttr>()) in ConstructAttributeList()
2358 FuncAttrs.addAttribute(llvm::Attribute::Cold); in ConstructAttributeList()
2359 if (TargetDecl->hasAttr<HotAttr>()) in ConstructAttributeList()
2361 if (TargetDecl->hasAttr<NoDuplicateAttr>()) in ConstructAttributeList()
2363 if (TargetDecl->hasAttr<ConvergentAttr>()) in ConstructAttributeList()
2368 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); in ConstructAttributeList()
2369 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { in ConstructAttributeList()
2370 // A sane operator new returns a non-aliasing pointer. in ConstructAttributeList()
2371 auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); in ConstructAttributeList()
2377 const bool IsVirtualCall = MD && MD->isVirtual(); in ConstructAttributeList()
2381 if (Fn->isNoReturn()) in ConstructAttributeList()
2383 NBA = Fn->getAttr<NoBuiltinAttr>(); in ConstructAttributeList()
2390 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>()) in ConstructAttributeList()
2395 if (TargetDecl->hasAttr<ConstAttr>()) { in ConstructAttributeList()
2401 } else if (TargetDecl->hasAttr<PureAttr>()) { in ConstructAttributeList()
2406 } else if (TargetDecl->hasAttr<NoAliasAttr>()) { in ConstructAttributeList()
2410 if (TargetDecl->hasAttr<RestrictAttr>()) in ConstructAttributeList()
2412 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() && in ConstructAttributeList()
2415 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) in ConstructAttributeList()
2417 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>()) in ConstructAttributeList()
2419 if (TargetDecl->hasAttr<LeafAttr>()) in ConstructAttributeList()
2422 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); in ConstructAttributeList()
2423 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { in ConstructAttributeList()
2425 if (AllocSize->getNumElemsParam().isValid()) in ConstructAttributeList()
2426 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); in ConstructAttributeList()
2427 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(), in ConstructAttributeList()
2431 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) { in ConstructAttributeList()
2434 FuncAttrs.addAttribute("uniform-work-group-size", "true"); in ConstructAttributeList()
2437 // '-cl-uniform-work-group-size' compile option gets a hint in ConstructAttributeList()
2438 // to the compiler that the global work-size be a multiple of in ConstructAttributeList()
2439 // the work-group size specified to clEnqueueNDRangeKernel in ConstructAttributeList()
2442 "uniform-work-group-size", in ConstructAttributeList()
2447 if (TargetDecl->hasAttr<CUDAGlobalAttr>() && in ConstructAttributeList()
2449 FuncAttrs.addAttribute("uniform-work-group-size", "true"); in ConstructAttributeList()
2451 if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>()) in ConstructAttributeList()
2455 // Attach "no-builtins" attributes to: in ConstructAttributeList()
2456 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>". in ConstructAttributeList()
2457 // * definitions: "no-builtins" or "no-builtin-<name>" only. in ConstructAttributeList()
2459 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name> in ConstructAttributeList()
2466 // Override some default IR attributes based on declaration-specific in ConstructAttributeList()
2469 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>()) in ConstructAttributeList()
2471 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>()) in ConstructAttributeList()
2473 if (TargetDecl->hasAttr<NoSplitStackAttr>()) in ConstructAttributeList()
2474 FuncAttrs.removeAttribute("split-stack"); in ConstructAttributeList()
2475 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) { in ConstructAttributeList()
2476 // A function "__attribute__((...))" overrides the command-line flag. in ConstructAttributeList()
2478 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs(); in ConstructAttributeList()
2479 FuncAttrs.removeAttribute("zero-call-used-regs"); in ConstructAttributeList()
2481 "zero-call-used-regs", in ConstructAttributeList()
2485 // Add NonLazyBind attribute to function declarations when -fno-plt in ConstructAttributeList()
2491 if (!Fn->isDefined() && !AttrOnCallSite) { in ConstructAttributeList()
2498 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage in ConstructAttributeList()
2499 // functions with -funique-internal-linkage-names. in ConstructAttributeList()
2502 if (!FD->isExternallyVisible()) in ConstructAttributeList()
2503 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy", in ConstructAttributeList()
2508 // Collect non-call-site function IR attributes from declaration-specific in ConstructAttributeList()
2511 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) in ConstructAttributeList()
2523 if (TargetDecl->hasAttr<DisableTailCallsAttr>() || in ConstructAttributeList()
2524 TargetDecl->hasAttr<AnyX86InterruptAttr>()) in ConstructAttributeList()
2529 if (!BD->doesNotEscape()) in ConstructAttributeList()
2536 FuncAttrs.addAttribute("disable-tail-calls", "true"); in ConstructAttributeList()
2553 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect && in ConstructAttributeList()
2593 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { in ConstructAttributeList()
2594 QualType PTy = RefTy->getPointeeType(); in ConstructAttributeList()
2595 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) in ConstructAttributeList()
2601 if (PTy->isObjectType()) { in ConstructAttributeList()
2638 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { in ConstructAttributeList()
2646 FI.arg_begin()->type.getTypePtr()->getPointeeType(); in ConstructAttributeList()
2649 getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) { in ConstructAttributeList()
2659 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) in ConstructAttributeList()
2676 QualType ParamType = I->type; in ConstructAttributeList()
2677 const ABIArgInfo &AI = I->info; in ConstructAttributeList()
2696 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we in ConstructAttributeList()
2723 auto *Decl = ParamType->getAsRecordDecl(); in ConstructAttributeList()
2725 Decl->getArgPassingRestrictions() == in ConstructAttributeList()
2774 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { in ConstructAttributeList()
2775 QualType PTy = RefTy->getPointeeType(); in ConstructAttributeList()
2776 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) in ConstructAttributeList()
2782 if (PTy->isObjectType()) { in ConstructAttributeList()
2793 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() && in ConstructAttributeList()
2794 ParamType->isPointerType()) { in ConstructAttributeList()
2795 QualType PTy = ParamType->getPointeeType(); in ConstructAttributeList()
2796 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { in ConstructAttributeList()
2810 if (!hasUsedSRet && RetTy->isVoidType()) { in ConstructAttributeList()
2819 auto PTy = ParamType->getPointeeType(); in ConstructAttributeList()
2820 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { in ConstructAttributeList()
2864 llvm::Type *varType = CGF.ConvertType(var->getType()); in emitArgumentDemotion()
2868 if (value->getType() == varType) return value; in emitArgumentDemotion()
2870 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) in emitArgumentDemotion()
2880 /// attribute), which declares argument ArgNo to be non-null.
2884 // - references to pointers, where the pointee is known to be in getNonNullAttr()
2886 // - transparent unions containing pointers in getNonNullAttr()
2890 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) in getNonNullAttr()
2894 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) in getNonNullAttr()
2900 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { in getNonNullAttr()
2901 if (NNAttr->isNonNull(ArgNo)) in getNonNullAttr()
2909 Address Temp; member
2911 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} in CopyBackSwiftError()
2913 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); in Emit()
2922 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) in EmitFunctionProlog()
2926 // If this is an implicit-return-zero function, go ahead and in EmitFunctionProlog()
2931 if (FD->hasImplicitReturnZero()) { in EmitFunctionProlog()
2932 QualType RetTy = FD->getReturnType().getUnqualifiedType(); in EmitFunctionProlog()
2943 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); in EmitFunctionProlog()
2949 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), in EmitFunctionProlog()
2954 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo()); in EmitFunctionProlog()
2955 AI->setName("agg.result"); in EmitFunctionProlog()
2956 AI->addAttr(llvm::Attribute::NoAlias); in EmitFunctionProlog()
2976 const ABIArgInfo &ArgI = info_it->info; in EmitFunctionProlog()
2979 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); in EmitFunctionProlog()
2983 QualType Ty = isPromoted ? info_it->type : Arg->getType(); in EmitFunctionProlog()
2985 hasScalarEvaluationKind(Arg->getType())); in EmitFunctionProlog()
2995 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName()); in EmitFunctionProlog()
3007 Fn->getArg(FirstIRArg), Ty, ArgI.getIndirectAlign(), false, nullptr, in EmitFunctionProlog()
3035 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); in EmitFunctionProlog()
3046 auto AI = Fn->getArg(FirstIRArg); in EmitFunctionProlog()
3047 llvm::Type *LTy = ConvertType(Arg->getType()); in EmitFunctionProlog()
3052 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && in EmitFunctionProlog()
3053 ArgI.getCoerceToType()->isPointerTy()) { in EmitFunctionProlog()
3058 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), in EmitFunctionProlog()
3059 PVD->getFunctionScopeIndex()) && in EmitFunctionProlog()
3061 AI->addAttr(llvm::Attribute::NonNull); in EmitFunctionProlog()
3063 QualType OTy = PVD->getOriginalType(); in EmitFunctionProlog()
3070 if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { in EmitFunctionProlog()
3071 QualType ETy = ArrTy->getElementType(); in EmitFunctionProlog()
3074 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); in EmitFunctionProlog()
3075 uint64_t ArrSize = ArrTy->getZExtSize(); in EmitFunctionProlog()
3076 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && in EmitFunctionProlog()
3082 AI->addAttrs(Attrs); in EmitFunctionProlog()
3086 AI->addAttr(llvm::Attribute::NonNull); in EmitFunctionProlog()
3094 if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { in EmitFunctionProlog()
3095 QualType ETy = ArrTy->getElementType(); in EmitFunctionProlog()
3098 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); in EmitFunctionProlog()
3101 AI->addAttr(llvm::Attribute::NonNull); in EmitFunctionProlog()
3106 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); in EmitFunctionProlog()
3108 if (const auto *TOTy = OTy->getAs<TypedefType>()) in EmitFunctionProlog()
3109 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); in EmitFunctionProlog()
3111 // If alignment-assumption sanitizer is enabled, we do *not* add in EmitFunctionProlog()
3115 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); in EmitFunctionProlog()
3117 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); in EmitFunctionProlog()
3118 if (AI->getParamAlign().valueOrOne() < AlignmentInt) { in EmitFunctionProlog()
3119 AI->removeAttr(llvm::Attribute::AttrKind::Alignment); in EmitFunctionProlog()
3120 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr( in EmitFunctionProlog()
3127 if (Arg->getType().isRestrictQualified()) in EmitFunctionProlog()
3128 AI->addAttr(llvm::Attribute::NoAlias); in EmitFunctionProlog()
3139 // ways. Copy the value into a less-restricted temporary. in EmitFunctionProlog()
3143 QualType pointeeTy = Ty->getPointeeType(); in EmitFunctionProlog()
3144 assert(pointeeTy->isPointerType()); in EmitFunctionProlog()
3145 RawAddress temp = in EmitFunctionProlog() local
3146 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); in EmitFunctionProlog()
3150 Builder.CreateStore(incomingErrorValue, temp); in EmitFunctionProlog()
3151 V = temp.getPointer(); in EmitFunctionProlog()
3156 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); in EmitFunctionProlog()
3160 if (V->getType() != ArgI.getCoerceToType()) in EmitFunctionProlog()
3170 llvm::Type *LTy = ConvertType(Arg->getType()); in EmitFunctionProlog()
3171 if (V->getType() != LTy) in EmitFunctionProlog()
3183 llvm::Value *Coerced = Fn->getArg(FirstIRArg); in EmitFunctionProlog()
3185 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { in EmitFunctionProlog()
3188 if (VecTyFrom->getElementType()->isIntegerTy(1) && in EmitFunctionProlog()
3189 VecTyFrom->getElementCount().isKnownMultipleOf(8) && in EmitFunctionProlog()
3190 VecTyTo->getElementType() == Builder.getInt8Ty()) { in EmitFunctionProlog()
3192 VecTyTo->getElementType(), in EmitFunctionProlog()
3193 VecTyFrom->getElementCount().getKnownMinValue() / 8); in EmitFunctionProlog()
3196 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { in EmitFunctionProlog()
3200 Coerced->setName(Arg->getName() + ".coerce"); in EmitFunctionProlog()
3211 STy->getNumElements() > 1) { in EmitFunctionProlog()
3216 if (STy->containsHomogeneousScalableVectorTypes()) { in EmitFunctionProlog()
3218 "Only allow non-fractional movement of structure with" in EmitFunctionProlog()
3227 Arg->getName()); in EmitFunctionProlog()
3232 // Fast-isel and the optimizer generally like scalar values better than in EmitFunctionProlog()
3235 STy->getNumElements() > 1) { in EmitFunctionProlog()
3240 assert(STy->containsHomogeneousScalableVectorTypes() && in EmitFunctionProlog()
3244 "Only allow non-fractional movement of structure with" in EmitFunctionProlog()
3246 assert(STy->getNumElements() == NumIRArgs); in EmitFunctionProlog()
3249 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in EmitFunctionProlog()
3250 auto *AI = Fn->getArg(FirstIRArg + i); in EmitFunctionProlog()
3251 AI->setName(Arg->getName() + ".coerce" + Twine(i)); in EmitFunctionProlog()
3269 assert(STy->getNumElements() == NumIRArgs); in EmitFunctionProlog()
3270 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in EmitFunctionProlog()
3271 auto AI = Fn->getArg(FirstIRArg + i); in EmitFunctionProlog()
3272 AI->setName(Arg->getName() + ".coerce" + Twine(i)); in EmitFunctionProlog()
3284 auto AI = Fn->getArg(FirstIRArg); in EmitFunctionProlog()
3285 AI->setName(Arg->getName() + ".coerce"); in EmitFunctionProlog()
3289 getContext().getTypeSizeInChars(Ty).getQuantity() - in EmitFunctionProlog()
3297 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); in EmitFunctionProlog()
3316 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { in EmitFunctionProlog()
3317 llvm::Type *eltType = coercionType->getElementType(i); in EmitFunctionProlog()
3322 auto elt = Fn->getArg(argIndex++); in EmitFunctionProlog()
3337 auto FnArgIter = Fn->arg_begin() + FirstIRArg; in EmitFunctionProlog()
3339 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); in EmitFunctionProlog()
3341 auto AI = Fn->getArg(FirstIRArg + i); in EmitFunctionProlog()
3342 AI->setName(Arg->getName() + "." + Twine(i)); in EmitFunctionProlog()
3353 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); in EmitFunctionProlog()
3361 for (int I = Args.size() - 1; I >= 0; --I) in EmitFunctionProlog()
3370 while (insn->use_empty()) { in eraseUnusedBitCasts()
3375 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); in eraseUnusedBitCasts()
3376 bitcast->eraseFromParent(); in eraseUnusedBitCasts()
3385 if (BB->empty()) return nullptr; in tryEmitFusedAutoreleaseOfResult()
3386 if (&BB->back() != result) return nullptr; in tryEmitFusedAutoreleaseOfResult()
3388 llvm::Type *resultType = result->getType(); in tryEmitFusedAutoreleaseOfResult()
3400 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); in tryEmitFusedAutoreleaseOfResult()
3403 if (generator->getNextNode() != bitcast) in tryEmitFusedAutoreleaseOfResult()
3418 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { in tryEmitFusedAutoreleaseOfResult()
3420 } else if (call->getCalledOperand() == in tryEmitFusedAutoreleaseOfResult()
3430 llvm::Instruction *prev = call->getPrevNode(); in tryEmitFusedAutoreleaseOfResult()
3433 prev = prev->getPrevNode(); in tryEmitFusedAutoreleaseOfResult()
3437 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == in tryEmitFusedAutoreleaseOfResult()
3445 result = call->getArgOperand(0); in tryEmitFusedAutoreleaseOfResult()
3451 if (!bitcast->hasOneUse()) break; in tryEmitFusedAutoreleaseOfResult()
3453 result = bitcast->getOperand(0); in tryEmitFusedAutoreleaseOfResult()
3458 I->eraseFromParent(); in tryEmitFusedAutoreleaseOfResult()
3475 const VarDecl *self = method->getSelfDecl(); in tryRemoveRetainOfSelf()
3476 if (!self->getType().isConstQualified()) return nullptr; in tryRemoveRetainOfSelf()
3481 if (!retainCall || retainCall->getCalledOperand() != in tryRemoveRetainOfSelf()
3486 llvm::Value *retainedValue = retainCall->getArgOperand(0); in tryRemoveRetainOfSelf()
3488 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); in tryRemoveRetainOfSelf()
3489 if (!load || load->isAtomic() || load->isVolatile() || in tryRemoveRetainOfSelf()
3490 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getBasePointer()) in tryRemoveRetainOfSelf()
3496 llvm::Type *resultType = result->getType(); in tryRemoveRetainOfSelf()
3498 assert(retainCall->use_empty()); in tryRemoveRetainOfSelf()
3499 retainCall->eraseFromParent(); in tryRemoveRetainOfSelf()
3517 // At -O0, try to emit a fused retain/autorelease. in emitAutoreleaseOfResult()
3525 /// Heuristically search for a dominating store to the return-value slot.
3533 ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * { in findDominatingStoreToReturnValue()
3535 if (!SI || SI->getPointerOperand() != ReturnValuePtr || in findDominatingStoreToReturnValue()
3536 SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType()) in findDominatingStoreToReturnValue()
3538 // These aren't actually possible for non-coerced returns, and we in findDominatingStoreToReturnValue()
3539 // only care about non-coerced returns on this code path. in findDominatingStoreToReturnValue()
3541 assert(!SI->isAtomic() && in findDominatingStoreToReturnValue()
3542 (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry())); in findDominatingStoreToReturnValue()
3545 // If there are multiple uses of the return-value slot, just check in findDominatingStoreToReturnValue()
3547 // happen with how we generate implicit-returns; it can also happen in findDominatingStoreToReturnValue()
3549 if (!ReturnValuePtr->hasOneUse()) { in findDominatingStoreToReturnValue()
3551 if (IP->empty()) return nullptr; in findDominatingStoreToReturnValue()
3555 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) { in findDominatingStoreToReturnValue()
3559 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end) in findDominatingStoreToReturnValue()
3567 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back()); in findDominatingStoreToReturnValue()
3570 // Now do a first-and-dirty dominance check: just walk up the in findDominatingStoreToReturnValue()
3571 // single-predecessors chain from the current insertion point. in findDominatingStoreToReturnValue()
3572 llvm::BasicBlock *StoreBB = store->getParent(); in findDominatingStoreToReturnValue()
3576 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor())) in findDominatingStoreToReturnValue()
3590 // Use little-endian layout, i.e.`Bits[0]` is the LSB.
3602 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; in setBitRange()
3605 BitWidth -= CharWidth - BitOffset; in setBitRange()
3611 BitWidth -= CharWidth; in setBitRange()
3615 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; in setBitRange()
3648 const RecordDecl *RD = RTy->getDecl()->getDefinition(); in setUsedBits()
3653 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { in setUsedBits()
3656 if (F->isUnnamedBitField() || F->isZeroLengthBitField(Context) || in setUsedBits()
3657 F->getType()->isIncompleteArrayType()) in setUsedBits()
3660 if (F->isBitField()) { in setUsedBits()
3669 setUsedBits(CGM, F->getType(), in setUsedBits()
3697 if (const auto *RTy = QTy->getAs<RecordType>()) in setUsedBits()
3709 (uint64_t(1) << Context.getCharWidth()) - 1); in setUsedBits()
3724 Mask = (Mask << CharWidth) | *--P; in buildMultiCharMask()
3735 assert(Src->getType() == ITy); in EmitCMSEClearRecord()
3736 assert(ITy->getScalarSizeInBits() <= 64); in EmitCMSEClearRecord()
3741 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); in EmitCMSEClearRecord()
3758 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); in EmitCMSEClearRecord()
3763 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth; in EmitCMSEClearRecord()
3766 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) { in EmitCMSEClearRecord()
3787 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) { in EmitFunctionEpilog()
3810 llvm::Function::arg_iterator EI = CurFn->arg_end(); in EmitFunctionEpilog()
3811 --EI; in EmitFunctionEpilog()
3816 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType(); in EmitFunctionEpilog()
3822 auto AI = CurFn->arg_begin(); in EmitFunctionEpilog()
3857 // The internal return value temp always will have pointer-to-return-type in EmitFunctionEpilog()
3868 RetDbgLoc = SI->getDebugLoc(); in EmitFunctionEpilog()
3869 // Get the stored value and nuke the now-dead store. in EmitFunctionEpilog()
3870 RV = SI->getValueOperand(); in EmitFunctionEpilog()
3871 SI->eraseFromParent(); in EmitFunctionEpilog()
3895 RT = FD->getReturnType(); in EmitFunctionEpilog()
3897 RT = MD->getReturnType(); in EmitFunctionEpilog()
3899 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); in EmitFunctionEpilog()
3905 RT->isObjCRetainableType()); in EmitFunctionEpilog()
3921 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { in EmitFunctionEpilog()
3922 auto coercedEltType = coercionType->getElementType(i); in EmitFunctionEpilog()
3935 // Otherwise, we need to make a first-class aggregate. in EmitFunctionEpilog()
3954 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) { in EmitFunctionEpilog()
3958 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); in EmitFunctionEpilog()
3969 Ret->setDebugLoc(std::move(RetDbgLoc)); in EmitFunctionEpilog()
3979 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) in EmitReturnValueCheck()
3984 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); in EmitReturnValueCheck()
3996 AttrLoc = RetNNAttr->getLocation(); in EmitReturnValueCheck()
4001 if (auto *TSI = DD->getTypeSourceInfo()) in EmitReturnValueCheck()
4002 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) in EmitReturnValueCheck()
4037 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); in isInAllocaArgument()
4050 // this win32-specific alignment hack. in createPlaceholderSlot()
4065 // StartFunction converted the ABI-lowered parameter(s) into a in EmitDelegateCallArg()
4066 // local alloca. We need to turn that into an r-value suitable in EmitDelegateCallArg()
4070 QualType type = param->getType(); in EmitDelegateCallArg()
4072 // GetAddrOfLocalVar returns a pointer-to-pointer for references, in EmitDelegateCallArg()
4074 if (type->isReferenceType()) { in EmitDelegateCallArg()
4078 // entered by StartFunction doesn't cause an over-release. This isn't in EmitDelegateCallArg()
4079 // optimal -O0 code generation, but it should get cleaned up when in EmitDelegateCallArg()
4083 param->hasAttr<NSConsumedAttr>() && in EmitDelegateCallArg()
4084 type->isObjCRetainableType()) { in EmitDelegateCallArg()
4087 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); in EmitDelegateCallArg()
4092 // aggregate r-values are actually pointers to temporaries. in EmitDelegateCallArg()
4097 // Deactivate the cleanup for the callee-destructed param that was pushed. in EmitDelegateCallArg()
4098 if (type->isRecordType() && !CurFuncIsThunk && in EmitDelegateCallArg()
4099 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && in EmitDelegateCallArg()
4100 param->needsDestruction(getContext())) { in EmitDelegateCallArg()
4104 "cleanup for callee-destructed param not recorded"); in EmitDelegateCallArg()
4119 /// Emit the actual writing-back of a writeback.
4129 // If the argument wasn't provably non-null, we need to null check in emitWriteback()
4147 "icr.writeback-cast"); in emitWriteback()
4159 // Retain the new value. No need to block-copy here: the block's in emitWriteback()
4198 I.IsActiveIP->eraseFromParent(); in deactivateArgCleanupsBeforeCall()
4203 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) in maybeGetUnaryAddrOfOperand()
4204 if (uop->getOpcode() == UO_AddrOf) in maybeGetUnaryAddrOfOperand()
4205 return uop->getSubExpr(); in maybeGetUnaryAddrOfOperand()
4209 /// Emit an argument that's being passed call-by-writeback. That is,
4211 /// might be copy-initialized with the current value of the given
4217 // Make an optimistic effort to emit the address as an l-value. in emitWritebackArg()
4219 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { in emitWritebackArg()
4224 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); in emitWritebackArg()
4227 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); in emitWritebackArg()
4236 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); in emitWritebackArg()
4238 CGF.ConvertTypeForMem(CRE->getType()->getPointeeType()); in emitWritebackArg()
4243 CRE->getType()); in emitWritebackArg()
4248 Address temp = in emitWritebackArg() local
4249 CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp"); in emitWritebackArg()
4250 // Loading an l-value can introduce a cleanup if the l-value is __weak, in emitWritebackArg()
4251 // and that cleanup will be conditional if we can't prove that the l-value in emitWritebackArg()
4256 // Zero-initialize it if we're not doing a copy-initialization. in emitWritebackArg()
4257 bool shouldCopy = CRE->shouldCopy(); in emitWritebackArg()
4261 CGF.Builder.CreateStore(null, temp); in emitWritebackArg()
4267 // If the address is *not* known to be non-null, we need to switch. in emitWritebackArg()
4273 finalArgument = temp.emitRawPointer(CGF); in emitWritebackArg()
4279 temp.emitRawPointer(CGF), "icr.argument"); in emitWritebackArg()
4303 // Use an ordinary store, not a store-to-lvalue. in emitWritebackArg()
4304 CGF.Builder.CreateStore(src, temp); in emitWritebackArg()
4310 // and so otherwise we can violate the high-level semantics. in emitWritebackArg()
4324 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, in emitWritebackArg()
4325 "icr.to-use"); in emitWritebackArg()
4326 phiToUse->addIncoming(valueToUse, copyBB); in emitWritebackArg()
4327 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), in emitWritebackArg()
4335 args.addWriteback(srcLV, temp, valueToUse); in emitWritebackArg()
4336 args.add(RValue::get(finalArgument), CRE->getType()); in emitWritebackArg()
4363 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; in EmitNonNullArgCheck()
4372 !PVD->getType()->isRecordType()) { in EmitNonNullArgCheck()
4373 auto Nullability = PVD->getType()->getNullability(); in EmitNonNullArgCheck()
4376 PVD->getTypeSourceInfo(); in EmitNonNullArgCheck()
4386 AttrLoc = NNAttr->getLocation(); in EmitNonNullArgCheck()
4390 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); in EmitNonNullArgCheck()
4419 // The Swift calling conventions don't go through the target-specific in hasInAllocaArgs()
4433 // Determine whether the given argument is an Objective-C method
4436 const DeclContext *dc = method->getDeclContext(); in isObjCMethodWithTypeParams()
4438 return classDecl->getTypeParamListAsWritten(); in isObjCMethodWithTypeParams()
4442 return catDecl->getTypeParamList(); in isObjCMethodWithTypeParams()
4449 /// EmitCallArgs - Emit call arguments for a function.
4461 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would in EmitCallArgs()
4471 IsVariadic = MD->isVariadic(); in EmitCallArgs()
4474 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip, in EmitCallArgs()
4475 MD->param_type_end()); in EmitCallArgs()
4478 IsVariadic = FPT->isVariadic(); in EmitCallArgs()
4479 ExplicitCC = FPT->getExtInfo().getCC(); in EmitCallArgs()
4480 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, in EmitCallArgs()
4481 FPT->param_type_end()); in EmitCallArgs()
4491 (isGenericMethod || Ty->isVariablyModifiedType() || in EmitCallArgs()
4492 Ty.getNonReferenceType()->isObjCRetainableType() || in EmitCallArgs()
4496 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && in EmitCallArgs()
4504 "Extra arguments in non-variadic function!"); in EmitCallArgs()
4510 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); in EmitCallArgs()
4511 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); in EmitCallArgs()
4515 // case, there are certain language constructs that require left-to-right in EmitCallArgs()
4527 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); in EmitCallArgs()
4535 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, in EmitCallArgs()
4537 PS->isDynamic()); in EmitCallArgs()
4542 std::swap(Args.back(), *(&Args.back() - 1)); in EmitCallArgs()
4555 unsigned Idx = LeftToRight ? I : E - I - 1; in EmitCallArgs()
4561 getContext().hasSameUnqualifiedType((*Arg)->getType(), in EmitCallArgs()
4573 // non-null argument check for r-value only. in EmitCallArgs()
4576 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, in EmitCallArgs()
4578 // @llvm.objectsize should never have side-effects and shouldn't need in EmitCallArgs()
4580 // regardless of right-to-leftness in EmitCallArgs()
4586 // Un-reverse the arguments we just evaluated so they match up with the LLVM in EmitCallArgs()
4604 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); in Emit()
4605 assert(!Dtor->isTrivial()); in Emit()
4665 assert(type->isReferenceType() == E->isGLValue() && in EmitCallArg()
4666 "reference binding to unmaterialized r-value!"); in EmitCallArg()
4668 if (E->isGLValue()) { in EmitCallArg()
4669 assert(E->getObjectKind() == OK_Ordinary); in EmitCallArg()
4676 // However, we still have to push an EH-only cleanup in case we unwind before in EmitCallArg()
4678 if (type->isRecordType() && in EmitCallArg()
4679 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { in EmitCallArg()
4686 if (const auto *RD = type->getAsCXXRecordDecl()) in EmitCallArg()
4687 DestroyedInCallee = RD->hasNonTrivialDestructor(); in EmitCallArg()
4699 // Create a no-op GEP between the placeholder and the cleanup so we can in EmitCallArg()
4713 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue && in EmitCallArg()
4714 !type->isArrayParameterType()) { in EmitCallArg()
4715 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); in EmitCallArg()
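// Two cases above deserve a note. Record arguments destroyed in the callee
// (as with the Microsoft C++ ABI) still get an EH-only cleanup in the caller:
// if evaluating a later argument throws, the callee never runs, so someone
// must destroy the already-constructed argument. And a plain lvalue-to-rvalue
// conversion of an aggregate is emitted as an LValue and copied directly,
// avoiding a needless temporary.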
4727 // functions to pointer-sized ints. in getVarArgType()
4729 return Arg->getType(); in getVarArgType()
4731 if (Arg->getType()->isIntegerType() && in getVarArgType()
4732 getContext().getTypeSize(Arg->getType()) < in getVarArgType()
4734 Arg->isNullPointerConstant(getContext(), in getVarArgType()
4739 return Arg->getType(); in getVarArgType()
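// Rationale: on Windows, MSVC implicitly widens an integer null-pointer
// constant passed to a varargs function to pointer size, since a 32-bit `0`
// would otherwise leave the upper half of a 64-bit slot undefined; Clang
// matches that behavior here. The classic hazard:
// \code
//   printf("%p", NULL);  // NULL may be plain `0` (an int) on Win64
// \endcode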
4748 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", in AddObjCARCExceptionMetadata()
4752 /// Emits a call to the given no-arguments nounwind runtime function.
4775 call->setDoesNotThrow(); in EmitNounwindRuntimeCall()
4779 /// Emits a simple call (never an invoke) to the given no-arguments runtime function.
4797 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) { in getBundlesForFunclet()
4798 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) { in getBundlesForFunclet()
4799 auto IID = CalleeFn->getIntrinsicID(); in getBundlesForFunclet()
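// Calls emitted inside a Windows EH funclet must carry a "funclet" operand
// bundle naming the enclosing cleanup/catch pad, or WinEH preparation cannot
// attribute them to their funclet. The intrinsic check above screens out the
// handful of nounwind intrinsics (assume-like ones) that are exempt from the
// bundle requirement.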
4816 call->setCallingConv(getRuntimeCC()); in EmitRuntimeCall()
4818 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent()) in EmitRuntimeCall()
4836 invoke->setDoesNotReturn(); in EmitNoreturnRuntimeCallOrInvoke()
4837 invoke->setCallingConv(getRuntimeCC()); in EmitNoreturnRuntimeCallOrInvoke()
4840 call->setDoesNotReturn(); in EmitNoreturnRuntimeCallOrInvoke()
4841 call->setCallingConv(getRuntimeCC()); in EmitNoreturnRuntimeCallOrInvoke()
4859 call->setCallingConv(getRuntimeCC()); in EmitRuntimeCallOrInvoke()
4899 /// If such an attribute already exists, it is reset to the larger of the two alignments.
4926 AA = FuncDecl->getAttr<AlignedAttrTy>(); in AbstractAssumeAlignedAttrEmitter()
4938 // We may legitimately have non-power-of-2 alignment here. in TryEmitAsCallSiteAttribute()
4940 if (!AlignmentCI->getValue().isPowerOf2()) in TryEmitAsCallSiteAttribute()
4945 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); in TryEmitAsCallSiteAttribute()
4957 AA->getLocation(), Alignment, OffsetCI); in EmitAsAnAssumption()
4971 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); in AssumeAlignedAttrEmitter()
4972 if (Expr *Offset = AA->getOffset()) { in AssumeAlignedAttrEmitter()
4974 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. in AssumeAlignedAttrEmitter()
4990 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] in AllocAlignAttrEmitter()
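// Usage sketch for the two emitters, using the documented attribute
// spellings:
// \code
//   void *make(void) __attribute__((assume_aligned(64)));
//   void *grab(int align) __attribute__((alloc_align(1)));
// \endcode
// A constant power-of-two alignment (with no offset) becomes an `align`
// return attribute on the call site; anything else is emitted as an alignment
// assumption, i.e. an llvm.assume carrying an align operand bundle.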
5000 return VT->getPrimitiveSizeInBits().getKnownMinValue(); in getMaxVectorWidth()
5002 return getMaxVectorWidth(AT->getElementType()); in getMaxVectorWidth()
5006 for (auto *I : ST->elements()) in getMaxVectorWidth()
5022 // Handle struct-return functions by passing a pointer to the in EmitCall()
5037 if (TargetDecl->hasAttr<AlwaysInlineAttr>() && in EmitCall()
5038 (TargetDecl->hasAttr<TargetAttr>() || in EmitCall()
5039 (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>()))) in EmitCall()
5043 // Some architectures (such as x86-64) have the ABI changed based on in EmitCall()
5044 // attribute-target/features. Give them a chance to diagnose. in EmitCall()
5059 IP = IP->getNextNode(); in EmitCall()
5066 AI->setAlignment(Align.getAsAlign()); in EmitCall()
5067 AI->setUsedWithInAlloca(true); in EmitCall()
5068 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); in EmitCall()
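// inalloca recap: on 32-bit Windows, objects that must be constructed in
// place are passed directly in the outgoing argument area. A single alloca of
// the packed argument struct is created at the insertion point chosen above,
// flagged used-with-inalloca, and each argument is then written into its
// field of that allocation.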
5082 SRetPtr = makeNaturalAddressForPointer(CurFn->arg_begin() + in EmitCall()
5120 const ABIArgInfo &ArgInfo = info_it->info; in EmitCall()
5137 if (I->isAggregate()) { in EmitCall()
5138 RawAddress Addr = I->hasLValue() in EmitCall()
5139 ? I->getKnownLValue().getAddress() in EmitCall()
5140 : I->getKnownRValue().getAggregateAddress(); in EmitCall()
5155 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp"); in EmitCall()
5165 I->Ty, getContext().getTypeAlignInChars(I->Ty), in EmitCall()
5166 "indirect-arg-temp"); in EmitCall()
5167 I->copyInto(*this, Addr); in EmitCall()
5175 Addr = Addr.withElementType(ConvertTypeForMem(I->Ty)); in EmitCall()
5176 I->copyInto(*this, Addr); in EmitCall()
5184 if (I->isAggregate()) { in EmitCall()
5193 Address Addr = I->hasLValue() in EmitCall()
5194 ? I->getKnownLValue().getAddress() in EmitCall()
5195 : I->getKnownRValue().getAggregateAddress(); in EmitCall()
5199 assert((FirstIRArg >= IRFuncTy->getNumParams() || in EmitCall()
5200 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == in EmitCall()
5201 TD->getAllocaAddrSpace()) && in EmitCall()
5210 } else if (I->hasLValue()) { in EmitCall()
5211 auto LV = I->getKnownLValue(); in EmitCall()
5218 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) { in EmitCall()
5231 Addr.getType()->getAddressSpace() != IRFuncTy-> in EmitCall()
5232 getParamType(FirstIRArg)->getPointerAddressSpace())) { in EmitCall()
5239 llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty); in EmitCall()
5253 // For non-aggregate args and aggregate args meeting conditions above in EmitCall()
5256 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp"); in EmitCall()
5257 llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty); in EmitCall()
5273 I->copyInto(*this, AI); in EmitCall()
5284 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && in EmitCall()
5288 if (!I->isAggregate()) in EmitCall()
5289 V = I->getKnownRValue().getScalarVal(); in EmitCall()
5292 I->hasLValue() ? I->getKnownLValue().getAddress() in EmitCall()
5293 : I->getKnownRValue().getAggregateAddress()); in EmitCall()
5301 QualType pointeeTy = I->Ty->getPointeeType(); in EmitCall()
5306 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); in EmitCall()
5308 cast<llvm::AllocaInst>(V)->setSwiftError(true); in EmitCall()
5315 if (ArgInfo.getCoerceToType() != V->getType() && in EmitCall()
5316 V->getType()->isIntegerTy()) in EmitCall()
5321 if (FirstIRArg < IRFuncTy->getNumParams() && in EmitCall()
5322 V->getType() != IRFuncTy->getParamType(FirstIRArg)) in EmitCall()
5323 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); in EmitCall()
5334 llvm::Type *SrcTy = ConvertTypeForMem(I->Ty); in EmitCall()
5339 if (STy->containsHomogeneousScalableVectorTypes()) { in EmitCall()
5341 "Only allow non-fractional movement of structure with " in EmitCall()
5344 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal(); in EmitCall()
5351 if (!I->isAggregate()) { in EmitCall()
5352 Src = CreateMemTemp(I->Ty, "coerce"); in EmitCall()
5353 I->copyInto(*this, Src); in EmitCall()
5355 Src = I->hasLValue() ? I->getKnownLValue().getAddress() in EmitCall()
5356 : I->getKnownRValue().getAggregateAddress(); in EmitCall()
5362 // Fast-isel and the optimizer generally like scalar values better than in EmitCall()
5370 assert(STy->containsHomogeneousScalableVectorTypes() && in EmitCall()
5374 "Only allow non-fractional movement of structure with " in EmitCall()
5376 assert(NumIRArgs == STy->getNumElements()); in EmitCall()
5380 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in EmitCall()
5390 // coerce-to logic, copy the source value into a temp alloca the size in EmitCall()
5402 assert(NumIRArgs == STy->getNumElements()); in EmitCall()
5403 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in EmitCall()
5421 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType()); in EmitCall()
5422 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType())) in EmitCall()
5423 Load = EmitCMSEClearRecord(Load, ATy, I->Ty); in EmitCall()
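// This is the generic coerce-through-memory path: the value is spilled to (or
// already lives in) a temporary, the temporary is reinterpreted as the ABI
// coercion type, and scalars are loaded back out. The CMSE special case
// scrubs padding bits of records crossing an Arm security-state boundary
// (cmse_nonsecure_call), so stale caller data cannot leak through padding.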
5441 if (I->isAggregate()) { in EmitCall()
5442 addr = I->hasLValue() ? I->getKnownLValue().getAddress() in EmitCall()
5443 : I->getKnownRValue().getAggregateAddress(); in EmitCall()
5446 RValue RV = I->getKnownRValue(); in EmitCall()
5449 llvm::Type *scalarType = RV.getScalarVal()->getType(); in EmitCall()
5455 RV.getScalarVal()->getType(), in EmitCall()
5456 CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)), in EmitCall()
5467 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { in EmitCall()
5468 llvm::Type *eltType = coercionType->getElementType(i); in EmitCall()
5487 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); in EmitCall()
5506 // If the callee is a bitcast of a non-variadic function to have a in EmitCall()
5511 // can inline the function at -O0 if it is marked always_inline. in EmitCall()
5513 llvm::Value *Ptr) -> llvm::Function * { in EmitCall()
5514 if (!CalleeFT->isVarArg()) in EmitCall()
5519 if (CE->getOpcode() == llvm::Instruction::BitCast) in EmitCall()
5520 Ptr = CE->getOperand(0); in EmitCall()
5527 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); in EmitCall()
5531 if (OrigFT->isVarArg() || in EmitCall()
5532 OrigFT->getNumParams() != CalleeFT->getNumParams() || in EmitCall()
5533 OrigFT->getReturnType() != CalleeFT->getReturnType()) in EmitCall()
5536 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) in EmitCall()
5537 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) in EmitCall()
5545 IRFuncTy = OrigFn->getFunctionType(); in EmitCall()
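// This peephole undoes a call through a pointer that was bitcast from a
// non-variadic function to a variadic function type (as unprototyped calls
// can produce). When the fixed parameter and return types match exactly, the
// cast is stripped so the call targets the original function directly, which
// in particular lets always_inline callees still be inlined at -O0.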
5560 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); in EmitCall()
5566 if (i < IRFuncTy->getNumParams()) in EmitCall()
5567 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); in EmitCall()
5574 getMaxVectorWidth(IRCallArgs[i]->getType())); in EmitCall()
5579 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, in EmitCall()
5591 if (FD->hasAttr<StrictFPAttr>()) in EmitCall()
5595 // If -ffast-math is enabled and the function is guarded by an optnone attribute, adjust its memory attribute accordingly. in EmitCall()
5598 if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath) in EmitCall()
5599 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(), in EmitCall()
5602 // Add call-site nomerge attribute if exists. in EmitCall()
5606 // Add call-site noinline attribute if exists. in EmitCall()
5610 // Add call-site always_inline attribute if exists. in EmitCall()
5615 // Apply some call-site-specific attributes. in EmitCall()
5620 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && in EmitCall()
5622 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) { in EmitCall()
5648 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind)) in EmitCall()
5669 // Add the pointer-authentication bundle. in EmitCall()
5673 if (FD->hasAttr<StrictFPAttr>()) in EmitCall()
5693 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() && in EmitCall()
5694 CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) { in EmitCall()
5704 if (const auto *A = FD->getAttr<CFGuardAttr>()) { in EmitCall()
5705 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) in EmitCall()
5711 CI->setAttributes(Attrs); in EmitCall()
5712 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); in EmitCall()
5716 if (!CI->getType()->isVoidTy()) in EmitCall()
5717 CI->setName("call"); in EmitCall()
5719 if (CGM.shouldEmitConvergenceTokens() && CI->isConvergent()) in EmitCall()
5724 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType())); in EmitCall()
5729 if (!CI->getCalledFunction()) in EmitCall()
5740 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) in EmitCall()
5741 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); in EmitCall()
5746 else if (!getTarget().hasFeature("pcrelative-memops")) { in EmitCall()
5749 else if (Call->isIndirectCall()) in EmitCall()
5752 if (!cast<FunctionDecl>(TargetDecl)->isDefined()) in EmitCall()
5769 Call->setTailCallKind(llvm::CallInst::TCK_MustTail); in EmitCall()
5775 TargetDecl->hasAttr<MSAllocatorAttr>()) in EmitCall()
5776 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc); in EmitCall()
5779 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) { in EmitCall()
5784 CI->setMetadata("srcloc", MDT); in EmitCall()
5792 if (CI->doesNotReturn()) { in EmitCall()
5800 if (auto *F = CI->getCalledFunction()) in EmitCall()
5801 F->removeFnAttr(llvm::Attribute::NoReturn); in EmitCall()
5802 CI->removeFnAttr(llvm::Attribute::NoReturn); in EmitCall()
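// Once a call is known not to return, the rest of the block is dead and an
// unreachable terminator follows. The noreturn attribute is stripped first
// under -fsanitize=unreachable so the sanitizer gets a chance to trap instead
// of the optimizer silently folding the path away.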
5836 if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn())) in EmitCall()
5839 if (CI->getType()->isVoidTy()) in EmitCall()
5854 // Emit any call-associated writebacks immediately. Arguably this in EmitCall()
5855 // should happen after any return-value munging. in EmitCall()
5878 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); in EmitCall()
5879 bool requiresExtract = isa<llvm::StructType>(CI->getType()); in EmitCall()
5882 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { in EmitCall()
5883 llvm::Type *eltType = coercionType->getElementType(i); in EmitCall()
5927 if (V->getType() != RetIRTy) in EmitCall()
5940 dyn_cast<llvm::ScalableVectorType>(V->getType())) { in EmitCall()
5941 if (FixedDstTy->getElementType() == in EmitCall()
5942 ScalableSrcTy->getElementType()) { in EmitCall()
5963 // no_unique_address); omit the store for such types - as there is no in EmitCall()
5970 llvm::TypeSize::getFixed(DestSize - RetAI.getDirectOffset()), in EmitCall()
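// Return-value coercion mirrors the argument side: a scalable-vector return
// carrying a fixed-width value is unpacked with llvm.vector.extract, and
// direct aggregate returns are stored back with the size clamped to the
// type's actual data size, so tail padding that another object may occupy
// (e.g. via [[no_unique_address]]) is never overwritten.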
5992 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though in EmitCall()
6010 CE ? CE->getBeginLoc() : SourceLocation()); in prepareConcreteCallee()
6020 VAListAddr = VE->isMicrosoftABI() ? EmitMSVAListRef(VE->getSubExpr()) in EmitVAArg()
6021 : EmitVAListRef(VE->getSubExpr()); in EmitVAArg()
6022 QualType Ty = VE->getType(); in EmitVAArg()
6023 if (VE->isMicrosoftABI()) in EmitVAArg()
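// On the Microsoft ABI every va_list is a simple char pointer, so its lvalue
// is taken with EmitMSVAListRef and lowering is handed to the target's
// EmitMSVAArg; other targets go through EmitVAListRef and the generic ABI
// hook. Source-level shape, for reference:
// \code
//   va_list ap;
//   va_start(ap, last);
//   int x = va_arg(ap, int);  // lowered by the target ABIInfo
//   va_end(ap);
// \endcode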