//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
  ARMABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIT64:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::GNUEABIHFT64:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return getTarget().getTriple().isOHOSFamily();
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::GNUEABIHFT64:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ARMABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
};

class ARMSwiftABIInfo : public SwiftABIInfo {
public:
  explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
    SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo<ARMABIInfo>().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI{};
        StringRef DiagMsg;
        StringRef Arch =
            Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else
          setBranchProtectionFnAttributes(BPI, *Fn);
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the Branch Protection attribute is missing, validate the target
        // Architecture attribute against Branch Protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.CPU;
      }
    } else if (CGM.getTarget().isBranchProtectionSupportedArch(
                   CGM.getTarget().getTargetOpts().CPU)) {
      TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());
      setBranchProtectionFnAttributes(BPI, *Fn);
    }

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);

    ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
    if (ABI == ARMABIKind::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
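    // (This is emitted as the alignstack(8) function attribute in the IR.)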
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case ARMABIKind::APCS:
    return llvm::CallingConv::ARM_APCS;
  case ARMABIKind::AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case ARMABIKind::AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case ARMABIKind::AAPCS16_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
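  // (For example, on a hard-float EABI target such as
  // armv7-unknown-linux-gnueabihf, both getABIDefaultCC() and
  // getLLVMDefaultCC() are ARM_AAPCS_VFP, so RuntimeCC is left as
  // CallingConv::C and no explicit annotation is emitted.)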
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  unsigned Align = 0;
  if (getABIKind() == ARMABIKind::AAPCS ||
      getABIKind() == ARMABIKind::AAPCS_VFP) {
    // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
    // default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic &&
      isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
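  // (An empty record such as `struct Empty {};` passed by value occupies no
  // registers or stack space, so it is dropped from the lowered signature.)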
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIKind::AAPCS_VFP ||
      getABIKind() == ARMABIKind::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
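  // (On 32-bit ARM a word is 32 bits: e.g. `struct { int x; }` and
  // `union { int i; char c; }` can be integer-like, but nothing larger than
  // 4 bytes can be.)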
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic &&
      isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
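    // (e.g. an enum whose underlying type is `short` is sign-extended in r0
    // exactly as a plain `short` return value would be)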
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == ARMABIKind::APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return true to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // Size should be greater than 32 bits; vectors of 32 bits or fewer are
      // illegal and get coerced.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}

bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                        unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
  if (size > 64)
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
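  // (e.g. `struct { float x, y, z; }` and `struct { double d[2]; }` are HFAs;
  // `struct { float f; double d; }` is not, because the base types differ.)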
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // AAPCS32 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
}

RValue ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty, AggValueSlot Slot) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Slot.asRValue();

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

    // ARMv7k passes structs bigger than 16 bytes indirectly, in space
    // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIKind::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

    // Otherwise, bound the type's ABI alignment.
    // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
    // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
    // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
             getABIKind() == ARMABIKind::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
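    // (so, unlike plain AAPCS above, a 16-byte-aligned type keeps its natural
    // alignment here rather than being capped at 8)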
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true, Slot);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
  return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
  return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
}
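
// A rough sketch of how callers are expected to pick between these factories
// (the real dispatch lives in CodeGenModule's target-info setup and switches
// on the target triple):
//
//   std::unique_ptr<TargetCodeGenInfo> Info =
//       Triple.isOSWindows()
//           ? CodeGen::createWindowsARMTargetCodeGenInfo(CGM, Kind)
//           : CodeGen::createARMTargetCodeGenInfo(CGM, Kind);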