//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  // Number of GPRs available for argument passing: the EABI variant reserves
  // 6, all other ABIs 8 (see the constructor initializer list).
  const int NumArgGPRs;
  // Number of FPRs available for argument passing: 8 for hard-float ABIs
  // (FLen != 0), otherwise 0.
  const int NumArgFPRs;
  const bool EABI;
  // Recursive worker for detectFPCCEligibleStruct; flattens Ty into at most
  // two scalar fields (type + byte offset each).
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen,
               bool EABI)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
        NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we overload it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type size is a scalar greater than 2*XLen
  // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
  // in LLVM IR, relying on the backend lowering code to rewrite the argument
  // list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  // An indirect return consumes one GPR for the implicit sret pointer.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = NumArgFPRs;
  int NumFixedArgs = FI.getNumRequiredArgs();

  // Classify arguments in order; the register counters are threaded through
  // by reference so each argument sees what the previous ones consumed.
  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of half
    // precision values has been specified in the ABI, so don't block those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    // Record the scalar in the first free field slot.
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    // More than two scalar fields: not eligible.
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    // A complex value occupies both field slots (real then imaginary part),
    // so it is only eligible when no field has been recorded yet.
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible for
    // the FP calling convention in C++.
    if (const auto *RTy = EltTy->getAs<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    // Flatten each array element in turn at its running byte offset.
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    // Eligible only if at least one field was recorded while flattening.
    return Field1Ty != nullptr;
  }

  return false;
}
If so, NeededArgFPRs and 237 // NeededArgGPRs are incremented appropriately. 238 bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, 239 CharUnits &Field1Off, 240 llvm::Type *&Field2Ty, 241 CharUnits &Field2Off, 242 int &NeededArgGPRs, 243 int &NeededArgFPRs) const { 244 Field1Ty = nullptr; 245 Field2Ty = nullptr; 246 NeededArgGPRs = 0; 247 NeededArgFPRs = 0; 248 bool IsCandidate = detectFPCCEligibleStructHelper( 249 Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off); 250 if (!Field1Ty) 251 return false; 252 // Not really a candidate if we have a single int but no float. 253 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy()) 254 return false; 255 if (!IsCandidate) 256 return false; 257 if (Field1Ty && Field1Ty->isFloatingPointTy()) 258 NeededArgFPRs++; 259 else if (Field1Ty) 260 NeededArgGPRs++; 261 if (Field2Ty && Field2Ty->isFloatingPointTy()) 262 NeededArgFPRs++; 263 else if (Field2Ty) 264 NeededArgGPRs++; 265 return true; 266 } 267 268 // Call getCoerceAndExpand for the two-element flattened struct described by 269 // Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an 270 // appropriate coerceToType and unpaddedCoerceToType. 
// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  // Padding before the first field is modelled as an i8 array element of the
  // coerced struct (padding elements do not appear in the unpadded type).
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    // Single-field case: the unpadded type is the bare field type, and the
    // coerced struct is packed only when leading padding was needed.
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  // Work out where the second field would land in a naturally-laid-out
  // (unpacked) struct, so we can tell whether explicit padding or packing is
  // required to reproduce the source record's offsets.
  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  // If the second field is not aligned to its natural alignment, the coerced
  // struct must be packed to place it at the exact source offset.
  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  auto VScale =
      getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());

  unsigned NumElts = VT->getNumElements();
  llvm::Type *EltType;
  if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
    // Mask vectors are lowered with i1 elements; the element count is scaled
    // up by 8 accordingly.
    NumElts *= 8;
    EltType = llvm::Type::getInt1Ty(getVMContext());
  } else {
    assert(VT->getVectorKind() == VectorKind::RVVFixedLengthData &&
           "Unexpected vector kind");
    EltType = CGT.ConvertType(VT->getElementType());
  }

  // The MinNumElts is simplified from equation:
  // NumElts / VScale =
  //  (EltSize * NumElts / (VScale * RVVBitsPerBlock))
  //   * (RVVBitsPerBlock / EltSize)
  llvm::ScalableVectorType *ResType =
      llvm::ScalableVectorType::get(EltType, NumElts / VScale->first);
  return ABIArgInfo::getDirect(ResType);
}

// Classify a single argument, updating the caller's running count of free
// argument GPRs/FPRs. IsFixed is false for the variadic portion of a call.
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    // The pointer to the indirect value itself occupies a GPR if available.
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      // Both the real and imaginary parts take an FPR.
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  // Hard-float ABIs may pass small structs (fp, fp+fp, or int+fp when
  // flattened) partly or wholly in FPRs; fall back to the integer convention
  // below if not enough registers remain.
  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  // Clamp to what is actually available; the remainder goes on the stack.
  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty);
      // _BitInt wider than the largest supported integer type is passed
      // indirectly (over 128 bits, or over 64 without __int128 support).
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = Ty->getAs<VectorType>())
    if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
      return coerceVLSVector(Ty);

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Returns may use at most two GPRs and (for hard-float ABIs) two FPRs.
  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

// Emit the address of the next va_arg value of type Ty, advancing VAListAddr.
Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    return Address(CGF.Builder.CreateLoad(VAListAddr),
                   CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // TODO: To be compatible with GCC's behaviors, we force arguments with
  // 2×XLEN-bit alignment and size at most 2×XLEN bits like `long long`,
  // `unsigned long long` and `double` to have 4-byte alignment. This
  // behavior may be changed when RV32E/ILP32E is ratified.
  if (EABI && XLen == 32)
    TInfo.Align = std::min(TInfo.Align, CharUnits::fromQuantity(4));

  // Arguments bigger than 2*Xlen bytes are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // RV64 ABI requires unsigned 32 bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen, bool EABI)
      : TargetCodeGenInfo(
            std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {}

  // Translate the RISC-V interrupt attribute into the "interrupt" IR function
  // attribute on functions that carry it.
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen, bool EABI) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen,
                                                  EABI);
}