//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirectly, or if the return type is a scalar larger than
  // 2*XLen and not a complex type with elements <= FLen. e.g. fp128 is passed
  // direct in LLVM IR, relying on the backend lowering code to rewrite the
  // argument list and pass it indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }
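  // e.g. on RV32, a returned fp128 stays ABIArgInfo::Direct in the IR but the
  // backend rewrites it to an sret-style pointer, so a GPR is reserved here.
  // By contrast, on ilp32d (FLen=64) a returned double _Complex is also
  // 128 bits, yet each 64-bit element fits in an FPR, so no GPR is reserved.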

  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that, if there is only a single field, that field
// is a float.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of
    // half-precision values has been specified in the ABI, so don't block
    // those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type wider than XLen as long as the
        // bit-width is XLen or less.
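        // e.g. on RV32, a 'long long x : 20' member is treated as a 32-bit
        // integer field for eligibility purposes.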
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
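      // e.g. 'struct { float f; int : 0; float g; }' is rejected below once
      // the second field has been seen, while 'struct { float f; int : 0; }'
      // stays eligible because no field follows the zero-width bitfield.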
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
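// e.g. with the lp64d ABI, 'struct { double d; float f; }' needs two FPRs,
// and 'struct { int i; double d; }' needs one GPR and one FPR.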
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
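// e.g. 'struct { int8_t c; float f; }' (Field1 = i8 at offset 0, Field2 =
// float at offset 4) coerces to a non-packed '{ i8, float }', whose natural
// layout already places the float at offset 4, so no explicit padding
// element is needed.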
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
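// e.g. a fixed-length vector of i32 elements becomes '<vscale x 2 x i32>',
// since RVVBitsPerBlock (64) divided by the 32-bit element size gives a
// minimum of two elements per block.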
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getVectorKind() == VectorType::RVVFixedLengthDataVector &&
         "Unexpected vector kind");

  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  const auto *BT = VT->getElementType()->castAs<BuiltinType>();
  unsigned EltSize = getContext().getTypeSize(BT);
  llvm::ScalableVectorType *ResType =
        llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
                                      llvm::RISCV::RVVBitsPerBlock / EltSize);
  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
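  // e.g. with the lp64d ABI, a fixed 'double _Complex' argument is passed
  // direct and consumes two FPRs, one per element.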
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
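  // e.g. on RV32, a variadic 'long long' (64-bit size and alignment) with an
  // odd number of GPRs remaining skips one register to reach an aligned
  // even/odd pair, consuming three GPRs in total.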
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width.
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = Ty->getAs<VectorType>())
    if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector)
      return coerceVLSVector(Ty);

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
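    // e.g. on RV64, a 12-byte struct with 8-byte alignment coerces to
    // [2 x i64], while on RV32 a 'struct { long long x; }' (64-bit alignment,
    // i.e. 2*XLen) coerces to a single i64.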
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    return Address(CGF.Builder.CreateLoad(VAListAddr),
                   CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bits are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // RV64 ABI requires unsigned 32-bit integers to be sign extended.
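  // e.g. an 'unsigned int' argument of 0x80000000 is passed in a GPR as
  // 0xFFFFFFFF80000000, matching the sign-extended form that RV64's 32-bit
  // instructions (addiw, lw, ...) naturally produce.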
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen);
}