//===- LoongArch.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

// LoongArch ABI Implementation. Documented at
// https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
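//
// A short summary of the two parameters below (see the ABI document above for
// the authoritative description): GRLen is the width in bits of the
// general-purpose argument registers (32 on LA32, 64 on LA64), and FRLen is
// the width of the floating-point argument registers (64 for the *d ABIs, 32
// for the *f ABIs, 0 for the soft-float ABIs, in which case floating-point
// values are passed in GARs or on the stack).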
//
//===----------------------------------------------------------------------===//

namespace {
class LoongArchABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('r') registers in bits.
  unsigned GRLen;
  // Size of the floating point ('f') registers in bits.
  unsigned FRLen;
  // Number of general-purpose argument registers.
  static const int NumGARs = 8;
  // Number of floating-point argument registers.
  static const int NumFARs = 8;
  bool detectFARsEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  LoongArchABIInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen, unsigned FRLen)
      : DefaultABIInfo(CGT), GRLen(GRLen), FRLen(FRLen) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &GARsLeft,
                                  int &FARsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFARsEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededGARs,
                                int &NeededFARs) const;
  ABIArgInfo coerceAndExpandFARsEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace

void LoongArchABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type is a scalar larger than 2*GRLen and not
  // a complex type with elements <= FRLen. E.g. fp128 is passed direct in LLVM
  // IR, relying on the backend lowering code to rewrite the argument list and
  // pass it indirectly on LA32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * GRLen)) {
    if (RetTy->isComplexType() && FRLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FRLen;
    } else {
      // This is a normal scalar > 2*GRLen, such as fp128 on LA32.
      IsRetIndirect = true;
    }
  }

  // We must track the number of GARs and FARs used in order to conform to the
  // LoongArch ABI. As GAR usage is different for variadic arguments, we must
  // also track whether we are examining a vararg or not.
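  // For example (illustrative, assuming the lp64d ABI where GRLen and FRLen
  // are both 64): for a call to 'int f(double a, int b, ...)' with one
  // variadic double, 'a' consumes one FAR, 'b' consumes one GAR, and the
  // variadic double consumes a GAR rather than a FAR, since FARs are only
  // assigned to fixed arguments below. If the return value is passed
  // indirectly, the first GAR is reserved for the sret pointer.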
  int GARsLeft = IsRetIndirect ? NumGARs - 1 : NumGARs;
  int FARsLeft = FRLen ? NumFARs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    ArgInfo.info = classifyArgumentType(
        ArgInfo.type, /*IsFixed=*/ArgNum < NumFixedArgs, GARsLeft, FARsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate to be passed in FARs
// (and GARs). If this function returns true, the caller is responsible for
// checking that, if there is only a single field, that field is a float.
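// For example (illustrative): 'struct { float f; int i; }' flattens to
// Field1Ty = float at offset 0 and Field2Ty = i32 at offset 4 and is a
// candidate; 'struct { int a; int b; }' is rejected because int+int pairs are
// not eligible; with FRLen >= 64, 'double _Complex' flattens to two doubles
// and is a candidate; any struct that flattens to more than two scalars is
// rejected.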
bool LoongArchABIInfo::detectFARsEligibleStructHelper(
    QualType Ty, CharUnits CurOff, llvm::Type *&Field1Ty, CharUnits &Field1Off,
    llvm::Type *&Field2Ty, CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > GRLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on LoongArch and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FRLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FRLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible to be
    // passed via FARs in C++.
    if (const auto *RTy = EltTy->getAs<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      if (!detectFARsEligibleStructHelper(EltTy, CurOff, Field1Ty, Field1Off,
                                          Field2Ty, Field2Off))
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        if (!detectFARsEligibleStructHelper(
                B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
                Field1Ty, Field1Off, Field2Ty, Field2Off))
          return false;
      }
    }
    for (const FieldDecl *FD : RD->fields()) {
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Zero-width bitfields are ignored.
        if (BitWidth == 0)
          continue;
        // Allow a bitfield with a type greater than GRLen as long as the
        // bitwidth is GRLen or less.
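        // For example (illustrative), on LA64 a field declared as
        // '__int128 x : 64' is examined as a 64-bit unsigned integer rather
        // than as an i128, so it can still pair with a float field.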
        if (getContext().getTypeSize(QTy) > GRLen && BitWidth <= GRLen) {
          QTy = getContext().getIntTypeForBitwidth(GRLen, false);
        }
      }

      if (!detectFARsEligibleStructHelper(
              QTy,
              CurOff + getContext().toCharUnitsFromBits(
                           Layout.getFieldOffset(FD->getFieldIndex())),
              Field1Ty, Field1Off, Field2Ty, Field2Off))
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible to be passed in FARs (and GARs) (i.e., when
// flattened it contains a single fp value, fp+fp, or int+fp of appropriate
// size). If so, NeededFARs and NeededGARs are incremented appropriately.
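// For example (illustrative): 'struct { double d; int i; }' needs one FAR and
// one GAR; 'struct { float a; float b; }' needs two FARs; a struct whose only
// flattened field is an integer is rejected here because no FAR would be used.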
bool LoongArchABIInfo::detectFARsEligibleStruct(
    QualType Ty, llvm::Type *&Field1Ty, CharUnits &Field1Off,
    llvm::Type *&Field2Ty, CharUnits &Field2Off, int &NeededGARs,
    int &NeededFARs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededGARs = 0;
  NeededFARs = 0;
  if (!detectFARsEligibleStructHelper(Ty, CharUnits::Zero(), Field1Ty,
                                      Field1Off, Field2Ty, Field2Off))
    return false;
  if (!Field1Ty)
    return false;
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededFARs++;
  else if (Field1Ty)
    NeededGARs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededFARs++;
  else if (Field2Ty)
    NeededGARs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
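// For example (illustrative), on LA64 'struct { double d; int i; }' yields a
// coerceToType of '{ double, i32 }' and is passed in one FAR plus one GAR. If
// the first field does not start at offset zero, a leading '[N x i8]' element
// is emitted below to preserve its offset.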
ABIArgInfo LoongArchABIInfo::coerceAndExpandFARsEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End =
      Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  return ABIArgInfo::getCoerceAndExpand(
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked),
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked));
}

ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                                  int &GARsLeft,
                                                  int &FARsLeft) const {
  assert(GARsLeft <= NumGARs && "GAR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (GARsLeft)
      GARsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FARs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FRLen >= Size && FARsLeft) {
    FARsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the *f or *d ABI must be passed directly rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FRLen && FARsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FRLen) {
      FARsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FRLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededGARs = 0;
    int NeededFARs = 0;
    bool IsCandidate = detectFARsEligibleStruct(
        Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, NeededGARs, NeededFARs);
    if (IsCandidate && NeededGARs <= GARsLeft && NeededFARs <= FARsLeft) {
      GARsLeft -= NeededGARs;
      FARsLeft -= NeededFARs;
      return coerceAndExpandFARsEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GARs needed to pass the current argument
  // according to the ABI. 2*GRLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
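  // For example, a variadic '__int128' on LA64 is 2*GRLen aligned: it takes 2
  // GARs when an even number of GARs remain, but 3 when an odd number remain,
  // because one register is skipped so that the value starts at an
  // even-numbered register.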
  int NeededGARs = 1;
  if (!IsFixed && NeededAlign == 2 * GRLen)
    NeededGARs = 2 + (GARsLeft % 2);
  else if (Size > GRLen && Size <= 2 * GRLen)
    NeededGARs = 2;

  if (NeededGARs > GARsLeft)
    NeededGARs = GARsLeft;

  GARsLeft -= NeededGARs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to GRLen width.
    if (Size < GRLen && Ty->isIntegralOrEnumerationType())
      return extendType(Ty);

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < GRLen)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*GRLen will be passed in registers if possible,
  // so coerce to integers.
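  // For example (illustrative), on LA64 an 8-byte struct is coerced to i64, a
  // 12-byte struct of three ints to [2 x i64], and a 16-byte-aligned 16-byte
  // aggregate to i128; anything larger falls through to the indirect case
  // below.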
  if (Size <= 2 * GRLen) {
    // Use a single GRLen int if possible, 2*GRLen if 2*GRLen alignment is
    // required, and a 2-element GRLen array if only GRLen alignment is
    // required.
    if (Size <= GRLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), GRLen));
    }
    if (getContext().getTypeAlign(Ty) == 2 * GRLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * GRLen));
    }
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(llvm::IntegerType::get(getVMContext(), GRLen), 2));
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo LoongArchABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
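  // For example (illustrative), with lp64d 'struct { double a; double b; }' is
  // returned in two FARs, 'struct { long a; long b; }' is returned in two GARs
  // as [2 x i64], and any aggregate larger than 2*GRLen is returned indirectly
  // through a hidden pointer.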
  int GARsLeft = 2;
  int FARsLeft = FRLen ? 2 : 0;
  return classifyArgumentType(RetTy, /*IsFixed=*/true, GARsLeft, FARsLeft);
}

Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(GRLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Address(CGF.Builder.CreateLoad(VAListAddr),
                   CGF.ConvertTypeForMem(Ty), SlotSize);

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*GRLen bytes are passed indirectly.
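  // For example (illustrative), on LA64 the slot size is 8 bytes, so a 16-byte
  // struct is read from two consecutive slots, while a 24-byte struct is
  // fetched through a pointer stored in its slot.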
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty,
                          /*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo,
                          SlotSize,
                          /*AllowHigherAlign=*/true);
}

ABIArgInfo LoongArchABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The LA64 ABI requires unsigned 32-bit integers to be sign-extended.
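  // For example (illustrative), a 'uint32_t' argument is sign-extended here,
  // while other integers narrower than GRLen go through getExtend below, which
  // extends according to the type's own signedness (e.g. 'unsigned short' is
  // zero-extended).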
  if (GRLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class LoongArchTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LoongArchTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen,
                             unsigned FRLen)
      : TargetCodeGenInfo(
            std::make_unique<LoongArchABIInfo>(CGT, GRLen, FRLen)) {}
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
                                          unsigned FLen) {
  return std::make_unique<LoongArchTargetCodeGenInfo>(CGM.getTypes(), GRLen,
                                                      FLen);
}
459