xref: /freebsd/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp (revision 9aaf4e3be61fc20a84347b7c2c524256a4b93a43)
//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
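  // For example, the lp64d ABI has XLen==64 and FLen==64, ilp32f has XLen==32
  // and FLen==32, and the soft-float ilp32/lp64 ABIs have FLen==0.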
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
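  // These correspond to the eight integer argument registers a0-a7 and the
  // eight floating-point argument registers fa0-fa7.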
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirectly, or if the return type is a scalar wider than 2*XLen
  // and not a complex type with elements <= FLen. For example, fp128 is
  // passed directly in LLVM IR, relying on the backend lowering code to
  // rewrite the argument list and pass it indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

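  // An indirect return value is returned via a pointer passed in the first
  // argument register, so it consumes one of the eight argument GPRs.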
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is still
// responsible for checking that, when there is only a single field, that
// field is a floating-point type.
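// For example, with FLen >= 64, struct { double d; int32_t i; } flattens to
// one FP field plus one integer field and is eligible, while
// struct { int a; int b; } is rejected because int+int pairs are not eligible.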
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of half
    // precision values has been specified in the ABI, so don't block those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

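  // A complex type is treated as a pair of identical floating-point fields;
  // for example, _Complex double occupies two FPRs when FLen >= 64.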
  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

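  // Arrays are flattened element by element; for example, struct { float f[2]; }
  // flattens to float+float, while a three-element float array exceeds the
  // two-field limit and is rejected.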
  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible for
    // the FP calling convention in C++.
    if (const auto *RTy = EltTy->getAs<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
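      // For example, struct { float f; int : 0; float g; } is not eligible,
      // while struct { float f; int : 0; } still is.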
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
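// For example, struct { double x; float y; } needs two FPRs, while
// struct { double x; int y; } needs one FPR and one GPR (assuming FLen >= 64).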
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  if (!Field1Ty)
    return false;
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
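// For example, struct { float f; double d; } becomes the LLVM struct
// { float, double }, whose implicit padding already matches the C layout; an
// explicit [N x i8] element is only inserted when a field sits beyond the
// offset that natural (or packed) struct layout would give it.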
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getVectorKind() == VectorType::RVVFixedLengthDataVector &&
         "Unexpected vector kind");

  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  auto VScale =
      getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());
  // The MinNumElts is simplified from equation:
  // NumElts / VScale =
  //  (EltSize * NumElts / (VScale * RVVBitsPerBlock))
  //    * (RVVBitsPerBlock / EltSize)
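  // For example, with VLEN==128 (vscale==2) a fixed-length <4 x i32> value is
  // lowered to <vscale x 2 x i32>.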
  llvm::ScalableVectorType *ResType =
      llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
                                    VT->getNumElements() / VScale->first);
  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
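  // For example, a variadic __int128 on RV64 (or int64_t on RV32) must start
  // in an even-numbered register, so an odd-numbered free register may be
  // skipped and NeededArgGPRs becomes 3.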
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = Ty->getAs<VectorType>())
    if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector)
      return coerceVLSVector(Ty);

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
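    // For example, on RV64 struct { int32_t a; int32_t b; } is coerced to i64
    // and struct { int64_t a; int32_t b; } to [2 x i64].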
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;
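  // The limits reflect the return registers: at most two GPRs (a0/a1) and,
  // under the hard-float ABI, at most two FPRs (fa0/fa1).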

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    return Address(CGF.Builder.CreateLoad(VAListAddr),
                   CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bits (i.e. two argument slots) are passed
  // indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign-extended.
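  // For example, a uint32_t parameter becomes a signext i32 argument, matching
  // the convention of keeping 32-bit values sign-extended in 64-bit registers.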
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

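    // The "interrupt" IR attribute tells the backend to emit an interrupt
    // handler prologue/epilogue and to return with mret or sret depending on
    // the kind.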
    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen);
}