xref: /freebsd/contrib/llvm-project/clang/lib/CIR/CodeGen/CIRGenTypes.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 #include "CIRGenTypes.h"
2 
3 #include "CIRGenFunctionInfo.h"
4 #include "CIRGenModule.h"
5 
6 #include "clang/AST/ASTContext.h"
7 #include "clang/AST/GlobalDecl.h"
8 #include "clang/AST/Type.h"
9 #include "clang/Basic/TargetInfo.h"
10 
11 #include <cassert>
12 
13 using namespace clang;
14 using namespace clang::CIRGen;
15 
CIRGenTypes(CIRGenModule & genModule)16 CIRGenTypes::CIRGenTypes(CIRGenModule &genModule)
17     : cgm(genModule), astContext(genModule.getASTContext()),
18       builder(cgm.getBuilder()), theCXXABI(cgm.getCXXABI()),
19       theABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {}
20 
~CIRGenTypes()21 CIRGenTypes::~CIRGenTypes() {
22   for (auto i = functionInfos.begin(), e = functionInfos.end(); i != e;)
23     delete &*i++;
24 }
25 
getMLIRContext() const26 mlir::MLIRContext &CIRGenTypes::getMLIRContext() const {
27   return *builder.getContext();
28 }
29 
30 /// Return true if the specified type in a function parameter or result position
31 /// can be converted to a CIR type at this point. This boils down to being
32 /// whether it is complete, as well as whether we've temporarily deferred
33 /// expanding the type because we're in a recursive context.
isFuncParamTypeConvertible(clang::QualType type)34 bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType type) {
35   // Some ABIs cannot have their member pointers represented in LLVM IR unless
36   // certain circumstances have been reached.
37   assert(!type->getAs<MemberPointerType>() && "NYI");
38 
39   // If this isn't a tag type, we can convert it.
40   const TagType *tagType = type->getAs<TagType>();
41   if (!tagType)
42     return true;
43 
44   // Function types involving incomplete class types are problematic in MLIR.
45   return !tagType->isIncompleteType();
46 }
47 
48 /// Code to verify a given function type is complete, i.e. the return type and
49 /// all of the parameter types are complete. Also check to see if we are in a
50 /// RS_StructPointer context, and if so whether any struct types have been
51 /// pended. If so, we don't want to ask the ABI lowering code to handle a type
52 /// that cannot be converted to a CIR type.
isFuncTypeConvertible(const FunctionType * ft)53 bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *ft) {
54   if (!isFuncParamTypeConvertible(ft->getReturnType()))
55     return false;
56 
57   if (const auto *fpt = dyn_cast<FunctionProtoType>(ft))
58     for (unsigned i = 0, e = fpt->getNumParams(); i != e; i++)
59       if (!isFuncParamTypeConvertible(fpt->getParamType(i)))
60         return false;
61 
62   return true;
63 }
64 
convertFunctionTypeInternal(QualType qft)65 mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) {
66   assert(qft.isCanonical());
67   const FunctionType *ft = cast<FunctionType>(qft.getTypePtr());
68   // First, check whether we can build the full function type. If the function
69   // type depends on an incomplete type (e.g. a struct or enum), we cannot lower
70   // the function type.
71   if (!isFuncTypeConvertible(ft)) {
72     cgm.errorNYI(SourceLocation(), "function type involving an incomplete type",
73                  qft);
74     return cir::FuncType::get(SmallVector<mlir::Type, 1>{}, cgm.VoidTy);
75   }
76 
77   const CIRGenFunctionInfo *fi;
78   if (const auto *fpt = dyn_cast<FunctionProtoType>(ft)) {
79     fi = &arrangeFreeFunctionType(
80         CanQual<FunctionProtoType>::CreateUnsafe(QualType(fpt, 0)));
81   } else {
82     const FunctionNoProtoType *fnpt = cast<FunctionNoProtoType>(ft);
83     fi = &arrangeFreeFunctionType(
84         CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(fnpt, 0)));
85   }
86 
87   mlir::Type resultType = getFunctionType(*fi);
88 
89   return resultType;
90 }
91 
92 // This is CIR's version of CodeGenTypes::addRecordTypeName. It isn't shareable
93 // because CIR has different uniquing requirements.
getRecordTypeName(const clang::RecordDecl * recordDecl,StringRef suffix)94 std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl,
95                                            StringRef suffix) {
96   llvm::SmallString<256> typeName;
97   llvm::raw_svector_ostream outStream(typeName);
98 
99   PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy();
100   policy.SuppressInlineNamespace = false;
101   policy.AlwaysIncludeTypeForTemplateArgument = true;
102   policy.PrintAsCanonical = true;
103   policy.SuppressTagKeyword = true;
104 
105   if (recordDecl->getIdentifier())
106     astContext.getRecordType(recordDecl).print(outStream, policy);
107   else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
108     typedefNameDecl->printQualifiedName(outStream, policy);
109   else
110     outStream << builder.getUniqueAnonRecordName();
111 
112   if (!suffix.empty())
113     outStream << suffix;
114 
115   return builder.getUniqueRecordName(std::string(typeName));
116 }
117 
118 /// Return true if the specified type is already completely laid out.
isRecordLayoutComplete(const Type * ty) const119 bool CIRGenTypes::isRecordLayoutComplete(const Type *ty) const {
120   const auto it = recordDeclTypes.find(ty);
121   return it != recordDeclTypes.end() && it->second.isComplete();
122 }
123 
// The overloads of isSafeToConvert below call each other recursively
// (record -> field type -> record), so the QualType form must be declared
// before the RecordDecl form that uses it.
static bool
isSafeToConvert(QualType qt, CIRGenTypes &cgt,
                llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked);
129 
130 /// Return true if it is safe to convert the specified record decl to CIR and
131 /// lay it out, false if doing so would cause us to get into a recursive
132 /// compilation mess.
133 static bool
isSafeToConvert(const RecordDecl * rd,CIRGenTypes & cgt,llvm::SmallPtrSetImpl<const RecordDecl * > & alreadyChecked)134 isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
135                 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
136   // If we have already checked this type (maybe the same type is used by-value
137   // multiple times in multiple record fields, don't check again.
138   if (!alreadyChecked.insert(rd).second)
139     return true;
140 
141   const Type *key = cgt.getASTContext().getTagDeclType(rd).getTypePtr();
142 
143   // If this type is already laid out, converting it is a noop.
144   if (cgt.isRecordLayoutComplete(key))
145     return true;
146 
147   // If this type is currently being laid out, we can't recursively compile it.
148   if (cgt.isRecordBeingLaidOut(key))
149     return false;
150 
151   // If this type would require laying out bases that are currently being laid
152   // out, don't do it.  This includes virtual base classes which get laid out
153   // when a class is translated, even though they aren't embedded by-value into
154   // the class.
155   if (auto *crd = dyn_cast<CXXRecordDecl>(rd)) {
156     if (crd->getNumBases() > 0) {
157       assert(!cir::MissingFeatures::cxxSupport());
158       cgt.getCGModule().errorNYI(rd->getSourceRange(),
159                                  "isSafeToConvert: CXXRecordDecl with bases");
160       return false;
161     }
162   }
163 
164   // If this type would require laying out members that are currently being laid
165   // out, don't do it.
166   for (const FieldDecl *field : rd->fields())
167     if (!isSafeToConvert(field->getType(), cgt, alreadyChecked))
168       return false;
169 
170   // If there are no problems, lets do it.
171   return true;
172 }
173 
174 /// Return true if it is safe to convert this field type, which requires the
175 /// record elements contained by-value to all be recursively safe to convert.
176 static bool
isSafeToConvert(QualType qt,CIRGenTypes & cgt,llvm::SmallPtrSetImpl<const RecordDecl * > & alreadyChecked)177 isSafeToConvert(QualType qt, CIRGenTypes &cgt,
178                 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
179   // Strip off atomic type sugar.
180   if (const auto *at = qt->getAs<AtomicType>())
181     qt = at->getValueType();
182 
183   // If this is a record, check it.
184   if (const auto *rt = qt->getAs<RecordType>())
185     return isSafeToConvert(rt->getDecl(), cgt, alreadyChecked);
186 
187   // If this is an array, check the elements, which are embedded inline.
188   if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
189     return isSafeToConvert(at->getElementType(), cgt, alreadyChecked);
190 
191   // Otherwise, there is no concern about transforming this. We only care about
192   // things that are contained by-value in a record that can have another
193   // record as a member.
194   return true;
195 }
196 
197 // Return true if it is safe to convert the specified record decl to CIR and lay
198 // it out, false if doing so would cause us to get into a recursive compilation
199 // mess.
isSafeToConvert(const RecordDecl * rd,CIRGenTypes & cgt)200 static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
201   // If no records are being laid out, we can certainly do this one.
202   if (cgt.noRecordsBeingLaidOut())
203     return true;
204 
205   llvm::SmallPtrSet<const RecordDecl *, 16> alreadyChecked;
206   return isSafeToConvert(rd, cgt, alreadyChecked);
207 }
208 
209 /// Lay out a tagged decl type like struct or union.
convertRecordDeclType(const clang::RecordDecl * rd)210 mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
211   // TagDecl's are not necessarily unique, instead use the (clang) type
212   // connected to the decl.
213   const Type *key = astContext.getTagDeclType(rd).getTypePtr();
214   cir::RecordType entry = recordDeclTypes[key];
215 
216   // If we don't have an entry for this record yet, create one.
217   // We create an incomplete type initially. If `rd` is complete, we will
218   // add the members below.
219   if (!entry) {
220     auto name = getRecordTypeName(rd, "");
221     entry = builder.getIncompleteRecordTy(name, rd);
222     recordDeclTypes[key] = entry;
223   }
224 
225   rd = rd->getDefinition();
226   if (!rd || !rd->isCompleteDefinition() || entry.isComplete())
227     return entry;
228 
229   // If converting this type would cause us to infinitely loop, don't do it!
230   if (!isSafeToConvert(rd, *this)) {
231     deferredRecords.push_back(rd);
232     return entry;
233   }
234 
235   // Okay, this is a definition of a type. Compile the implementation now.
236   bool insertResult = recordsBeingLaidOut.insert(key).second;
237   (void)insertResult;
238   assert(insertResult && "isSafeToCovert() should have caught this.");
239 
240   // Force conversion of non-virtual base classes recursively.
241   if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(rd)) {
242     for (const auto &base : cxxRecordDecl->bases()) {
243       if (base.isVirtual())
244         continue;
245       convertRecordDeclType(base.getType()->castAs<RecordType>()->getDecl());
246     }
247   }
248 
249   // Layout fields.
250   std::unique_ptr<CIRGenRecordLayout> layout = computeRecordLayout(rd, &entry);
251   recordDeclTypes[key] = entry;
252   cirGenRecordLayouts[key] = std::move(layout);
253 
254   // We're done laying out this record.
255   bool eraseResult = recordsBeingLaidOut.erase(key);
256   (void)eraseResult;
257   assert(eraseResult && "record not in RecordsBeingLaidOut set?");
258 
259   // If this record blocked a FunctionType conversion, then recompute whatever
260   // was derived from that.
261   assert(!cir::MissingFeatures::skippedLayout());
262 
263   // If we're done converting the outer-most record, then convert any deferred
264   // records as well.
265   if (recordsBeingLaidOut.empty())
266     while (!deferredRecords.empty())
267       convertRecordDeclType(deferredRecords.pop_back_val());
268 
269   return entry;
270 }
271 
/// Convert a clang type to its CIR equivalent.
///
/// Record types are handled (and cached) separately by
/// convertRecordDeclType(); all other type classes go through `typeCache`,
/// keyed on the canonical clang type pointer. Types that are not yet
/// supported report an NYI error and fall back to a signed 32-bit integer
/// placeholder so that code generation can continue.
mlir::Type CIRGenTypes::convertType(QualType type) {
  // Canonicalize first so that sugared spellings of the same type share one
  // cache entry.
  type = astContext.getCanonicalType(type);
  const Type *ty = type.getTypePtr();

  // Process record types before the type cache lookup.
  if (const auto *recordType = dyn_cast<RecordType>(type))
    return convertRecordDeclType(recordType->getDecl());

  // Has the type already been processed?
  TypeCacheTy::iterator tci = typeCache.find(ty);
  if (tci != typeCache.end())
    return tci->second;

  // For types that haven't been implemented yet or are otherwise unsupported,
  // report an error and return 'int'.

  mlir::Type resultType = nullptr;
  switch (ty->getTypeClass()) {
  case Type::Record:
    llvm_unreachable("Should have been handled above");

  case Type::Builtin: {
    switch (cast<BuiltinType>(ty)->getKind()) {
    // void
    case BuiltinType::Void:
      resultType = cgm.VoidTy;
      break;

    // bool
    case BuiltinType::Bool:
      resultType = cir::BoolType::get(&getMLIRContext());
      break;

    // Signed integral types: width comes from the AST context so the target's
    // sizes are respected.
    case BuiltinType::Char_S:
    case BuiltinType::Int:
    case BuiltinType::Int128:
    case BuiltinType::Long:
    case BuiltinType::LongLong:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::WChar_S:
      resultType =
          cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
                            /*isSigned=*/true);
      break;
    // Unsigned integral types.
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::UInt:
    case BuiltinType::UInt128:
    case BuiltinType::ULong:
    case BuiltinType::ULongLong:
    case BuiltinType::UShort:
    case BuiltinType::WChar_U:
      resultType =
          cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
                            /*isSigned=*/false);
      break;

    // Floating-point types
    case BuiltinType::Float16:
      resultType = cgm.FP16Ty;
      break;
    case BuiltinType::Half:
      // __fp16 maps to the native half type only when the language or target
      // treats it as a real arithmetic type; otherwise conversion intrinsics
      // would be needed, which are not implemented yet.
      if (astContext.getLangOpts().NativeHalfType ||
          !astContext.getTargetInfo().useFP16ConversionIntrinsics()) {
        resultType = cgm.FP16Ty;
      } else {
        cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
        resultType = cgm.SInt32Ty;
      }
      break;
    case BuiltinType::BFloat16:
      resultType = cgm.BFloat16Ty;
      break;
    case BuiltinType::Float:
      assert(&astContext.getFloatTypeSemantics(type) ==
                 &llvm::APFloat::IEEEsingle() &&
             "ClangIR NYI: 'float' in a format other than IEEE 32-bit");
      resultType = cgm.FloatTy;
      break;
    case BuiltinType::Double:
      assert(&astContext.getFloatTypeSemantics(type) ==
                 &llvm::APFloat::IEEEdouble() &&
             "ClangIR NYI: 'double' in a format other than IEEE 64-bit");
      resultType = cgm.DoubleTy;
      break;
    case BuiltinType::LongDouble:
      // 'long double' format varies per target; defer to the builder.
      resultType =
          builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type));
      break;
    case BuiltinType::Float128:
      resultType = cgm.FP128Ty;
      break;
    case BuiltinType::Ibm128:
      cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
      resultType = cgm.SInt32Ty;
      break;

    case BuiltinType::NullPtr:
      // Add proper CIR type for it? this looks mostly useful for sema related
      // things (like for overloads accepting void), for now, given that
      // `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`, model
      // std::nullptr_t as !cir.ptr<!void>
      resultType = builder.getVoidPtrTy();
      break;

    default:
      cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
      resultType = cgm.SInt32Ty;
      break;
    }
    break;
  }

  case Type::Complex: {
    const auto *ct = cast<clang::ComplexType>(ty);
    mlir::Type elementTy = convertType(ct->getElementType());
    resultType = cir::ComplexType::get(elementTy);
    break;
  }

  // References lower to pointers to their pointee's memory representation.
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *refTy = cast<ReferenceType>(ty);
    QualType elemTy = refTy->getPointeeType();
    auto pointeeType = convertTypeForMem(elemTy);
    resultType = builder.getPointerTo(pointeeType);
    assert(resultType && "Cannot get pointer type?");
    break;
  }

  case Type::Pointer: {
    const PointerType *ptrTy = cast<PointerType>(ty);
    QualType elemTy = ptrTy->getPointeeType();
    assert(!elemTy->isConstantMatrixType() && "not implemented");

    mlir::Type pointeeType = convertType(elemTy);

    resultType = builder.getPointerTo(pointeeType);
    break;
  }

  case Type::IncompleteArray: {
    const IncompleteArrayType *arrTy = cast<IncompleteArrayType>(ty);
    if (arrTy->getIndexTypeCVRQualifiers() != 0)
      cgm.errorNYI(SourceLocation(), "non trivial array types", type);

    mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete record) just use [0 x i8].
    if (!cir::isSized(elemTy)) {
      elemTy = cgm.SInt8Ty;
    }

    resultType = cir::ArrayType::get(elemTy, 0);
    break;
  }

  case Type::ConstantArray: {
    const ConstantArrayType *arrTy = cast<ConstantArrayType>(ty);
    mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());

    // TODO(CIR): In LLVM, "lower arrays of undefined struct type to arrays of
    // i8 just to have a concrete type"
    if (!cir::isSized(elemTy)) {
      cgm.errorNYI(SourceLocation(), "arrays of undefined struct type", type);
      resultType = cgm.UInt32Ty;
      break;
    }

    resultType = cir::ArrayType::get(elemTy, arrTy->getSize().getZExtValue());
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *vec = cast<VectorType>(ty);
    const mlir::Type elemTy = convertType(vec->getElementType());
    resultType = cir::VectorType::get(elemTy, vec->getNumElements());
    break;
  }

  case Type::Enum: {
    // Enums lower to their underlying integer type when one is known.
    const EnumDecl *ed = cast<EnumType>(ty)->getDecl();
    if (auto integerType = ed->getIntegerType(); !integerType.isNull())
      return convertType(integerType);
    // Return a placeholder 'i32' type.  This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    resultType = cgm.UInt32Ty;
    break;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    resultType = convertFunctionTypeInternal(type);
    break;

  case Type::BitInt: {
    const auto *bitIntTy = cast<BitIntType>(type);
    if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) {
      cgm.errorNYI(SourceLocation(), "large _BitInt type", type);
      resultType = cgm.SInt32Ty;
    } else {
      resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(),
                                     bitIntTy->isSigned());
    }
    break;
  }

  default:
    cgm.errorNYI(SourceLocation(), "processing of type",
                 type->getTypeClassName());
    resultType = cgm.SInt32Ty;
    break;
  }

  assert(resultType && "Type conversion not yet implemented");

  // Cache the converted type so subsequent lookups are O(1).
  typeCache[ty] = resultType;
  return resultType;
}
499 
/// Convert a clang type to the CIR type used for its in-memory
/// representation. Currently identical to convertType(); bit-field handling
/// is not implemented yet.
mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType,
                                          bool forBitField) {
  assert(!qualType->isConstantMatrixType() && "Matrix types NYI");

  mlir::Type convertedType = convertType(qualType);

  // NOTE(review): this assert fires for *any* bit-field request, so the
  // forBitField branch below is unreachable in asserting builds (and both
  // asserts compile out in NDEBUG builds); it sketches the intended final
  // structure.
  assert(!forBitField && "Bit fields NYI");

  // If this is a bit-precise integer type in a bitfield representation, map
  // this integer to the target-specified size.
  if (forBitField && qualType->isBitIntType())
    assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI");

  return convertedType;
}
515 
516 /// Return record layout info for the given record decl.
517 const CIRGenRecordLayout &
getCIRGenRecordLayout(const RecordDecl * rd)518 CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *rd) {
519   const auto *key = astContext.getTagDeclType(rd).getTypePtr();
520 
521   // If we have already computed the layout, return it.
522   auto it = cirGenRecordLayouts.find(key);
523   if (it != cirGenRecordLayouts.end())
524     return *it->second;
525 
526   // Compute the type information.
527   convertRecordDeclType(rd);
528 
529   // Now try again.
530   it = cirGenRecordLayouts.find(key);
531 
532   assert(it != cirGenRecordLayouts.end() &&
533          "Unable to find record layout information for type");
534   return *it->second;
535 }
536 
isZeroInitializable(clang::QualType t)537 bool CIRGenTypes::isZeroInitializable(clang::QualType t) {
538   if (t->getAs<PointerType>())
539     return astContext.getTargetNullPointerValue(t) == 0;
540 
541   if (const auto *at = astContext.getAsArrayType(t)) {
542     if (isa<IncompleteArrayType>(at))
543       return true;
544 
545     if (const auto *cat = dyn_cast<ConstantArrayType>(at))
546       if (astContext.getConstantArrayElementCount(cat) == 0)
547         return true;
548   }
549 
550   if (const RecordType *rt = t->getAs<RecordType>()) {
551     const RecordDecl *rd = rt->getDecl();
552     return isZeroInitializable(rd);
553   }
554 
555   if (t->getAs<MemberPointerType>()) {
556     cgm.errorNYI(SourceLocation(), "isZeroInitializable for MemberPointerType",
557                  t);
558     return false;
559   }
560 
561   return true;
562 }
563 
isZeroInitializable(const RecordDecl * rd)564 bool CIRGenTypes::isZeroInitializable(const RecordDecl *rd) {
565   return getCIRGenRecordLayout(rd).isZeroInitializable();
566 }
567 
568 const CIRGenFunctionInfo &
arrangeCIRFunctionInfo(CanQualType returnType,llvm::ArrayRef<CanQualType> argTypes,RequiredArgs required)569 CIRGenTypes::arrangeCIRFunctionInfo(CanQualType returnType,
570                                     llvm::ArrayRef<CanQualType> argTypes,
571                                     RequiredArgs required) {
572   assert(llvm::all_of(argTypes,
573                       [](CanQualType t) { return t.isCanonicalAsParam(); }));
574   // Lookup or create unique function info.
575   llvm::FoldingSetNodeID id;
576   CIRGenFunctionInfo::Profile(id, required, returnType, argTypes);
577 
578   void *insertPos = nullptr;
579   CIRGenFunctionInfo *fi = functionInfos.FindNodeOrInsertPos(id, insertPos);
580   if (fi) {
581     // We found a matching function info based on id. These asserts verify that
582     // it really is a match.
583     assert(
584         fi->getReturnType() == returnType &&
585         std::equal(fi->argTypesBegin(), fi->argTypesEnd(), argTypes.begin()) &&
586         "Bad match based on CIRGenFunctionInfo folding set id");
587     return *fi;
588   }
589 
590   assert(!cir::MissingFeatures::opCallCallConv());
591 
592   // Construction the function info. We co-allocate the ArgInfos.
593   fi = CIRGenFunctionInfo::create(returnType, argTypes, required);
594   functionInfos.InsertNode(fi, insertPos);
595 
596   return *fi;
597 }
598 
arrangeGlobalDeclaration(GlobalDecl gd)599 const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl gd) {
600   assert(!dyn_cast<ObjCMethodDecl>(gd.getDecl()) &&
601          "This is reported as a FIXME in LLVM codegen");
602   const auto *fd = cast<FunctionDecl>(gd.getDecl());
603 
604   if (isa<CXXConstructorDecl>(gd.getDecl()) ||
605       isa<CXXDestructorDecl>(gd.getDecl())) {
606     cgm.errorNYI(SourceLocation(),
607                  "arrangeGlobalDeclaration for C++ constructor or destructor");
608   }
609 
610   return arrangeFunctionDeclaration(fd);
611 }
612 
613 // When we find the full definition for a TagDecl, replace the 'opaque' type we
614 // previously made for it if applicable.
updateCompletedType(const TagDecl * td)615 void CIRGenTypes::updateCompletedType(const TagDecl *td) {
616   // If this is an enum being completed, then we flush all non-struct types
617   // from the cache. This allows function types and other things that may be
618   // derived from the enum to be recomputed.
619   if (const auto *ed = dyn_cast<EnumDecl>(td)) {
620     // Classic codegen clears the type cache if it contains an entry for this
621     // enum type that doesn't use i32 as the underlying type, but I can't find
622     // a test case that meets that condition. C++ doesn't allow forward
623     // declaration of enums, and C doesn't allow an incomplete forward
624     // declaration with a non-default type.
625     assert(
626         !typeCache.count(ed->getTypeForDecl()) ||
627         (convertType(ed->getIntegerType()) == typeCache[ed->getTypeForDecl()]));
628     // If necessary, provide the full definition of a type only used with a
629     // declaration so far.
630     assert(!cir::MissingFeatures::generateDebugInfo());
631     return;
632   }
633 
634   // If we completed a RecordDecl that we previously used and converted to an
635   // anonymous type, then go ahead and complete it now.
636   const auto *rd = cast<RecordDecl>(td);
637   if (rd->isDependentType())
638     return;
639 
640   // Only complete if we converted it already. If we haven't converted it yet,
641   // we'll just do it lazily.
642   if (recordDeclTypes.count(astContext.getTagDeclType(rd).getTypePtr()))
643     convertRecordDeclType(rd);
644 
645   // If necessary, provide the full definition of a type only used with a
646   // declaration so far.
647   assert(!cir::MissingFeatures::generateDebugInfo());
648 }
649