//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

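/// Pick a readable IR name for the LLVM struct type created for this record,
/// combining the tag kind ("struct", "union", ...) with the qualified tag or
/// typedef name when one is available.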
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

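  // Widen plain i1 values (e.g. _Bool and bool) to their full AST storage
  // size, which is typically i8 or i32 depending on the target.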
  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

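/// Return true if values of the given AST type must be stored in memory as a
/// plain byte array because the alloc size of the corresponding IR type under
/// the LLVM data layout differs from the AST size (see ConvertTypeForMem).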
bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

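/// Return the IR type used when loading or storing a value of type T: _BitInt
/// and boolean values are widened to their full in-memory bit-width, and
/// boolean vectors use their padded bitmask representation.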
llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
    RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

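/// If this class was previously converted while its member-pointer
/// representation was still opaque, drop the cached types so the next
/// conversion picks up the now-known representation.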
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

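/// Map a float semantics description to the corresponding LLVM IR
/// floating-point type; IEEE half lowers to i16 unless the caller asks for a
/// native half type.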
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For the device-side compilation, CUDA device builtin surface/texture types
  // may be represented in different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(
          getLLVMContext(), static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveBoolx2:
    case BuiltinType::SveBoolx4:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      // Tuple types are expressed as aggregate types of the same scalable
      // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is {<vscale x
      // 2 x i32>, <vscale x 2 x i32>}).
      if (Info.NumVectors != 1) {
        llvm::Type *EltTy = llvm::ScalableVectorType::get(
            ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
        llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
        return llvm::StructType::get(getLLVMContext(), EltTys);
      }
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue());
    }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
    case BuiltinType::Id: {                                                    \
      if (BuiltinType::Id == BuiltinType::WasmExternRef)                       \
        ResultType =                                                           \
            CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType();        \
      else                                                                     \
        llvm_unreachable("Unexpected wasm reference builtin type!");           \
    } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id,       \
                               SingletonId)                                    \
    case BuiltinType::Id:                                                      \
      return llvm::PointerType::get(getLLVMContext(), AS);
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns default address space for
    // function pointer i.e. program address space. Therefore, for block
    // pointers, it is important to pass the pointee AST address space when
    // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
    // address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

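/// Return true if the in-memory representation of this atomic type contains
/// padding beyond the size of its underlying value type.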
bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

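/// Return true if a value of the given type can be initialized with an
/// all-zero byte pattern; pointers whose target null representation is not
/// zero, and member pointers with a nonzero null value, are the exceptions.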
bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}