xref: /freebsd/contrib/llvm-project/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to compute the layout of a record.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CIRGenBuilder.h"
14 #include "CIRGenModule.h"
15 #include "CIRGenTypes.h"
16 
17 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Decl.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/RecordLayout.h"
21 #include "clang/CIR/Dialect/IR/CIRAttrs.h"
22 #include "clang/CIR/Dialect/IR/CIRDataLayout.h"
23 #include "clang/CIR/MissingFeatures.h"
24 #include "llvm/Support/Casting.h"
25 
26 #include <memory>
27 
28 using namespace llvm;
29 using namespace clang;
30 using namespace clang::CIRGen;
31 
namespace {
/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to an
/// mlir::Type. Some of the lowering is straightforward, some is not.
// TODO: Detail some of the complexities and weirdnesses?
// (See CGRecordLayoutBuilder.cpp)
struct CIRRecordLowering final {

  // MemberInfo is a helper structure that contains information about a record
  // member. In addition to the standard member types, there exists a sentinel
  // member type that ensures correct rounding.
  struct MemberInfo final {
    // Offset of this member from the start of the record, in characters.
    CharUnits offset;
    enum class InfoKind { Field, Base } kind;
    // Storage type of the member; null for bitfields that share a previously
    // emitted storage unit (see fillOutputFields).
    mlir::Type data;
    // Which union member is active is implied by 'kind': Field reads
    // 'fieldDecl', Base reads 'cxxRecordDecl'.
    union {
      const FieldDecl *fieldDecl;
      const CXXRecordDecl *cxxRecordDecl;
    };
    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
               const FieldDecl *fieldDecl = nullptr)
        : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}
    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
               const CXXRecordDecl *rd)
        : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{rd} {}
    // MemberInfos are sorted so we define a < operator.
    bool operator<(const MemberInfo &other) const {
      return offset < other.offset;
    }
  };
  // The constructor.
  CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl,
                    bool packed);

  /// Constructs a MemberInfo instance from an offset and mlir::Type.
  MemberInfo makeStorageInfo(CharUnits offset, mlir::Type data) {
    return MemberInfo(offset, MemberInfo::InfoKind::Field, data);
  }

  // Layout routines.
  /// Records access information (offset, size, storage unit) for bitfield
  /// 'fd' whose storage begins at 'startOffset' with type 'storageType'.
  void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
                       mlir::Type storageType);

  void lower();
  void lowerUnion();

  /// Determines if we need a packed llvm struct.
  void determinePacked();
  /// Inserts padding everywhere it's needed.
  void insertPadding();

  void computeVolatileBitfields();
  void accumulateBases(const CXXRecordDecl *cxxRecordDecl);
  void accumulateVPtrs();
  void accumulateFields();
  /// Accumulates the run of bitfields [field, fieldEnd) into access units,
  /// returning the first iterator past the run.
  RecordDecl::field_iterator
  accumulateBitFields(RecordDecl::field_iterator field,
                      RecordDecl::field_iterator fieldEnd);

  /// Whether the target uses an AAPCS-family ABI.
  bool isAAPCS() const {
    return astContext.getTargetInfo().getABI().starts_with("aapcs");
  }

  /// Converts a bit offset into the equivalent number of characters.
  CharUnits bitsToCharUnits(uint64_t bitOffset) {
    return astContext.toCharUnitsFromBits(bitOffset);
  }

  void calculateZeroInit();

  /// Size of 'Ty' in characters per the CIR data layout.
  CharUnits getSize(mlir::Type Ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(Ty));
  }
  /// NOTE: the quantity wrapped here is a count of *bits*, not characters;
  /// callers read it back with getQuantity() (see setBitFieldInfo).
  CharUnits getSizeInBits(mlir::Type ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(ty));
  }
  /// ABI alignment of 'Ty' in characters.
  CharUnits getAlignment(mlir::Type Ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(Ty));
  }

  bool isZeroInitializable(const FieldDecl *fd) {
    return cirGenTypes.isZeroInitializable(fd->getType());
  }
  bool isZeroInitializable(const RecordDecl *rd) {
    return cirGenTypes.isZeroInitializable(rd);
  }

  /// Wraps cir::IntType with some implicit arguments.
  mlir::Type getUIntNType(uint64_t numBits) {
    // Round up to a power of two, with a floor of 8 bits.
    unsigned alignedBits = llvm::PowerOf2Ceil(numBits);
    alignedBits = std::max(8u, alignedBits);
    return cir::IntType::get(&cirGenTypes.getMLIRContext(), alignedBits,
                             /*isSigned=*/false);
  }

  /// An unsigned integer type as wide as one character.
  mlir::Type getCharType() {
    return cir::IntType::get(&cirGenTypes.getMLIRContext(),
                             astContext.getCharWidth(),
                             /*isSigned=*/false);
  }

  /// A char type, or array-of-char type, covering 'numberOfChars'.
  mlir::Type getByteArrayType(CharUnits numberOfChars) {
    assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed.");
    mlir::Type type = getCharType();
    return numberOfChars == CharUnits::One()
               ? type
               : cir::ArrayType::get(type, numberOfChars.getQuantity());
  }

  // Gets the CIR BaseSubobject type from a CXXRecordDecl.
  mlir::Type getStorageType(const CXXRecordDecl *RD) {
    return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType();
  }
  // This is different from LLVM traditional codegen because CIRGen uses arrays
  // of bytes instead of arbitrary-sized integers. This is important for packed
  // structures support.
  mlir::Type getBitfieldStorageType(unsigned numBits) {
    unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth());
    if (cir::isValidFundamentalIntWidth(alignedBits))
      return builder.getUIntNTy(alignedBits);

    mlir::Type type = getCharType();
    return cir::ArrayType::get(type, alignedBits / astContext.getCharWidth());
  }

  /// Memory type of a (non-bitfield) field.
  mlir::Type getStorageType(const FieldDecl *fieldDecl) {
    mlir::Type type = cirGenTypes.convertTypeForMem(fieldDecl->getType());
    if (fieldDecl->isBitField()) {
      cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                         "getStorageType for bitfields");
    }
    return type;
  }

  /// Offset of 'fieldDecl' in bits, from the AST record layout.
  uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) {
    return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex());
  }

  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();

  /// Appends a byte-array member of 'size' characters (no-op when zero) and
  /// marks the record as padded.
  void appendPaddingBytes(CharUnits size) {
    if (!size.isZero()) {
      fieldTypes.push_back(getByteArrayType(size));
      padded = true;
    }
  }

  CIRGenTypes &cirGenTypes;
  CIRGenBuilderTy &builder;
  const ASTContext &astContext;
  const RecordDecl *recordDecl;
  const ASTRecordLayout &astRecordLayout;
  // Helpful intermediate data-structures
  std::vector<MemberInfo> members;
  // Output fields, consumed by CIRGenTypes::computeRecordLayout
  llvm::SmallVector<mlir::Type, 16> fieldTypes;
  llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
  llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
  cir::CIRDataLayout dataLayout;

  // Layout result flags, consumed by CIRGenTypes::computeRecordLayout.
  LLVM_PREFERRED_TYPE(bool)
  unsigned zeroInitializable : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned zeroInitializableAsBase : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned packed : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned padded : 1;

private:
  CIRRecordLowering(const CIRRecordLowering &) = delete;
  void operator=(const CIRRecordLowering &) = delete;
}; // CIRRecordLowering
} // namespace
206 
// Caches the builder, AST context, and AST record layout up front; the layout
// flags start optimistic (zero-initializable, not padded) and are cleared as
// lowering discovers otherwise.
CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
                                     const RecordDecl *recordDecl, bool packed)
    : cirGenTypes(cirGenTypes), builder(cirGenTypes.getBuilder()),
      astContext(cirGenTypes.getASTContext()), recordDecl(recordDecl),
      astRecordLayout(
          cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)),
      dataLayout(cirGenTypes.getCGModule().getModule()),
      zeroInitializable(true), zeroInitializableAsBase(true), packed(packed),
      padded(false) {}
216 
// Records the access parameters for bitfield 'fd': its bit offset within the
// storage unit that starts at 'startOffset', its declared width (clamped to
// the storage size), and the storage type itself.
void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
                                        CharUnits startOffset,
                                        mlir::Type storageType) {
  CIRGenBitFieldInfo &info = bitFields[fd->getCanonicalDecl()];
  info.isSigned = fd->getType()->isSignedIntegerOrEnumerationType();
  // Offset of the field's first bit relative to the start of its storage.
  info.offset =
      (unsigned)(getFieldBitOffset(fd) - astContext.toBits(startOffset));
  info.size = fd->getBitWidthValue();
  // getSizeInBits wraps a bit count in CharUnits; getQuantity() yields bits.
  info.storageSize = getSizeInBits(storageType).getQuantity();
  info.storageOffset = startOffset;
  info.storageType = storageType;
  info.name = fd->getName();

  // Clamp the declared width to the storage unit before the endian flip
  // below, which depends on size being in range.
  if (info.size > info.storageSize)
    info.size = info.storageSize;
  // Reverse the bit offsets for big endian machines. Since bitfields are laid
  // out as packed bits within an integer-sized unit, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (dataLayout.isBigEndian())
    info.offset = info.storageSize - (info.offset + info.size);

  // Volatile (AAPCS) bitfield data is filled in later by
  // computeVolatileBitfields; zero it here.
  info.volatileStorageSize = 0;
  info.volatileOffset = 0;
  info.volatileStorageOffset = CharUnits::Zero();
}
243 
// Top-level lowering driver. Unions take a dedicated path; for structs and
// classes we accumulate fields (and, for C++, vptrs and bases) into
// 'members', sort by offset, determine packedness, insert padding, and emit
// the final field list.
void CIRRecordLowering::lower() {
  if (recordDecl->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }

  assert(!cir::MissingFeatures::recordLayoutVirtualBases());
  CharUnits size = astRecordLayout.getSize();

  accumulateFields();

  if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(recordDecl)) {
    accumulateVPtrs();
    accumulateBases(cxxRecordDecl);
    // An empty C++ record still occupies 'size' bytes; represent it as pure
    // padding.
    if (members.empty()) {
      appendPaddingBytes(size);
      computeVolatileBitfields();
      return;
    }
    assert(!cir::MissingFeatures::recordLayoutVirtualBases());
  }

  llvm::stable_sort(members);
  // TODO: implement clipTailPadding once bitfields are implemented
  assert(!cir::MissingFeatures::bitfields());
  assert(!cir::MissingFeatures::recordZeroInit());

  // Temporarily push a sentinel ("capstone") member at the record's total
  // size so determinePacked/insertPadding can see the tail boundary; it is
  // removed before the output fields are produced.
  members.push_back(makeStorageInfo(size, getUIntNType(8)));
  determinePacked();
  insertPadding();
  members.pop_back();

  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}
281 
fillOutputFields()282 void CIRRecordLowering::fillOutputFields() {
283   for (const MemberInfo &member : members) {
284     if (member.data)
285       fieldTypes.push_back(member.data);
286     if (member.kind == MemberInfo::InfoKind::Field) {
287       if (member.fieldDecl)
288         fieldIdxMap[member.fieldDecl->getCanonicalDecl()] =
289             fieldTypes.size() - 1;
290       // A field without storage must be a bitfield.
291       assert(!cir::MissingFeatures::bitfields());
292       if (!member.data)
293         setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
294     } else if (member.kind == MemberInfo::InfoKind::Base) {
295       nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
296     }
297     assert(!cir::MissingFeatures::recordLayoutVirtualBases());
298   }
299 }
300 
// Greedily groups the bitfield run [field, fieldEnd) into "access units":
// storage members sized to minimize expensive or multi-register accesses.
// Emits one storage MemberInfo per unit plus a storage-less MemberInfo per
// contained bitfield, then returns the iterator just past the run.
RecordDecl::field_iterator
CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
                                       RecordDecl::field_iterator fieldEnd) {
  assert(!cir::MissingFeatures::isDiscreteBitFieldABI());

  CharUnits regSize =
      bitsToCharUnits(astContext.getTargetInfo().getRegisterWidth());
  unsigned charBits = astContext.getCharWidth();

  // Data about the start of the span we're accumulating to create an access
  // unit from. 'Begin' is the first bitfield of the span. If 'begin' is
  // 'fieldEnd', we've not got a current span. The span starts at the
  // 'beginOffset' character boundary. 'bitSizeSinceBegin' is the size (in bits)
  // of the span -- this might include padding when we've advanced to a
  // subsequent bitfield run.
  RecordDecl::field_iterator begin = fieldEnd;
  CharUnits beginOffset;
  uint64_t bitSizeSinceBegin;

  // The (non-inclusive) end of the largest acceptable access unit we've found
  // since 'begin'. If this is 'begin', we're gathering the initial set of
  // bitfields of a new span. 'bestEndOffset' is the end of that acceptable
  // access unit -- it might extend beyond the last character of the bitfield
  // run, using available padding characters.
  RecordDecl::field_iterator bestEnd = begin;
  CharUnits bestEndOffset;
  bool bestClipped; // Whether the representation must be in a byte array.

  for (;;) {
    // atAlignedBoundary is true if 'field' is the (potential) start of a new
    // span (or the end of the bitfields). When true, limitOffset is the
    // character offset of that span and barrier indicates whether the new
    // span cannot be merged into the current one.
    bool atAlignedBoundary = false;
    bool barrier = false; // a barrier can be a zero Bit Width or non bit member
    if (field != fieldEnd && field->isBitField()) {
      uint64_t bitOffset = getFieldBitOffset(*field);
      if (begin == fieldEnd) {
        // Beginning a new span.
        begin = field;
        bestEnd = begin;

        assert((bitOffset % charBits) == 0 && "Not at start of char");
        beginOffset = bitsToCharUnits(bitOffset);
        bitSizeSinceBegin = 0;
      } else if ((bitOffset % charBits) != 0) {
        // Bitfield occupies the same character as previous bitfield, it must be
        // part of the same span. This can include zero-length bitfields, should
        // the target not align them to character boundaries. Such non-alignment
        // is at variance with the standards, which require zero-length
        // bitfields be a barrier between access units. But of course we can't
        // achieve that in the middle of a character.
        assert(bitOffset ==
                   astContext.toBits(beginOffset) + bitSizeSinceBegin &&
               "Concatenating non-contiguous bitfields");
      } else {
        // Bitfield potentially begins a new span. This includes zero-length
        // bitfields on non-aligning targets that lie at character boundaries
        // (those are barriers to merging).
        if (field->isZeroLengthBitField())
          barrier = true;
        atAlignedBoundary = true;
      }
    } else {
      // We've reached the end of the bitfield run. Either we're done, or this
      // is a barrier for the current span.
      if (begin == fieldEnd)
        break;

      barrier = true;
      atAlignedBoundary = true;
    }

    // 'installBest' indicates whether we should create an access unit for the
    // current best span: fields ['begin', 'bestEnd') occupying characters
    // ['beginOffset', 'bestEndOffset').
    bool installBest = false;
    if (atAlignedBoundary) {
      // 'field' is the start of a new span or the end of the bitfields. The
      // just-seen span now extends to 'bitSizeSinceBegin'.

      // Determine if we can accumulate that just-seen span into the current
      // accumulation.
      CharUnits accessSize = bitsToCharUnits(bitSizeSinceBegin + charBits - 1);
      if (bestEnd == begin) {
        // This is the initial run at the start of a new span. By definition,
        // this is the best seen so far.
        bestEnd = field;
        bestEndOffset = beginOffset + accessSize;
        // Assume clipped until proven not below.
        bestClipped = true;
        if (!bitSizeSinceBegin)
          // A zero-sized initial span -- this will install nothing and reset
          // for another.
          installBest = true;
      } else if (accessSize > regSize) {
        // Accumulating the just-seen span would create a multi-register access
        // unit, which would increase register pressure.
        installBest = true;
      }

      if (!installBest) {
        // Determine if accumulating the just-seen span will create an expensive
        // access unit or not.
        mlir::Type type = getUIntNType(astContext.toBits(accessSize));
        if (!astContext.getTargetInfo().hasCheapUnalignedBitFieldAccess())
          cirGenTypes.getCGModule().errorNYI(
              field->getSourceRange(), "NYI CheapUnalignedBitFieldAccess");

        if (!installBest) {
          // Find the next used storage offset to determine what the limit of
          // the current span is. That's either the offset of the next field
          // with storage (which might be field itself) or the end of the
          // non-reusable tail padding.
          CharUnits limitOffset;
          for (auto probe = field; probe != fieldEnd; ++probe)
            if (!isEmptyFieldForLayout(astContext, *probe)) {
              // A member with storage sets the limit.
              assert((getFieldBitOffset(*probe) % charBits) == 0 &&
                     "Next storage is not byte-aligned");
              limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
              goto FoundLimit;
            }
          assert(!cir::MissingFeatures::cxxSupport());
          limitOffset = astRecordLayout.getDataSize();
        FoundLimit:
          CharUnits typeSize = getSize(type);
          if (beginOffset + typeSize <= limitOffset) {
            // There is space before limitOffset to create a naturally-sized
            // access unit.
            bestEndOffset = beginOffset + typeSize;
            bestEnd = field;
            bestClipped = false;
          }
          if (barrier) {
            // The next field is a barrier that we cannot merge across.
            installBest = true;
          } else if (cirGenTypes.getCGModule()
                         .getCodeGenOpts()
                         .FineGrainedBitfieldAccesses) {
            assert(!cir::MissingFeatures::nonFineGrainedBitfields());
            cirGenTypes.getCGModule().errorNYI(field->getSourceRange(),
                                               "NYI FineGrainedBitfield");
          } else {
            // Otherwise, we're not installing. Update the bit size
            // of the current span to go all the way to limitOffset, which is
            // the (aligned) offset of next bitfield to consider.
            bitSizeSinceBegin = astContext.toBits(limitOffset - beginOffset);
          }
        }
      }
    }

    if (installBest) {
      assert((field == fieldEnd || !field->isBitField() ||
              (getFieldBitOffset(*field) % charBits) == 0) &&
             "Installing but not at an aligned bitfield or limit");
      CharUnits accessSize = bestEndOffset - beginOffset;
      if (!accessSize.isZero()) {
        // Add the storage member for the access unit to the record. The
        // bitfields get the offset of their storage but come afterward and
        // remain there after a stable sort.
        mlir::Type type;
        if (bestClipped) {
          assert(getSize(getUIntNType(astContext.toBits(accessSize))) >
                     accessSize &&
                 "Clipped access need not be clipped");
          type = getByteArrayType(accessSize);
        } else {
          type = getUIntNType(astContext.toBits(accessSize));
          assert(getSize(type) == accessSize &&
                 "Unclipped access must be clipped");
        }
        members.push_back(makeStorageInfo(beginOffset, type));
        for (; begin != bestEnd; ++begin)
          if (!begin->isZeroLengthBitField())
            members.push_back(MemberInfo(
                beginOffset, MemberInfo::InfoKind::Field, nullptr, *begin));
      }
      // Reset to start a new span.
      field = bestEnd;
      begin = fieldEnd;
    } else {
      assert(field != fieldEnd && field->isBitField() &&
             "Accumulating past end of bitfields");
      assert(!barrier && "Accumulating across barrier");
      // Accumulate this bitfield into the current (potential) span.
      bitSizeSinceBegin += field->getBitWidthValue();
      ++field;
    }
  }

  return field;
}
495 
accumulateFields()496 void CIRRecordLowering::accumulateFields() {
497   for (RecordDecl::field_iterator field = recordDecl->field_begin(),
498                                   fieldEnd = recordDecl->field_end();
499        field != fieldEnd;) {
500     if (field->isBitField()) {
501       RecordDecl::field_iterator start = field;
502       // Iterate to gather the list of bitfields.
503       for (++field; field != fieldEnd && field->isBitField(); ++field)
504         ;
505       field = accumulateBitFields(start, field);
506       assert((field == fieldEnd || !field->isBitField()) &&
507              "Failed to accumulate all the bitfields");
508     } else if (!field->isZeroSize(astContext)) {
509       members.push_back(MemberInfo(bitsToCharUnits(getFieldBitOffset(*field)),
510                                    MemberInfo::InfoKind::Field,
511                                    getStorageType(*field), *field));
512       ++field;
513     } else {
514       // TODO(cir): do we want to do anything special about zero size members?
515       assert(!cir::MissingFeatures::zeroSizeRecordMembers());
516       ++field;
517     }
518   }
519 }
520 
calculateZeroInit()521 void CIRRecordLowering::calculateZeroInit() {
522   for (const MemberInfo &member : members) {
523     if (member.kind == MemberInfo::InfoKind::Field) {
524       if (!member.fieldDecl || isZeroInitializable(member.fieldDecl))
525         continue;
526       zeroInitializable = zeroInitializableAsBase = false;
527       return;
528     } else if (member.kind == MemberInfo::InfoKind::Base) {
529       if (isZeroInitializable(member.cxxRecordDecl))
530         continue;
531       zeroInitializable = false;
532       if (member.kind == MemberInfo::InfoKind::Base)
533         zeroInitializableAsBase = false;
534     }
535     assert(!cir::MissingFeatures::recordLayoutVirtualBases());
536   }
537 }
538 
determinePacked()539 void CIRRecordLowering::determinePacked() {
540   if (packed)
541     return;
542   CharUnits alignment = CharUnits::One();
543 
544   // TODO(cir): handle non-virtual base types
545   assert(!cir::MissingFeatures::cxxSupport());
546 
547   for (const MemberInfo &member : members) {
548     if (!member.data)
549       continue;
550     // If any member falls at an offset that it not a multiple of its alignment,
551     // then the entire record must be packed.
552     if (member.offset % getAlignment(member.data))
553       packed = true;
554     alignment = std::max(alignment, getAlignment(member.data));
555   }
556   // If the size of the record (the capstone's offset) is not a multiple of the
557   // record's alignment, it must be packed.
558   if (members.back().offset % alignment)
559     packed = true;
560   // Update the alignment of the sentinel.
561   if (!packed)
562     members.back().data = getUIntNType(astContext.toBits(alignment));
563 }
564 
insertPadding()565 void CIRRecordLowering::insertPadding() {
566   std::vector<std::pair<CharUnits, CharUnits>> padding;
567   CharUnits size = CharUnits::Zero();
568   for (const MemberInfo &member : members) {
569     if (!member.data)
570       continue;
571     CharUnits offset = member.offset;
572     assert(offset >= size);
573     // Insert padding if we need to.
574     if (offset !=
575         size.alignTo(packed ? CharUnits::One() : getAlignment(member.data)))
576       padding.push_back(std::make_pair(size, offset - size));
577     size = offset + getSize(member.data);
578   }
579   if (padding.empty())
580     return;
581   padded = true;
582   // Add the padding to the Members list and sort it.
583   for (const std::pair<CharUnits, CharUnits> &paddingPair : padding)
584     members.push_back(makeStorageInfo(paddingPair.first,
585                                       getByteArrayType(paddingPair.second)));
586   llvm::stable_sort(members);
587 }
588 
// Lowers 'rd' into the (incomplete) CIR record type '*ty', computes the C++
// base-subobject type when needed, and packages everything into a
// CIRGenRecordLayout.
std::unique_ptr<CIRGenRecordLayout>
CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
  CIRRecordLowering lowering(*this, rd, /*packed=*/false);
  assert(ty->isIncomplete() && "recomputing record layout?");
  lowering.lower();

  // If we're in C++, compute the base subobject type.
  cir::RecordType baseTy;
  if (llvm::isa<CXXRecordDecl>(rd) && !rd->isUnion() &&
      !rd->hasAttr<FinalAttr>()) {
    baseTy = *ty;
    // Only build a distinct ".base" type when the non-virtual size differs
    // from the complete-object size (i.e. there is reusable tail padding).
    if (lowering.astRecordLayout.getNonVirtualSize() !=
        lowering.astRecordLayout.getSize()) {
      CIRRecordLowering baseLowering(*this, rd, /*Packed=*/lowering.packed);
      baseLowering.lower();
      std::string baseIdentifier = getRecordTypeName(rd, ".base");
      baseTy =
          builder.getCompleteRecordTy(baseLowering.fieldTypes, baseIdentifier,
                                      baseLowering.packed, baseLowering.padded);
      // TODO(cir): add something like addRecordTypeName

      // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work
      // on both of them with the same index.
      assert(lowering.packed == baseLowering.packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the record *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively layout rd while laying D out as a base type.
  assert(!cir::MissingFeatures::astRecordDeclAttr());
  ty->complete(lowering.fieldTypes, lowering.packed, lowering.padded);

  auto rl = std::make_unique<CIRGenRecordLayout>(
      ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{},
      (bool)lowering.zeroInitializable, (bool)lowering.zeroInitializableAsBase);

  assert(!cir::MissingFeatures::recordZeroInit());

  // Transfer the lowering's maps into the layout object without copying.
  rl->nonVirtualBases.swap(lowering.nonVirtualBases);

  assert(!cir::MissingFeatures::cxxSupport());
  assert(!cir::MissingFeatures::bitfields());

  // Add all the field numbers.
  rl->fieldIdxMap.swap(lowering.fieldIdxMap);

  rl->bitFields.swap(lowering.bitFields);

  // Dump the layout, if requested.
  if (getASTContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping CIRgen Record Layout\n";
    llvm::outs() << "Record: ";
    rd->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    rl->print(llvm::outs());
  }

  // TODO: implement verification
  return rl;
}
651 
print(raw_ostream & os) const652 void CIRGenRecordLayout::print(raw_ostream &os) const {
653   os << "<CIRecordLayout\n";
654   os << "   CIR Type:" << completeObjectType << "\n";
655   if (baseSubobjectType)
656     os << "   NonVirtualBaseCIRType:" << baseSubobjectType << "\n";
657   os << "   IsZeroInitializable:" << zeroInitializable << "\n";
658   os << "   BitFields:[\n";
659   std::vector<std::pair<unsigned, const CIRGenBitFieldInfo *>> bitInfo;
660   for (auto &[decl, info] : bitFields) {
661     const RecordDecl *rd = decl->getParent();
662     unsigned index = 0;
663     for (RecordDecl::field_iterator it = rd->field_begin(); *it != decl; ++it)
664       ++index;
665     bitInfo.push_back(std::make_pair(index, &info));
666   }
667   llvm::array_pod_sort(bitInfo.begin(), bitInfo.end());
668   for (std::pair<unsigned, const CIRGenBitFieldInfo *> &info : bitInfo) {
669     os.indent(4);
670     info.second->print(os);
671     os << "\n";
672   }
673   os << "   ]>\n";
674 }
675 
// Single-line dump of one bitfield's access parameters; offsets and sizes
// are in bits except the storage offsets, which are in characters.
void CIRGenBitFieldInfo::print(raw_ostream &os) const {
  os << "<CIRBitFieldInfo" << " name:" << name << " offset:" << offset
     << " size:" << size << " isSigned:" << isSigned
     << " storageSize:" << storageSize
     << " storageOffset:" << storageOffset.getQuantity()
     << " volatileOffset:" << volatileOffset
     << " volatileStorageSize:" << volatileStorageSize
     << " volatileStorageOffset:" << volatileStorageOffset.getQuantity() << ">";
}
685 
// Debugger convenience: print this layout to stderr.
void CIRGenRecordLayout::dump() const { print(llvm::errs()); }
687 
// Debugger convenience: print this bitfield info to stderr.
void CIRGenBitFieldInfo::dump() const { print(llvm::errs()); }
689 
// Lowers a union: every field maps to index 0, a single "most appropriate"
// storage type is chosen (highest alignment, then largest size), and padding
// is appended up to the union's total size. All member types are still
// recorded in fieldTypes for type-checking purposes.
void CIRRecordLowering::lowerUnion() {
  CharUnits layoutSize = astRecordLayout.getSize();
  mlir::Type storageType = nullptr;
  bool seenNamedMember = false;

  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.
  for (const FieldDecl *field : recordDecl->fields()) {
    mlir::Type fieldType;
    if (field->isBitField()) {
      // Zero-length bitfields contribute no storage in a union.
      if (field->isZeroLengthBitField())
        continue;
      fieldType = getBitfieldStorageType(field->getBitWidthValue());
      setBitFieldInfo(field, CharUnits::Zero(), fieldType);
    } else {
      fieldType = getStorageType(field);
    }

    // This maps a field to its index. For unions, the index is always 0.
    fieldIdxMap[field->getCanonicalDecl()] = 0;

    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a "better"
    // type, it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!seenNamedMember) {
      seenNamedMember = field->getIdentifier();
      if (!seenNamedMember)
        if (const RecordDecl *fieldRD = field->getType()->getAsRecordDecl())
          seenNamedMember = fieldRD->findFirstNamedDataMember();
      if (seenNamedMember && !isZeroInitializable(field)) {
        zeroInitializable = zeroInitializableAsBase = false;
        storageType = fieldType;
      }
    }

    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!zeroInitializable)
      continue;

    // Conditionally update our storage type if we've got a new "better" one.
    if (!storageType || getAlignment(fieldType) > getAlignment(storageType) ||
        (getAlignment(fieldType) == getAlignment(storageType) &&
         getSize(fieldType) > getSize(storageType)))
      storageType = fieldType;

    // NOTE(cir): Track all union member's types, not just the largest one. It
    // allows for proper type-checking and retains more info for analysis.
    fieldTypes.push_back(fieldType);
  }

  if (!storageType)
    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                       "No-storage Union NYI");
  // NOTE(review): if errorNYI returns rather than aborting, storageType is
  // still null here and the getSize call below would operate on a null type
  // -- confirm errorNYI's behavior for the no-storage case.

  if (layoutSize < getSize(storageType))
    storageType = getByteArrayType(layoutSize);
  else
    appendPaddingBytes(layoutSize - getSize(storageType));

  // Set packed if we need it.
  if (layoutSize % getAlignment(storageType))
    packed = true;
}
757 
/// The AAPCS standard requires that, when possible, bit-fields be
/// accessed using containers of the declared type width:
760 /// When a volatile bit-field is read, and its container does not overlap with
761 /// any non-bit-field member or any zero length bit-field member, its container
762 /// must be read exactly once using the access width appropriate to the type of
763 /// the container. When a volatile bit-field is written, and its container does
764 /// not overlap with any non-bit-field member or any zero-length bit-field
765 /// member, its container must be read exactly once and written exactly once
766 /// using the access width appropriate to the type of the container. The two
767 /// accesses are not atomic.
768 ///
769 /// Enforcing the width restriction can be disabled using
770 /// -fno-aapcs-bitfield-width.
computeVolatileBitfields()771 void CIRRecordLowering::computeVolatileBitfields() {
772   if (!isAAPCS() ||
773       !cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
774     return;
775 
776   assert(!cir::MissingFeatures::armComputeVolatileBitfields());
777 }
778 
accumulateBases(const CXXRecordDecl * cxxRecordDecl)779 void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
780   // If we've got a primary virtual base, we need to add it with the bases.
781   if (astRecordLayout.isPrimaryBaseVirtual()) {
782     cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
783                                        "accumulateBases: primary virtual base");
784   }
785 
786   // Accumulate the non-virtual bases.
787   for ([[maybe_unused]] const auto &base : cxxRecordDecl->bases()) {
788     if (base.isVirtual()) {
789       cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
790                                          "accumulateBases: virtual base");
791       continue;
792     }
793     // Bases can be zero-sized even if not technically empty if they
794     // contain only a trailing array member.
795     const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
796     if (!baseDecl->isEmpty() &&
797         !astContext.getASTRecordLayout(baseDecl).getNonVirtualSize().isZero()) {
798       members.push_back(MemberInfo(astRecordLayout.getBaseClassOffset(baseDecl),
799                                    MemberInfo::InfoKind::Base,
800                                    getStorageType(baseDecl), baseDecl));
801     }
802   }
803 }
804 
accumulateVPtrs()805 void CIRRecordLowering::accumulateVPtrs() {
806   if (astRecordLayout.hasOwnVFPtr())
807     cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
808                                        "accumulateVPtrs: hasOwnVFPtr");
809   if (astRecordLayout.hasOwnVBPtr())
810     cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
811                                        "accumulateVPtrs: hasOwnVBPtr");
812 }
813