xref: /freebsd/contrib/llvm-project/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp (revision 770cf0a5f02dc8983a89c6568d741fbc25baa999)
//===-- CIRGenExprAggregate.cpp - Emit CIR Code from Aggregate Expressions ===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Aggregate Expr nodes as CIR code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CIRGenBuilder.h"
14 #include "CIRGenFunction.h"
15 #include "CIRGenValue.h"
16 #include "clang/CIR/Dialect/IR/CIRAttrs.h"
17 
18 #include "clang/AST/Expr.h"
19 #include "clang/AST/RecordLayout.h"
20 #include "clang/AST/StmtVisitor.h"
21 #include <cstdint>
22 
23 using namespace clang;
24 using namespace clang::CIRGen;
25 
namespace {
/// Visitor that emits CIR for expressions of aggregate type. Results are
/// written into the destination slot `dest` rather than returned as values.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {

  CIRGenFunction &cgf; // Function whose body is currently being emitted.
  AggValueSlot dest;   // Destination slot that receives the aggregate value.

  // Calls `fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *e,
                           llvm::function_ref<RValue(ReturnValueSlot)> fn);

  // Returns `dest` when it refers to a real address. Materializing a
  // temporary slot for an ignored destination is not implemented yet
  // (presumably `t` would be used to size that temporary — confirm once
  // implemented).
  AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
    if (!dest.isIgnored())
      return dest;

    cgf.cgm.errorNYI(loc, "Slot for ignored address");
    return dest;
  }

public:
  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
      : cgf(cgf), dest(dest) {}

  /// Given an expression with aggregate type that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result into
  /// DestPtr.
  void emitAggLoadOfLValue(const Expr *e);

  /// Emits the explicit initializers in `args` into the array at `destPtr`,
  /// then covers any remaining elements with `arrayFiller` or zeros.
  void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
                     Expr *exprToVisit, ArrayRef<Expr *> args,
                     Expr *arrayFiller);

  /// Perform the final copy to DestPtr, if desired.
  void emitFinalDestCopy(QualType type, const LValue &src);

  /// Emits the initializer expression `e` into the lvalue `lv`, dispatching
  /// on the evaluation kind of the lvalue's type.
  void emitInitializationToLValue(Expr *e, LValue lv);

  /// Stores a null/zero value of the lvalue's type into `lv`.
  void emitNullInitializationToLValue(mlir::Location loc, LValue lv);

  // Non-virtual entry point; intentionally hides StmtVisitor::Visit so all
  // dispatch flows through this class.
  void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }

  void VisitCallExpr(const CallExpr *e);

  void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }

  void VisitInitListExpr(InitListExpr *e);
  void VisitCXXConstructExpr(const CXXConstructExpr *e);

  /// Shared implementation for init-list-like expressions: emits `args`
  /// into the destination slot obtained via ensureSlot().
  void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
                                       FieldDecl *initializedFieldInUnion,
                                       Expr *arrayFiller);
};

} // namespace
84 
85 static bool isTrivialFiller(Expr *e) {
86   if (!e)
87     return true;
88 
89   if (isa<ImplicitValueInitExpr>(e))
90     return true;
91 
92   if (auto *ile = dyn_cast<InitListExpr>(e)) {
93     if (ile->getNumInits())
94       return false;
95     return isTrivialFiller(ile->getArrayFiller());
96   }
97 
98   if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
99     return cons->getConstructor()->isDefaultConstructor() &&
100            cons->getConstructor()->isTrivial();
101 
102   return false;
103 }
104 
105 /// Given an expression with aggregate type that represents a value lvalue, this
106 /// method emits the address of the lvalue, then loads the result into DestPtr.
107 void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
108   LValue lv = cgf.emitLValue(e);
109 
110   // If the type of the l-value is atomic, then do an atomic load.
111   assert(!cir::MissingFeatures::opLoadStoreAtomic());
112 
113   emitFinalDestCopy(e->getType(), lv);
114 }
115 
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
                                   QualType arrayQTy, Expr *e,
                                   ArrayRef<Expr *> args, Expr *arrayFiller) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());

  // Number of explicitly provided initializers; remaining elements (up to
  // arrayTy.getSize()) are handled by the filler logic further down.
  const uint64_t numInitElements = args.size();

  const QualType elementType =
      cgf.getContext().getAsArrayType(arrayQTy)->getElementType();

  // Elements with non-trivial destruction would require cleanup
  // registration, which is not implemented yet.
  if (elementType.isDestructedType()) {
    cgf.cgm.errorNYI(loc, "dtorKind NYI");
    return;
  }

  const QualType elementPtrType = cgf.getContext().getPointerType(elementType);

  const mlir::Type cirElementType = cgf.convertType(elementType);
  const cir::PointerType cirElementPtrType =
      builder.getPointerTo(cirElementType);

  // Decay the array address to a pointer to its first element.
  auto begin = builder.create<cir::CastOp>(loc, cirElementPtrType,
                                           cir::CastKind::array_to_ptrdecay,
                                           destPtr.getPointer());

  const CharUnits elementSize =
      cgf.getContext().getTypeSizeInChars(elementType);
  const CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  mlir::Value element = begin;

  // Don't build the 'one' before the cycle to avoid
  // emitting the redundant `cir.const 1` instrs.
  // NOTE(review): despite its name, `one` also carries the loop index `i`
  // in the first loop below; the stride there is applied to `begin`, so the
  // result still addresses element i.
  mlir::Value one;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != numInitElements; ++i) {
    // Advance to the next element.  Element 0 is `begin` itself, so no
    // stride op is needed on the first iteration.
    if (i > 0) {
      one = builder.getConstantInt(loc, cgf.PtrDiffTy, i);
      element = builder.createPtrStride(loc, begin, one);
    }

    const Address address = Address(element, cirElementType, elementAlign);
    const LValue elementLV = cgf.makeAddrLValue(address, elementType);
    emitInitializationToLValue(args[i], elementLV);
  }

  const uint64_t numArrayElements = arrayTy.getSize();

  // Check whether there's a non-trivial array-fill expression.
  const bool hasTrivialFiller = isTrivialFiller(arrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (numInitElements != numArrayElements &&
      !(dest.isZeroed() && hasTrivialFiller &&
        cgf.getTypes().isZeroInitializable(elementType))) {
    // Advance to the start of the rest of the array.
    if (numInitElements) {
      one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
      element = builder.create<cir::PtrStrideOp>(loc, cirElementPtrType,
                                                 element, one);
    }

    // Allocate the temporary variable
    // to store the pointer to the first uninitialized element.
    const Address tmpAddr = cgf.createTempAlloca(
        cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
    LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
    cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);

    // TODO(CIR): Replace this part later with cir::DoWhileOp
    for (unsigned i = numInitElements; i != numArrayElements; ++i) {
      cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);

      // Emit the actual filler expression.
      const LValue elementLV = cgf.makeAddrLValue(
          Address(currentElement, cirElementType, elementAlign), elementType);

      if (arrayFiller)
        emitInitializationToLValue(arrayFiller, elementLV);
      else
        emitNullInitializationToLValue(loc, elementLV);

      // Advance the pointer and store it back to the temporary variable.
      one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
      cir::PtrStrideOp nextElement =
          builder.createPtrStride(loc, currentElement, one);
      cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
    }
  }
}
217 
218 /// Perform the final copy to destPtr, if desired.
219 void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src) {
220   // If dest is ignored, then we're evaluating an aggregate expression
221   // in a context that doesn't care about the result.  Note that loads
222   // from volatile l-values force the existence of a non-ignored
223   // destination.
224   if (dest.isIgnored())
225     return;
226 
227   cgf.cgm.errorNYI("emitFinalDestCopy: non-ignored dest is NYI");
228 }
229 
230 void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
231   const QualType type = lv.getType();
232 
233   if (isa<ImplicitValueInitExpr, CXXScalarValueInitExpr>(e)) {
234     const mlir::Location loc = e->getSourceRange().isValid()
235                                    ? cgf.getLoc(e->getSourceRange())
236                                    : *cgf.currSrcLoc;
237     return emitNullInitializationToLValue(loc, lv);
238   }
239 
240   if (isa<NoInitExpr>(e))
241     return;
242 
243   if (type->isReferenceType())
244     cgf.cgm.errorNYI("emitInitializationToLValue ReferenceType");
245 
246   switch (cgf.getEvaluationKind(type)) {
247   case cir::TEK_Complex:
248     cgf.cgm.errorNYI("emitInitializationToLValue TEK_Complex");
249     break;
250   case cir::TEK_Aggregate:
251     cgf.emitAggExpr(e, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
252                                                AggValueSlot::IsNotAliased,
253                                                AggValueSlot::MayOverlap,
254                                                dest.isZeroed()));
255 
256     return;
257   case cir::TEK_Scalar:
258     if (lv.isSimple())
259       cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
260     else
261       cgf.emitStoreThroughLValue(RValue::get(cgf.emitScalarExpr(e)), lv);
262     return;
263   }
264 }
265 
266 void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
267   AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
268   cgf.emitCXXConstructExpr(e, slot);
269 }
270 
271 void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
272                                                     LValue lv) {
273   const QualType type = lv.getType();
274 
275   // If the destination slot is already zeroed out before the aggregate is
276   // copied into it, we don't have to emit any zeros here.
277   if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
278     return;
279 
280   if (cgf.hasScalarEvaluationKind(type)) {
281     // For non-aggregates, we can store the appropriate null constant.
282     mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
283     if (lv.isSimple()) {
284       cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
285       return;
286     }
287 
288     cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
289     return;
290   }
291 
292   // There's a potential optimization opportunity in combining
293   // memsets; that would be easy for arrays, but relatively
294   // difficult for structures with the current code.
295   cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
296 }
297 
298 void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
299   if (e->getCallReturnType(cgf.getContext())->isReferenceType()) {
300     cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
301     return;
302   }
303 
304   withReturnValueSlot(
305       e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
306 }
307 
308 void AggExprEmitter::withReturnValueSlot(
309     const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
310   QualType retTy = e->getType();
311 
312   assert(!cir::MissingFeatures::aggValueSlotDestructedFlag());
313   bool requiresDestruction =
314       retTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
315   if (requiresDestruction)
316     cgf.cgm.errorNYI(
317         e->getSourceRange(),
318         "withReturnValueSlot: return value requiring destruction is NYI");
319 
320   // If it makes no observable difference, save a memcpy + temporary.
321   //
322   // We need to always provide our own temporary if destruction is required.
323   // Otherwise, fn will emit its own, notice that it's "unused", and end its
324   // lifetime before we have the chance to emit a proper destructor call.
325   assert(!cir::MissingFeatures::aggValueSlotAlias());
326   assert(!cir::MissingFeatures::aggValueSlotGC());
327 
328   Address retAddr = dest.getAddress();
329   assert(!cir::MissingFeatures::emitLifetimeMarkers());
330 
331   assert(!cir::MissingFeatures::aggValueSlotVolatile());
332   assert(!cir::MissingFeatures::aggValueSlotDestructedFlag());
333   fn(ReturnValueSlot(retAddr));
334 }
335 
336 void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
337   if (e->hadArrayRangeDesignator())
338     llvm_unreachable("GNU array range designator extension");
339 
340   if (e->isTransparent())
341     return Visit(e->getInit(0));
342 
343   visitCXXParenListOrInitListExpr(
344       e, e->inits(), e->getInitializedFieldInUnion(), e->getArrayFiller());
345 }
346 
347 void AggExprEmitter::visitCXXParenListOrInitListExpr(
348     Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
349     Expr *arrayFiller) {
350 
351   const AggValueSlot dest =
352       ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
353 
354   if (e->getType()->isConstantArrayType()) {
355     cir::ArrayType arrayTy =
356         cast<cir::ArrayType>(dest.getAddress().getElementType());
357     emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
358                   arrayFiller);
359     return;
360   }
361 
362   cgf.cgm.errorNYI(
363       "visitCXXParenListOrInitListExpr Record or VariableSizeArray type");
364 }
365 
366 // TODO(cir): This could be shared with classic codegen.
367 AggValueSlot::Overlap_t CIRGenFunction::getOverlapForBaseInit(
368     const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
369   // If the most-derived object is a field declared with [[no_unique_address]],
370   // the tail padding of any virtual base could be reused for other subobjects
371   // of that field's class.
372   if (isVirtual)
373     return AggValueSlot::MayOverlap;
374 
375   // If the base class is laid out entirely within the nvsize of the derived
376   // class, its tail padding cannot yet be initialized, so we can issue
377   // stores at the full width of the base class.
378   const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
379   if (layout.getBaseClassOffset(baseRD) +
380           getContext().getASTRecordLayout(baseRD).getSize() <=
381       layout.getNonVirtualSize())
382     return AggValueSlot::DoesNotOverlap;
383 
384   // The tail padding may contain values we need to preserve.
385   return AggValueSlot::MayOverlap;
386 }
387 
388 void CIRGenFunction::emitAggExpr(const Expr *e, AggValueSlot slot) {
389   AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
390 }
391 
392 LValue CIRGenFunction::emitAggExprToLValue(const Expr *e) {
393   assert(hasAggregateEvaluationKind(e->getType()) && "Invalid argument!");
394   Address temp = createMemTemp(e->getType(), getLoc(e->getSourceRange()));
395   LValue lv = makeAddrLValue(temp, e->getType());
396   emitAggExpr(e, AggValueSlot::forLValue(lv, AggValueSlot::IsNotDestructed,
397                                          AggValueSlot::IsNotAliased,
398                                          AggValueSlot::DoesNotOverlap));
399   return lv;
400 }
401