xref: /freebsd/contrib/llvm-project/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Emit Expr nodes with scalar CIR types as CIR code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CIRGenFunction.h"
14 #include "CIRGenValue.h"
15 
16 #include "clang/AST/Expr.h"
17 #include "clang/AST/StmtVisitor.h"
18 #include "clang/CIR/MissingFeatures.h"
19 
20 #include "mlir/IR/Location.h"
21 #include "mlir/IR/Value.h"
22 
23 #include <cassert>
24 #include <utility>
25 
26 using namespace clang;
27 using namespace clang::CIRGen;
28 
29 namespace {
30 
31 struct BinOpInfo {
32   mlir::Value lhs;
33   mlir::Value rhs;
34   SourceRange loc;
35   QualType fullType;             // Type of operands and result
36   QualType compType;             // Type used for computations. Element type
37                                  // for vectors, otherwise same as FullType.
38   BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
39   FPOptions fpfeatures;
40   const Expr *e; // Entire expr, for error unsupported.  May not be binop.
41 
42   /// Check if the binop computes a division or a remainder.
isDivRemOp__anon1373b7f80111::BinOpInfo43   bool isDivRemOp() const {
44     return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
45            opcode == BO_RemAssign;
46   }
47 
48   /// Check if the binop can result in integer overflow.
mayHaveIntegerOverflow__anon1373b7f80111::BinOpInfo49   bool mayHaveIntegerOverflow() const {
50     // Without constant input, we can't rule out overflow.
51     auto lhsci = dyn_cast<cir::ConstantOp>(lhs.getDefiningOp());
52     auto rhsci = dyn_cast<cir::ConstantOp>(rhs.getDefiningOp());
53     if (!lhsci || !rhsci)
54       return true;
55 
56     assert(!cir::MissingFeatures::mayHaveIntegerOverflow());
57     // TODO(cir): For now we just assume that we might overflow
58     return true;
59   }
60 
61   /// Check if at least one operand is a fixed point type. In such cases,
62   /// this operation did not follow usual arithmetic conversion and both
63   /// operands might not be of the same type.
isFixedPointOp__anon1373b7f80111::BinOpInfo64   bool isFixedPointOp() const {
65     // We cannot simply check the result type since comparison operations
66     // return an int.
67     if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
68       QualType lhstype = binOp->getLHS()->getType();
69       QualType rhstype = binOp->getRHS()->getType();
70       return lhstype->isFixedPointType() || rhstype->isFixedPointType();
71     }
72     if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
73       return unop->getSubExpr()->getType()->isFixedPointType();
74     return false;
75   }
76 };
77 
78 class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
79   CIRGenFunction &cgf;
80   CIRGenBuilderTy &builder;
81   bool ignoreResultAssign;
82 
83 public:
ScalarExprEmitter(CIRGenFunction & cgf,CIRGenBuilderTy & builder)84   ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
85       : cgf(cgf), builder(builder) {}
86 
87   //===--------------------------------------------------------------------===//
88   //                               Utilities
89   //===--------------------------------------------------------------------===//
90 
  /// Cast a floating-point \p result up to \p promotionType.
  mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
    return builder.createFloatingCast(result, cgf.convertType(promotionType));
  }
94 
  /// Cast a floating-point \p result back down to the expression's original
  /// type \p exprType after a promoted computation.
  mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
    return builder.createFloatingCast(result, cgf.convertType(exprType));
  }
98 
99   mlir::Value emitPromoted(const Expr *e, QualType promotionType);
100 
maybePromoteBoolResult(mlir::Value value,mlir::Type dstTy) const101   mlir::Value maybePromoteBoolResult(mlir::Value value,
102                                      mlir::Type dstTy) const {
103     if (mlir::isa<cir::IntType>(dstTy))
104       return builder.createBoolToInt(value, dstTy);
105     if (mlir::isa<cir::BoolType>(dstTy))
106       return value;
107     llvm_unreachable("Can only promote integer or boolean types");
108   }
109 
110   //===--------------------------------------------------------------------===//
111   //                            Visitor Methods
112   //===--------------------------------------------------------------------===//
113 
  /// Dispatch \p e to the matching Visit* method via StmtVisitor.
  mlir::Value Visit(Expr *e) {
    return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
  }
117 
  /// Non-expression statements must never reach this emitter.
  mlir::Value VisitStmt(Stmt *s) {
    llvm_unreachable("Statement passed to ScalarExprEmitter");
  }
121 
  /// Fallback for expression kinds with no dedicated visitor yet: report
  /// not-yet-implemented and return a null value.
  mlir::Value VisitExpr(Expr *e) {
    cgf.getCIRGenModule().errorNYI(
        e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
    return {};
  }
127 
  /// A pack-indexing expression emits the expression it selects.
  mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
    return Visit(e->getSelectedExpr());
  }
131 
VisitParenExpr(ParenExpr * pe)132   mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
133 
  /// _Generic selection: emit the branch chosen by Sema.
  mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    return Visit(ge->getResultExpr());
  }
137 
138   /// Emits the address of the l-value, then loads and returns the result.
  /// Emits the address of the l-value, then loads and returns the result.
  mlir::Value emitLoadOfLValue(const Expr *e) {
    LValue lv = cgf.emitLValue(e);
    // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V);
    return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
  }
144 
  /// Load from an already-computed l-value and return the scalar result.
  mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
    return cgf.emitLoadOfLValue(lv, loc).getValue();
  }
148 
149   // l-values
  // l-values
  /// Emit a declaration reference: fold to a constant when possible,
  /// otherwise load through the l-value.
  mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
    if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
      return cgf.emitScalarConstant(constant, e);

    return emitLoadOfLValue(e);
  }
156 
  /// Emit an integer literal as a cir.const with an IntAttr of the
  /// converted type.
  mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
    mlir::Type type = cgf.convertType(e->getType());
    return builder.create<cir::ConstantOp>(
        cgf.getLoc(e->getExprLoc()), cir::IntAttr::get(type, e->getValue()));
  }
162 
  /// Emit a floating-point literal as a cir.const with an FPAttr of the
  /// converted type.
  mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
    mlir::Type type = cgf.convertType(e->getType());
    assert(mlir::isa<cir::FPTypeInterface>(type) &&
           "expect floating-point type");
    return builder.create<cir::ConstantOp>(
        cgf.getLoc(e->getExprLoc()), cir::FPAttr::get(type, e->getValue()));
  }
170 
VisitCharacterLiteral(const CharacterLiteral * e)171   mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
172     mlir::Type ty = cgf.convertType(e->getType());
173     auto init = cir::IntAttr::get(ty, e->getValue());
174     return builder.create<cir::ConstantOp>(cgf.getLoc(e->getExprLoc()), init);
175   }
176 
  /// Emit true/false as a cir bool constant.
  mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
    return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
  }
180 
181   mlir::Value VisitCastExpr(CastExpr *e);
182   mlir::Value VisitCallExpr(const CallExpr *e);
183 
  /// Subscripting: vector element access lowers to cir.vec.extract; any
  /// other base is handled as an ordinary l-value load.
  mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    if (e->getBase()->getType()->isVectorType()) {
      assert(!cir::MissingFeatures::scalableVectors());

      const mlir::Location loc = cgf.getLoc(e->getSourceRange());
      const mlir::Value vecValue = Visit(e->getBase());
      const mlir::Value indexValue = Visit(e->getIdx());
      return cgf.builder.create<cir::VecExtractOp>(loc, vecValue, indexValue);
    }
    // Just load the lvalue formed by the subscript expression.
    return emitLoadOfLValue(e);
  }
196 
  /// Emit __builtin_shufflevector. The two-operand form shuffles with a
  /// runtime index vector; the variadic form uses compile-time constant
  /// indices collected into an ArrayAttr.
  mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
    if (e->getNumSubExprs() == 2) {
      // The undocumented form of __builtin_shufflevector.
      mlir::Value inputVec = Visit(e->getExpr(0));
      mlir::Value indexVec = Visit(e->getExpr(1));
      return cgf.builder.create<cir::VecShuffleDynamicOp>(
          cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
    }

    mlir::Value vec1 = Visit(e->getExpr(0));
    mlir::Value vec2 = Visit(e->getExpr(1));

    // The documented form of __builtin_shufflevector, where the indices are
    // a variable number of integer constants. The constants will be stored
    // in an ArrayAttr.
    SmallVector<mlir::Attribute, 8> indices;
    for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
      indices.push_back(
          cir::IntAttr::get(cgf.builder.getSInt64Ty(),
                            e->getExpr(i)
                                ->EvaluateKnownConstInt(cgf.getContext())
                                .getSExtValue()));
    }

    return cgf.builder.create<cir::VecShuffleOp>(
        cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()), vec1,
        vec2, cgf.builder.getArrayAttr(indices));
  }
225 
  mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
    // __builtin_convertvector is an element-wise cast, and is implemented as a
    // regular cast. The back end handles casts of vectors correctly.
    return emitScalarConversion(Visit(e->getSrcExpr()),
                                e->getSrcExpr()->getType(), e->getType(),
                                e->getSourceRange().getBegin());
  }
233 
234   mlir::Value VisitMemberExpr(MemberExpr *e);
235 
236   mlir::Value VisitInitListExpr(InitListExpr *e);
237 
  /// Explicit casts share the implicit-cast emission path.
  mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
    return VisitCastExpr(e);
  }
241 
  /// nullptr lowers to the null constant of its converted type.
  mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
    return cgf.cgm.emitNullConstant(e->getType(),
                                    cgf.getLoc(e->getSourceRange()));
  }
246 
247   /// Perform a pointer to boolean conversion.
  /// Perform a pointer to boolean conversion.
  mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
    // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
    // We might want to have a separate pass for these types of conversions.
    return cgf.getBuilder().createPtrToBoolCast(v);
  }
253 
  /// Convert a floating-point value to !cir.bool via a float_to_bool cast.
  mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
    cir::BoolType boolTy = builder.getBoolTy();
    return builder.create<cir::CastOp>(loc, boolTy,
                                       cir::CastKind::float_to_bool, src);
  }
259 
  /// Convert an integer value to !cir.bool via an int_to_bool cast.
  mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.
    // TODO: optimize this common case here or leave it for later
    // CIR passes?
    cir::BoolType boolTy = builder.getBoolTy();
    return builder.create<cir::CastOp>(loc, boolTy, cir::CastKind::int_to_bool,
                                       srcVal);
  }
270 
271   /// Convert the specified expression value to a boolean (!cir.bool) truth
272   /// value. This is equivalent to "Val != 0".
  /// Convert the specified expression value to a boolean (!cir.bool) truth
  /// value. This is equivalent to "Val != 0". Dispatches on the source type:
  /// float, member pointer (NYI), integer, then pointer as the remaining case.
  mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
                                   mlir::Location loc) {
    assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");

    if (srcType->isRealFloatingType())
      return emitFloatToBoolConversion(src, loc);

    if (llvm::isa<MemberPointerType>(srcType)) {
      cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
      return builder.getFalse(loc);
    }

    if (srcType->isIntegerType())
      return emitIntToBoolConversion(src, loc);

    // Anything left must already be a CIR pointer value.
    assert(::mlir::isa<cir::PointerType>(src.getType()));
    return emitPointerToBoolConversion(src, srcType);
  }
291 
292   // Emit a conversion from the specified type to the specified destination
293   // type, both of which are CIR scalar types.
294   struct ScalarConversionOpts {
295     bool treatBooleanAsSigned;
296     bool emitImplicitIntegerTruncationChecks;
297     bool emitImplicitIntegerSignChangeChecks;
298 
ScalarConversionOpts__anon1373b7f80111::ScalarExprEmitter::ScalarConversionOpts299     ScalarConversionOpts()
300         : treatBooleanAsSigned(false),
301           emitImplicitIntegerTruncationChecks(false),
302           emitImplicitIntegerSignChangeChecks(false) {}
303 
ScalarConversionOpts__anon1373b7f80111::ScalarExprEmitter::ScalarConversionOpts304     ScalarConversionOpts(clang::SanitizerSet sanOpts)
305         : treatBooleanAsSigned(false),
306           emitImplicitIntegerTruncationChecks(
307               sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
308           emitImplicitIntegerSignChangeChecks(
309               sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
310   };
311 
312   // Conversion from bool, integral, or floating-point to integral or
313   // floating-point. Conversions involving other types are handled elsewhere.
314   // Conversion to bool is handled elsewhere because that's a comparison against
315   // zero, not a simple cast. This handles both individual scalars and vectors.
  // Conversion from bool, integral, or floating-point to integral or
  // floating-point. Conversions involving other types are handled elsewhere.
  // Conversion to bool is handled elsewhere because that's a comparison against
  // zero, not a simple cast. This handles both individual scalars and vectors.
  mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
                             QualType dstType, mlir::Type srcTy,
                             mlir::Type dstTy, ScalarConversionOpts opts) {
    assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
           "Internal error: matrix types not handled by this function.");
    assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
             mlir::isa<mlir::IntegerType>(dstTy)) &&
           "Obsolete code. Don't use mlir::IntegerType with CIR.");

    // Remember the full (possibly vector) destination type; the CastKind is
    // chosen from the element types below, but the cast op itself must
    // produce the full type.
    mlir::Type fullDstTy = dstTy;
    if (mlir::isa<cir::VectorType>(srcTy) &&
        mlir::isa<cir::VectorType>(dstTy)) {
      // Use the element types of the vectors to figure out the CastKind.
      srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
      dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
    }

    std::optional<cir::CastKind> castKind;

    if (mlir::isa<cir::BoolType>(srcTy)) {
      if (opts.treatBooleanAsSigned)
        cgf.getCIRGenModule().errorNYI("signed bool");
      if (cgf.getBuilder().isInt(dstTy))
        castKind = cir::CastKind::bool_to_int;
      else if (mlir::isa<cir::FPTypeInterface>(dstTy))
        castKind = cir::CastKind::bool_to_float;
      else
        llvm_unreachable("Internal error: Cast to unexpected type");
    } else if (cgf.getBuilder().isInt(srcTy)) {
      if (cgf.getBuilder().isInt(dstTy))
        castKind = cir::CastKind::integral;
      else if (mlir::isa<cir::FPTypeInterface>(dstTy))
        castKind = cir::CastKind::int_to_float;
      else
        llvm_unreachable("Internal error: Cast to unexpected type");
    } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
      if (cgf.getBuilder().isInt(dstTy)) {
        // If we can't recognize overflow as undefined behavior, assume that
        // overflow saturates. This protects against normal optimizations if we
        // are compiling with non-standard FP semantics.
        if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
          cgf.getCIRGenModule().errorNYI("strict float cast overflow");
        assert(!cir::MissingFeatures::fpConstraints());
        castKind = cir::CastKind::float_to_int;
      } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
        // TODO: split this to createFPExt/createFPTrunc
        return builder.createFloatingCast(src, fullDstTy);
      } else {
        llvm_unreachable("Internal error: Cast to unexpected type");
      }
    } else {
      llvm_unreachable("Internal error: Cast from unexpected type");
    }

    assert(castKind.has_value() && "Internal error: CastKind not set.");
    return builder.create<cir::CastOp>(src.getLoc(), fullDstTy, *castKind, src);
  }
373 
  /// Substituted non-type template parameters emit their replacement
  /// expression.
  mlir::Value
  VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    return Visit(e->getReplacement());
  }
378 
379   mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
380   mlir::Value
381   VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
382 
383   // Unary Operators.
VisitUnaryPostDec(const UnaryOperator * e)384   mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
385     LValue lv = cgf.emitLValue(e->getSubExpr());
386     return emitScalarPrePostIncDec(e, lv, false, false);
387   }
VisitUnaryPostInc(const UnaryOperator * e)388   mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
389     LValue lv = cgf.emitLValue(e->getSubExpr());
390     return emitScalarPrePostIncDec(e, lv, true, false);
391   }
VisitUnaryPreDec(const UnaryOperator * e)392   mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
393     LValue lv = cgf.emitLValue(e->getSubExpr());
394     return emitScalarPrePostIncDec(e, lv, false, true);
395   }
VisitUnaryPreInc(const UnaryOperator * e)396   mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
397     LValue lv = cgf.emitLValue(e->getSubExpr());
398     return emitScalarPrePostIncDec(e, lv, true, true);
399   }
emitScalarPrePostIncDec(const UnaryOperator * e,LValue lv,bool isInc,bool isPre)400   mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
401                                       bool isInc, bool isPre) {
402     if (cgf.getLangOpts().OpenMP)
403       cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
404 
405     QualType type = e->getSubExpr()->getType();
406 
407     mlir::Value value;
408     mlir::Value input;
409 
410     if (type->getAs<AtomicType>()) {
411       cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
412       // TODO(cir): This is not correct, but it will produce reasonable code
413       // until atomic operations are implemented.
414       value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
415       input = value;
416     } else {
417       value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
418       input = value;
419     }
420 
421     // NOTE: When possible, more frequent cases are handled first.
422 
423     // Special case of integer increment that we have to check first: bool++.
424     // Due to promotion rules, we get:
425     //   bool++ -> bool = bool + 1
426     //          -> bool = (int)bool + 1
427     //          -> bool = ((int)bool + 1 != 0)
428     // An interesting aspect of this is that increment is always true.
429     // Decrement does not have this property.
430     if (isInc && type->isBooleanType()) {
431       value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
432     } else if (type->isIntegerType()) {
433       QualType promotedType;
434       bool canPerformLossyDemotionCheck = false;
435       if (cgf.getContext().isPromotableIntegerType(type)) {
436         promotedType = cgf.getContext().getPromotedIntegerType(type);
437         assert(promotedType != type && "Shouldn't promote to the same type.");
438         canPerformLossyDemotionCheck = true;
439         canPerformLossyDemotionCheck &=
440             cgf.getContext().getCanonicalType(type) !=
441             cgf.getContext().getCanonicalType(promotedType);
442         canPerformLossyDemotionCheck &=
443             type->isIntegerType() && promotedType->isIntegerType();
444 
445         // TODO(cir): Currently, we store bitwidths in CIR types only for
446         // integers. This might also be required for other types.
447 
448         assert(
449             (!canPerformLossyDemotionCheck ||
450              type->isSignedIntegerOrEnumerationType() ||
451              promotedType->isSignedIntegerOrEnumerationType() ||
452              mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
453                  mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth()) &&
454             "The following check expects that if we do promotion to different "
455             "underlying canonical type, at least one of the types (either "
456             "base or promoted) will be signed, or the bitwidths will match.");
457       }
458 
459       assert(!cir::MissingFeatures::sanitizers());
460       if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
461         value = emitIncDecConsiderOverflowBehavior(e, value, isInc);
462       } else {
463         cir::UnaryOpKind kind =
464             e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
465         // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
466         value = emitUnaryOp(e, kind, input, /*nsw=*/false);
467       }
468     } else if (const PointerType *ptr = type->getAs<PointerType>()) {
469       QualType type = ptr->getPointeeType();
470       if (cgf.getContext().getAsVariableArrayType(type)) {
471         // VLA types don't have constant size.
472         cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
473         return {};
474       } else if (type->isFunctionType()) {
475         // Arithmetic on function pointers (!) is just +-1.
476         cgf.cgm.errorNYI(e->getSourceRange(),
477                          "Pointer arithmetic on function pointer");
478         return {};
479       } else {
480         // For everything else, we can just do a simple increment.
481         mlir::Location loc = cgf.getLoc(e->getSourceRange());
482         CIRGenBuilderTy &builder = cgf.getBuilder();
483         int amount = (isInc ? 1 : -1);
484         mlir::Value amt = builder.getSInt32(amount, loc);
485         assert(!cir::MissingFeatures::sanitizers());
486         value = builder.createPtrStride(loc, value, amt);
487       }
488     } else if (type->isVectorType()) {
489       cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
490       return {};
491     } else if (type->isRealFloatingType()) {
492       assert(!cir::MissingFeatures::cgFPOptionsRAII());
493 
494       if (type->isHalfType() &&
495           !cgf.getContext().getLangOpts().NativeHalfType) {
496         cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
497         return {};
498       }
499 
500       if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
501         // Create the inc/dec operation.
502         // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
503         cir::UnaryOpKind kind =
504             (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec);
505         value = emitUnaryOp(e, kind, value);
506       } else {
507         cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
508         return {};
509       }
510     } else if (type->isFixedPointType()) {
511       cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
512       return {};
513     } else {
514       assert(type->castAs<ObjCObjectPointerType>());
515       cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
516       return {};
517     }
518 
519     CIRGenFunction::SourceLocRAIIObject sourceloc{
520         cgf, cgf.getLoc(e->getSourceRange())};
521 
522     // Store the updated result through the lvalue
523     if (lv.isBitField())
524       return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
525     else
526       cgf.emitStoreThroughLValue(RValue::get(value), lv);
527 
528     // If this is a postinc, return the value read from memory, otherwise use
529     // the updated value.
530     return isPre ? value : input;
531   }
532 
  /// Emit an inc/dec of a signed integer, picking the no-signed-wrap flag
  /// according to the language's signed-overflow behavior mode.
  mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
                                                 mlir::Value inVal,
                                                 bool isInc) {
    cir::UnaryOpKind kind =
        e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
    case LangOptions::SOB_Undefined:
      assert(!cir::MissingFeatures::sanitizers());
      return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
    case LangOptions::SOB_Trapping:
      if (!e->canOverflow())
        return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
      cgf.cgm.errorNYI(e->getSourceRange(), "inc/def overflow SOB_Trapping");
      return {};
    }
    llvm_unreachable("Unexpected signed overflow behavior kind");
  }
552 
  /// Address-of: emit the sub-expression's l-value pointer. Member pointers
  /// are not yet implemented and return a null pointer placeholder.
  mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
    if (llvm::isa<MemberPointerType>(e->getType())) {
      cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
      return builder.getNullPtr(cgf.convertType(e->getType()),
                                cgf.getLoc(e->getExprLoc()));
    }

    return cgf.emitLValue(e->getSubExpr()).getPointer();
  }
562 
VisitUnaryDeref(const UnaryOperator * e)563   mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
564     if (e->getType()->isVoidType())
565       return Visit(e->getSubExpr()); // the actual value should be unused
566     return emitLoadOfLValue(e);
567   }
568 
  /// Unary plus: emitted through the shared plus/minus path.
  mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
    return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus);
  }
572 
  /// Unary minus: emitted through the shared plus/minus path.
  mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
    return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus);
  }
576 
  /// Emit unary +/-: visit the operand (promoting it when a promotion type
  /// applies), emit the cir.unary op, and un-promote the result if needed.
  mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
                                   cir::UnaryOpKind kind) {
    ignoreResultAssign = false;

    QualType promotionType = getPromotionType(e->getSubExpr()->getType());

    mlir::Value operand;
    if (!promotionType.isNull())
      operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
    else
      operand = Visit(e->getSubExpr());

    // Signed integer negation may overflow; request no-signed-wrap semantics.
    bool nsw =
        kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();

    // NOTE: LLVM codegen will lower this directly to either a FNeg
    // or a Sub instruction.  In CIR this will be handled later in LowerToLLVM.
    mlir::Value result = emitUnaryOp(e, kind, operand, nsw);
    if (result && !promotionType.isNull())
      return emitUnPromotedValue(result, e->getType());
    return result;
  }
599 
  /// Build a cir.unary op of the given kind over \p input, preserving its
  /// type; \p nsw requests no-signed-wrap semantics.
  mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
                          mlir::Value input, bool nsw = false) {
    return builder.create<cir::UnaryOp>(
        cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
        input, nsw);
  }
606 
  /// Bitwise not (~): emit the operand and wrap it in a Not unary op.
  mlir::Value VisitUnaryNot(const UnaryOperator *e) {
    ignoreResultAssign = false;
    mlir::Value op = Visit(e->getSubExpr());
    return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
  }
612 
613   mlir::Value VisitUnaryLNot(const UnaryOperator *e);
614 
615   mlir::Value VisitUnaryReal(const UnaryOperator *e);
616 
617   mlir::Value VisitUnaryImag(const UnaryOperator *e);
618 
VisitCXXThisExpr(CXXThisExpr * te)619   mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
620 
  /// `new` expressions are delegated to the CIRGenFunction implementation.
  mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
    return cgf.emitCXXNewExpr(e);
  }
624 
625   /// Emit a conversion from the specified type to the specified destination
626   /// type, both of which are CIR scalar types.
627   /// TODO: do we need ScalarConversionOpts here? Should be done in another
628   /// pass.
629   mlir::Value
emitScalarConversion(mlir::Value src,QualType srcType,QualType dstType,SourceLocation loc,ScalarConversionOpts opts=ScalarConversionOpts ())630   emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
631                        SourceLocation loc,
632                        ScalarConversionOpts opts = ScalarConversionOpts()) {
633     // All conversions involving fixed point types should be handled by the
634     // emitFixedPoint family functions. This is done to prevent bloating up
635     // this function more, and although fixed point numbers are represented by
636     // integers, we do not want to follow any logic that assumes they should be
637     // treated as integers.
638     // TODO(leonardchan): When necessary, add another if statement checking for
    // conversions to fixed point types from other types.
641     if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
642       cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
643       return {};
644     }
645 
646     srcType = srcType.getCanonicalType();
647     dstType = dstType.getCanonicalType();
648     if (srcType == dstType) {
649       if (opts.emitImplicitIntegerSignChangeChecks)
650         cgf.getCIRGenModule().errorNYI(loc,
651                                        "implicit integer sign change checks");
652       return src;
653     }
654 
655     if (dstType->isVoidType())
656       return {};
657 
658     mlir::Type mlirSrcType = src.getType();
659 
660     // Handle conversions to bool first, they are special: comparisons against
661     // 0.
662     if (dstType->isBooleanType())
663       return emitConversionToBool(src, srcType, cgf.getLoc(loc));
664 
665     mlir::Type mlirDstType = cgf.convertType(dstType);
666 
667     if (srcType->isHalfType() &&
668         !cgf.getContext().getLangOpts().NativeHalfType) {
669       // Cast to FP using the intrinsic if the half type itself isn't supported.
670       if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
671         if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
672           cgf.getCIRGenModule().errorNYI(loc,
673                                          "cast via llvm.convert.from.fp16");
674       } else {
675         // Cast to other types through float, using either the intrinsic or
676         // FPExt, depending on whether the half type itself is supported (as
677         // opposed to operations on half, available with NativeHalfType).
678         if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
679           cgf.getCIRGenModule().errorNYI(loc,
680                                          "cast via llvm.convert.from.fp16");
681         // FIXME(cir): For now lets pretend we shouldn't use the conversion
682         // intrinsics and insert a cast here unconditionally.
683         src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
684                                  cgf.FloatTy);
685         srcType = cgf.getContext().FloatTy;
686         mlirSrcType = cgf.FloatTy;
687       }
688     }
689 
690     // TODO(cir): LLVM codegen ignore conversions like int -> uint,
691     // is there anything to be done for CIR here?
692     if (mlirSrcType == mlirDstType) {
693       if (opts.emitImplicitIntegerSignChangeChecks)
694         cgf.getCIRGenModule().errorNYI(loc,
695                                        "implicit integer sign change checks");
696       return src;
697     }
698 
699     // Handle pointer conversions next: pointers can only be converted to/from
700     // other pointers and integers. Check for pointer types in terms of LLVM, as
701     // some native types (like Obj-C id) may map to a pointer type.
702     if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
703       cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
704       return builder.getNullPtr(dstPT, src.getLoc());
705     }
706 
707     if (isa<cir::PointerType>(mlirSrcType)) {
708       // Must be an ptr to int cast.
709       assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
710       return builder.createPtrToInt(src, mlirDstType);
711     }
712 
713     // A scalar can be splatted to an extended vector of the same element type
714     if (dstType->isExtVectorType() && !srcType->isVectorType()) {
715       // Sema should add casts to make sure that the source expression's type
716       // is the same as the vector's element type (sans qualifiers)
717       assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
718                  srcType.getTypePtr() &&
719              "Splatted expr doesn't match with vector element type?");
720 
721       cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
722       return {};
723     }
724 
725     if (srcType->isMatrixType() && dstType->isMatrixType()) {
726       cgf.getCIRGenModule().errorNYI(loc,
727                                      "matrix type to matrix type conversion");
728       return {};
729     }
730     assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
731            "Internal error: conversion between matrix type and scalar type");
732 
733     // Finally, we have the arithmetic types or vectors of arithmetic types.
734     mlir::Value res = nullptr;
735     mlir::Type resTy = mlirDstType;
736 
737     res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
738 
739     if (mlirDstType != resTy) {
740       if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
741         cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
742       }
743       // FIXME(cir): For now we never use FP16 conversion intrinsics even if
744       // required by the target. Change that once this is implemented
745       res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
746                                resTy);
747     }
748 
749     if (opts.emitImplicitIntegerTruncationChecks)
750       cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
751 
752     if (opts.emitImplicitIntegerSignChangeChecks)
753       cgf.getCIRGenModule().errorNYI(loc,
754                                      "implicit integer sign change checks");
755 
756     return res;
757   }
758 
emitBinOps(const BinaryOperator * e,QualType promotionType=QualType ())759   BinOpInfo emitBinOps(const BinaryOperator *e,
760                        QualType promotionType = QualType()) {
761     BinOpInfo result;
762     result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
763     result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
764     if (!promotionType.isNull())
765       result.fullType = promotionType;
766     else
767       result.fullType = e->getType();
768     result.compType = result.fullType;
769     if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
770       result.compType = vecType->getElementType();
771     }
772     result.opcode = e->getOpcode();
773     result.loc = e->getSourceRange();
774     // TODO(cir): Result.FPFeatures
775     assert(!cir::MissingFeatures::cgFPOptionsRAII());
776     result.e = e;
777     return result;
778   }
779 
  // Emitters for the arithmetic, shift and bitwise binary operators. Each
  // consumes a fully populated BinOpInfo and returns the result value; the
  // bodies are defined out-of-line below the class.
  mlir::Value emitMul(const BinOpInfo &ops);
  mlir::Value emitDiv(const BinOpInfo &ops);
  mlir::Value emitRem(const BinOpInfo &ops);
  mlir::Value emitAdd(const BinOpInfo &ops);
  mlir::Value emitSub(const BinOpInfo &ops);
  mlir::Value emitShl(const BinOpInfo &ops);
  mlir::Value emitShr(const BinOpInfo &ops);
  mlir::Value emitAnd(const BinOpInfo &ops);
  mlir::Value emitXor(const BinOpInfo &ops);
  mlir::Value emitOr(const BinOpInfo &ops);

  // Compound assignment support: the lvalue form (load, apply `f`, store,
  // with `result` receiving the stored value) and the rvalue wrapper used by
  // the VisitBin##OP##Assign visitors.
  LValue emitCompoundAssignLValue(
      const CompoundAssignOperator *e,
      mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
      mlir::Value &result);
  mlir::Value
  emitCompoundAssign(const CompoundAssignOperator *e,
                     mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
798 
799   // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
800   // codegen.
getPromotionType(QualType ty)801   QualType getPromotionType(QualType ty) {
802     if (ty->getAs<ComplexType>()) {
803       assert(!cir::MissingFeatures::complexType());
804       cgf.cgm.errorNYI("promotion to complex type");
805       return QualType();
806     }
807     if (ty.UseExcessPrecision(cgf.getContext())) {
808       if (ty->getAs<VectorType>()) {
809         assert(!cir::MissingFeatures::vectorType());
810         cgf.cgm.errorNYI("promotion to vector type");
811         return QualType();
812       }
813       return cgf.getContext().FloatTy;
814     }
815     return QualType();
816   }
817 
// Binary operators and binary compound assignment operators.
// For each opcode OP this expands to two visitors: VisitBinOP (compute the
// promotion type, emit the operands and operation, then un-promote a non-null
// result back to the expression type) and VisitBinOPAssign (dispatch into the
// compound-assign path with the matching emitter).
#define HANDLEBINOP(OP)                                                        \
  mlir::Value VisitBin##OP(const BinaryOperator *e) {                          \
    QualType promotionTy = getPromotionType(e->getType());                     \
    auto result = emit##OP(emitBinOps(e, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = emitUnPromotedValue(result, e->getType());                      \
    return result;                                                             \
  }                                                                            \
  mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) {          \
    return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP);                \
  }

  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
842 
843   mlir::Value emitCmp(const BinaryOperator *e) {
844     const mlir::Location loc = cgf.getLoc(e->getExprLoc());
845     mlir::Value result;
846     QualType lhsTy = e->getLHS()->getType();
847     QualType rhsTy = e->getRHS()->getType();
848 
849     auto clangCmpToCIRCmp =
850         [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
851       switch (clangCmp) {
852       case BO_LT:
853         return cir::CmpOpKind::lt;
854       case BO_GT:
855         return cir::CmpOpKind::gt;
856       case BO_LE:
857         return cir::CmpOpKind::le;
858       case BO_GE:
859         return cir::CmpOpKind::ge;
860       case BO_EQ:
861         return cir::CmpOpKind::eq;
862       case BO_NE:
863         return cir::CmpOpKind::ne;
864       default:
865         llvm_unreachable("unsupported comparison kind for cir.cmp");
866       }
867     };
868 
869     cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
870     if (lhsTy->getAs<MemberPointerType>()) {
871       assert(!cir::MissingFeatures::dataMemberType());
872       assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
873       mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
874       mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
875       result = builder.createCompare(loc, kind, lhs, rhs);
876     } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
877       BinOpInfo boInfo = emitBinOps(e);
878       mlir::Value lhs = boInfo.lhs;
879       mlir::Value rhs = boInfo.rhs;
880 
881       if (lhsTy->isVectorType()) {
882         if (!e->getType()->isVectorType()) {
883           // If AltiVec, the comparison results in a numeric type, so we use
884           // intrinsics comparing vectors and giving 0 or 1 as a result
885           cgf.cgm.errorNYI(loc, "AltiVec comparison");
886         } else {
887           // Other kinds of vectors. Element-wise comparison returning
888           // a vector.
889           result = builder.create<cir::VecCmpOp>(
890               cgf.getLoc(boInfo.loc), cgf.convertType(boInfo.fullType), kind,
891               boInfo.lhs, boInfo.rhs);
892         }
893       } else if (boInfo.isFixedPointOp()) {
894         assert(!cir::MissingFeatures::fixedPointType());
895         cgf.cgm.errorNYI(loc, "fixed point comparisons");
896         result = builder.getBool(false, loc);
897       } else {
898         // integers and pointers
899         if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
900             mlir::isa<cir::PointerType>(lhs.getType()) &&
901             mlir::isa<cir::PointerType>(rhs.getType())) {
902           cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
903         }
904 
905         cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
906         result = builder.createCompare(loc, kind, lhs, rhs);
907       }
908     } else {
909       // Complex Comparison: can only be an equality comparison.
910       assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
911 
912       BinOpInfo boInfo = emitBinOps(e);
913       result = builder.create<cir::CmpOp>(loc, kind, boInfo.lhs, boInfo.rhs);
914     }
915 
916     return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
917                                 e->getExprLoc());
918   }
919 
// Comparisons.
// Every comparison visitor funnels into emitCmp, which handles operand
// classification (member pointers, vectors, complex) and kind mapping.
#define VISITCOMP(CODE)                                                        \
  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
  VISITCOMP(LT)
  VISITCOMP(GT)
  VISITCOMP(LE)
  VISITCOMP(GE)
  VISITCOMP(EQ)
  VISITCOMP(NE)
#undef VISITCOMP
930 
  /// Emit a simple assignment `lhs = rhs`, returning the assigned value
  /// (or nullptr when the result is known to be ignored).
  mlir::Value VisitBinAssign(const BinaryOperator *e) {
    // Consume the ignore-result flag so nested visits don't observe it.
    const bool ignore = std::exchange(ignoreResultAssign, false);

    mlir::Value rhs;
    LValue lhs;

    switch (e->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Autoreleasing:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      // Obj-C lifetime-qualified assignments are not implemented yet.
      assert(!cir::MissingFeatures::objCLifetime());
      break;
    case Qualifiers::OCL_None:
      // __block variables need to have the rhs evaluated first, plus this
      // should improve codegen just a little.
      rhs = Visit(e->getRHS());
      assert(!cir::MissingFeatures::sanitizers());
      // TODO(cir): This needs to be emitCheckedLValue() once we support
      // sanitizers
      lhs = cgf.emitLValue(e->getLHS());

      // Store the value into the LHS. Bit-fields are handled specially because
      // the result is altered by the store, i.e., [C99 6.5.16p1]
      // 'An assignment expression has the value of the left operand after the
      // assignment...'.
      if (lhs.isBitField()) {
        rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
      } else {
        cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
        CIRGenFunction::SourceLocRAIIObject loc{
            cgf, cgf.getLoc(e->getSourceRange())};
        cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
      }
    }

    // If the result is clearly ignored, return now.
    if (ignore)
      return nullptr;

    // The result of an assignment in C is the assigned r-value.
    if (!cgf.getLangOpts().CPlusPlus)
      return rhs;

    // If the lvalue is non-volatile, return the computed value of the
    // assignment.
    if (!lhs.isVolatile())
      return rhs;

    // Otherwise, reload the value.
    return emitLoadOfLValue(lhs, e->getExprLoc());
  }
983 
VisitBinComma(const BinaryOperator * e)984   mlir::Value VisitBinComma(const BinaryOperator *e) {
985     cgf.emitIgnoredExpr(e->getLHS());
986     // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
987     return Visit(e->getRHS());
988   }
989 
VisitBinLAnd(const clang::BinaryOperator * e)990   mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
991     if (e->getType()->isVectorType()) {
992       assert(!cir::MissingFeatures::vectorType());
993       return {};
994     }
995 
996     assert(!cir::MissingFeatures::instrumentation());
997     mlir::Type resTy = cgf.convertType(e->getType());
998     mlir::Location loc = cgf.getLoc(e->getExprLoc());
999 
1000     CIRGenFunction::ConditionalEvaluation eval(cgf);
1001 
1002     mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1003     auto resOp = builder.create<cir::TernaryOp>(
1004         loc, lhsCondV, /*trueBuilder=*/
1005         [&](mlir::OpBuilder &b, mlir::Location loc) {
1006           CIRGenFunction::LexicalScope lexScope{cgf, loc,
1007                                                 b.getInsertionBlock()};
1008           cgf.curLexScope->setAsTernary();
1009           b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
1010         },
1011         /*falseBuilder*/
1012         [&](mlir::OpBuilder &b, mlir::Location loc) {
1013           CIRGenFunction::LexicalScope lexScope{cgf, loc,
1014                                                 b.getInsertionBlock()};
1015           cgf.curLexScope->setAsTernary();
1016           auto res = b.create<cir::ConstantOp>(loc, builder.getFalseAttr());
1017           b.create<cir::YieldOp>(loc, res.getRes());
1018         });
1019     return maybePromoteBoolResult(resOp.getResult(), resTy);
1020   }
1021 
VisitBinLOr(const clang::BinaryOperator * e)1022   mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1023     if (e->getType()->isVectorType()) {
1024       assert(!cir::MissingFeatures::vectorType());
1025       return {};
1026     }
1027 
1028     assert(!cir::MissingFeatures::instrumentation());
1029     mlir::Type resTy = cgf.convertType(e->getType());
1030     mlir::Location loc = cgf.getLoc(e->getExprLoc());
1031 
1032     CIRGenFunction::ConditionalEvaluation eval(cgf);
1033 
1034     mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1035     auto resOp = builder.create<cir::TernaryOp>(
1036         loc, lhsCondV, /*trueBuilder=*/
1037         [&](mlir::OpBuilder &b, mlir::Location loc) {
1038           CIRGenFunction::LexicalScope lexScope{cgf, loc,
1039                                                 b.getInsertionBlock()};
1040           cgf.curLexScope->setAsTernary();
1041           auto res = b.create<cir::ConstantOp>(loc, builder.getTrueAttr());
1042           b.create<cir::YieldOp>(loc, res.getRes());
1043         },
1044         /*falseBuilder*/
1045         [&](mlir::OpBuilder &b, mlir::Location loc) {
1046           CIRGenFunction::LexicalScope lexScope{cgf, loc,
1047                                                 b.getInsertionBlock()};
1048           cgf.curLexScope->setAsTernary();
1049           b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
1050         });
1051 
1052     return maybePromoteBoolResult(resOp.getResult(), resTy);
1053   }
1054 };
1055 
emitCompoundAssignLValue(const CompoundAssignOperator * e,mlir::Value (ScalarExprEmitter::* func)(const BinOpInfo &),mlir::Value & result)1056 LValue ScalarExprEmitter::emitCompoundAssignLValue(
1057     const CompoundAssignOperator *e,
1058     mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1059     mlir::Value &result) {
1060   QualType lhsTy = e->getLHS()->getType();
1061   BinOpInfo opInfo;
1062 
1063   if (e->getComputationResultType()->isAnyComplexType()) {
1064     cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
1065     return LValue();
1066   }
1067 
1068   // Emit the RHS first.  __block variables need to have the rhs evaluated
1069   // first, plus this should improve codegen a little.
1070 
1071   QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1072   if (promotionTypeCR.isNull())
1073     promotionTypeCR = e->getComputationResultType();
1074 
1075   QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1076   QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1077 
1078   if (!promotionTypeRHS.isNull())
1079     opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1080   else
1081     opInfo.rhs = Visit(e->getRHS());
1082 
1083   opInfo.fullType = promotionTypeCR;
1084   opInfo.compType = opInfo.fullType;
1085   if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1086     opInfo.compType = vecType->getElementType();
1087   opInfo.opcode = e->getOpcode();
1088   opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1089   opInfo.e = e;
1090   opInfo.loc = e->getSourceRange();
1091 
1092   // Load/convert the LHS
1093   LValue lhsLV = cgf.emitLValue(e->getLHS());
1094 
1095   if (lhsTy->getAs<AtomicType>()) {
1096     cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1097     return LValue();
1098   }
1099 
1100   opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1101 
1102   CIRGenFunction::SourceLocRAIIObject sourceloc{
1103       cgf, cgf.getLoc(e->getSourceRange())};
1104   SourceLocation loc = e->getExprLoc();
1105   if (!promotionTypeLHS.isNull())
1106     opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1107   else
1108     opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1109                                       e->getComputationLHSType(), loc);
1110 
1111   // Expand the binary operator.
1112   result = (this->*func)(opInfo);
1113 
1114   // Convert the result back to the LHS type,
1115   // potentially with Implicit Conversion sanitizer check.
1116   result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1117                                 ScalarConversionOpts(cgf.sanOpts));
1118 
1119   // Store the result value into the LHS lvalue. Bit-fields are handled
1120   // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1121   // 'An assignment expression has the value of the left operand after the
1122   // assignment...'.
1123   if (lhsLV.isBitField())
1124     cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue");
1125   else
1126     cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1127 
1128   if (cgf.getLangOpts().OpenMP)
1129     cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1130 
1131   return lhsLV;
1132 }
1133 
emitPromoted(const Expr * e,QualType promotionType)1134 mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1135                                             QualType promotionType) {
1136   e = e->IgnoreParens();
1137   if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1138     switch (bo->getOpcode()) {
1139 #define HANDLE_BINOP(OP)                                                       \
1140   case BO_##OP:                                                                \
1141     return emit##OP(emitBinOps(bo, promotionType));
1142       HANDLE_BINOP(Add)
1143       HANDLE_BINOP(Sub)
1144       HANDLE_BINOP(Mul)
1145       HANDLE_BINOP(Div)
1146 #undef HANDLE_BINOP
1147     default:
1148       break;
1149     }
1150   } else if (isa<UnaryOperator>(e)) {
1151     cgf.cgm.errorNYI(e->getSourceRange(), "unary operators");
1152     return {};
1153   }
1154   mlir::Value result = Visit(const_cast<Expr *>(e));
1155   if (result) {
1156     if (!promotionType.isNull())
1157       return emitPromotedValue(result, promotionType);
1158     return emitUnPromotedValue(result, e->getType());
1159   }
1160   return result;
1161 }
1162 
emitCompoundAssign(const CompoundAssignOperator * e,mlir::Value (ScalarExprEmitter::* func)(const BinOpInfo &))1163 mlir::Value ScalarExprEmitter::emitCompoundAssign(
1164     const CompoundAssignOperator *e,
1165     mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1166 
1167   bool ignore = std::exchange(ignoreResultAssign, false);
1168   mlir::Value rhs;
1169   LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1170 
1171   // If the result is clearly ignored, return now.
1172   if (ignore)
1173     return {};
1174 
1175   // The result of an assignment in C is the assigned r-value.
1176   if (!cgf.getLangOpts().CPlusPlus)
1177     return rhs;
1178 
1179   // If the lvalue is non-volatile, return the computed value of the assignment.
1180   if (!lhs.isVolatile())
1181     return rhs;
1182 
1183   // Otherwise, reload the value.
1184   return emitLoadOfLValue(lhs, e->getExprLoc());
1185 }
1186 
1187 } // namespace
1188 
/// Emit the lvalue for a compound assignment expression, dispatching on the
/// compound opcode to the matching binary-op emitter. `result` receives the
/// stored r-value but is unused by this entry point.
LValue
CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
  ScalarExprEmitter emitter(*this, builder);
  mlir::Value result;
  switch (e->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op,   \
                                            result)
    COMPOUND_OP(Mul);
    COMPOUND_OP(Div);
    COMPOUND_OP(Rem);
    COMPOUND_OP(Add);
    COMPOUND_OP(Sub);
    COMPOUND_OP(Shl);
    COMPOUND_OP(Shr);
    COMPOUND_OP(And);
    COMPOUND_OP(Xor);
    COMPOUND_OP(Or);
#undef COMPOUND_OP

  // Non-compound opcodes are invalid here; enumerate them explicitly so the
  // switch stays fully covered and new opcodes trigger a compiler warning.
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }
  llvm_unreachable("Unhandled compound assignment operator");
}
1237 
1238 /// Emit the computation of the specified expression of scalar type.
emitScalarExpr(const Expr * e)1239 mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) {
1240   assert(e && hasScalarEvaluationKind(e->getType()) &&
1241          "Invalid scalar expression to emit");
1242 
1243   return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1244 }
1245 
emitPromotedScalarExpr(const Expr * e,QualType promotionType)1246 mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1247                                                    QualType promotionType) {
1248   if (!promotionType.isNull())
1249     return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1250   return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1251 }
1252 
mustVisitNullValue(const Expr * e)1253 [[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1254   // If a null pointer expression's type is the C++0x nullptr_t and
1255   // the expression is not a simple literal, it must be evaluated
1256   // for its potential side effects.
1257   if (isa<IntegerLiteral>(e) || isa<CXXNullPtrLiteralExpr>(e))
1258     return false;
1259   return e->getType()->isNullPtrType();
1260 }
1261 
1262 /// If \p e is a widened promoted integer, get its base (unpromoted) type.
1263 static std::optional<QualType>
getUnwidenedIntegerType(const ASTContext & astContext,const Expr * e)1264 getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1265   const Expr *base = e->IgnoreImpCasts();
1266   if (e == base)
1267     return std::nullopt;
1268 
1269   QualType baseTy = base->getType();
1270   if (!astContext.isPromotableIntegerType(baseTy) ||
1271       astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1272     return std::nullopt;
1273 
1274   return baseTy;
1275 }
1276 
1277 /// Check if \p e is a widened promoted integer.
isWidenedIntegerOp(const ASTContext & astContext,const Expr * e)1278 [[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1279                                                 const Expr *e) {
1280   return getUnwidenedIntegerType(astContext, e).has_value();
1281 }
1282 
1283 /// Check if we can skip the overflow check for \p Op.
canElideOverflowCheck(const ASTContext & astContext,const BinOpInfo & op)1284 [[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1285                                                    const BinOpInfo &op) {
1286   assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1287          "Expected a unary or binary operator");
1288 
1289   // If the binop has constant inputs and we can prove there is no overflow,
1290   // we can elide the overflow check.
1291   if (!op.mayHaveIntegerOverflow())
1292     return true;
1293 
1294   // If a unary op has a widened operand, the op cannot overflow.
1295   if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1296     return !uo->canOverflow();
1297 
1298   // We usually don't need overflow checks for binops with widened operands.
1299   // Multiplication with promoted unsigned operands is a special case.
1300   const auto *bo = cast<BinaryOperator>(op.e);
1301   std::optional<QualType> optionalLHSTy =
1302       getUnwidenedIntegerType(astContext, bo->getLHS());
1303   if (!optionalLHSTy)
1304     return false;
1305 
1306   std::optional<QualType> optionalRHSTy =
1307       getUnwidenedIntegerType(astContext, bo->getRHS());
1308   if (!optionalRHSTy)
1309     return false;
1310 
1311   QualType lhsTy = *optionalLHSTy;
1312   QualType rhsTy = *optionalRHSTy;
1313 
1314   // This is the simple case: binops without unsigned multiplication, and with
1315   // widened operands. No overflow check is needed here.
1316   if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1317       !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1318     return true;
1319 
1320   // For unsigned multiplication the overflow check can be elided if either one
1321   // of the unpromoted types are less than half the size of the promoted type.
1322   unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1323   return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1324          (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1325 }
1326 
/// Emit pointer + index arithmetic.
/// Lowers `ptr + i` / `ptr - i` (with the pointer on either side for
/// addition) to a cir.ptr_stride, after handling the null-pointer-arithmetic
/// extension and rejecting NYI element types.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
                                         const BinOpInfo &op,
                                         bool isSubtraction) {
  // Must have binary (not unary) expr here.  Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.e);

  mlir::Value pointer = op.lhs;
  Expr *pointerOperand = expr->getLHS();
  mlir::Value index = op.rhs;
  Expr *indexOperand = expr->getRHS();

  // In the case of subtraction, the FE has ensured that the LHS is always the
  // pointer. However, addition can have the pointer on either side. We will
  // always have a pointer operand and an integer operand, so if the LHS wasn't
  // a pointer, we need to swap our values.
  if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }
  assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
         "Need a pointer operand");
  assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm.  This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(
          cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
    return cgf.getBuilder().createIntToPtr(index, pointer.getType());

  // Differently from LLVM codegen, ABI bits for index sizes is handled during
  // LLVM lowering.

  // If this is subtraction, negate the index. ptr_stride always advances by
  // +index, so subtraction becomes a stride by the negated index.
  if (isSubtraction)
    index = cgf.getBuilder().createNeg(index);

  assert(!cir::MissingFeatures::sanitizers());

  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
    return nullptr;
  }

  QualType elementType = pointerType->getPointeeType();
  if (cgf.getContext().getAsVariableArrayType(elementType)) {
    cgf.cgm.errorNYI("variable array type");
    return nullptr;
  }

  // void*/function-pointer arithmetic (a GNU extension) is not lowered yet.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    cgf.cgm.errorNYI("void* or function pointer arithmetic");
    return nullptr;
  }

  assert(!cir::MissingFeatures::sanitizers());
  return cgf.getBuilder().create<cir::PtrStrideOp>(
      cgf.getLoc(op.e->getExprLoc()), pointer.getType(), pointer, index);
}
1404 
emitMul(const BinOpInfo & ops)1405 mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1406   const mlir::Location loc = cgf.getLoc(ops.loc);
1407   if (ops.compType->isSignedIntegerOrEnumerationType()) {
1408     switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1409     case LangOptions::SOB_Defined:
1410       if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1411         return builder.createMul(loc, ops.lhs, ops.rhs);
1412       [[fallthrough]];
1413     case LangOptions::SOB_Undefined:
1414       if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1415         return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1416       [[fallthrough]];
1417     case LangOptions::SOB_Trapping:
1418       if (canElideOverflowCheck(cgf.getContext(), ops))
1419         return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1420       cgf.cgm.errorNYI("sanitizers");
1421     }
1422   }
1423   if (ops.fullType->isConstantMatrixType()) {
1424     assert(!cir::MissingFeatures::matrixType());
1425     cgf.cgm.errorNYI("matrix types");
1426     return nullptr;
1427   }
1428   if (ops.compType->isUnsignedIntegerType() &&
1429       cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1430       !canElideOverflowCheck(cgf.getContext(), ops))
1431     cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1432 
1433   if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1434     assert(!cir::MissingFeatures::cgFPOptionsRAII());
1435     return builder.createFMul(loc, ops.lhs, ops.rhs);
1436   }
1437 
1438   if (ops.isFixedPointOp()) {
1439     assert(!cir::MissingFeatures::fixedPointType());
1440     cgf.cgm.errorNYI("fixed point");
1441     return nullptr;
1442   }
1443 
1444   return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1445                                     cgf.convertType(ops.fullType),
1446                                     cir::BinOpKind::Mul, ops.lhs, ops.rhs);
1447 }
emitDiv(const BinOpInfo & ops)1448 mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1449   return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1450                                     cgf.convertType(ops.fullType),
1451                                     cir::BinOpKind::Div, ops.lhs, ops.rhs);
1452 }
emitRem(const BinOpInfo & ops)1453 mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1454   return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1455                                     cgf.convertType(ops.fullType),
1456                                     cir::BinOpKind::Rem, ops.lhs, ops.rhs);
1457 }
1458 
// Emit a scalar addition. Pointer + integer (either operand order) is
// delegated to emitPointerArithmetic; signed integers honor the
// signed-overflow mode; floats use fadd; matrix, fixed-point and the
// overflow sanitizers are NYI. Everything else becomes a plain cir.binop.
emitAdd(const BinOpInfo & ops)1459 mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1460   if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1461       mlir::isa<cir::PointerType>(ops.rhs.getType()))
1462     return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1463 
1464   const mlir::Location loc = cgf.getLoc(ops.loc);
1465   if (ops.compType->isSignedIntegerOrEnumerationType()) {
       // Cases fall through when the signed-overflow sanitizer is enabled;
       // the sanitizer path itself is reported as NYI below.
1466     switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1467     case LangOptions::SOB_Defined:
1468       if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1469         return builder.createAdd(loc, ops.lhs, ops.rhs);
1470       [[fallthrough]];
1471     case LangOptions::SOB_Undefined:
1472       if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1473         return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1474       [[fallthrough]];
1475     case LangOptions::SOB_Trapping:
1476       if (canElideOverflowCheck(cgf.getContext(), ops))
1477         return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1478       cgf.cgm.errorNYI("sanitizers");
1479     }
1480   }
1481   if (ops.fullType->isConstantMatrixType()) {
1482     assert(!cir::MissingFeatures::matrixType());
1483     cgf.cgm.errorNYI("matrix types");
1484     return nullptr;
1485   }
1486 
1487   if (ops.compType->isUnsignedIntegerType() &&
1488       cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1489       !canElideOverflowCheck(cgf.getContext(), ops))
1490     cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1491 
1492   if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1493     assert(!cir::MissingFeatures::cgFPOptionsRAII());
1494     return builder.createFAdd(loc, ops.lhs, ops.rhs);
1495   }
1496 
1497   if (ops.isFixedPointOp()) {
1498     assert(!cir::MissingFeatures::fixedPointType());
1499     cgf.cgm.errorNYI("fixed point");
1500     return {};
1501   }
1502 
       // Default path: a plain cir.binop add on the converted full type.
1503   return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
1504                                     cir::BinOpKind::Add, ops.lhs, ops.rhs);
1505 }
1506 
// Emit a scalar subtraction. Three cases: (1) neither side is a pointer —
// ordinary arithmetic subtraction mirroring emitAdd's overflow handling;
// (2) pointer - integer — delegated to emitPointerArithmetic; (3)
// pointer - pointer (ptrdiff) — still NYI.
emitSub(const BinOpInfo & ops)1507 mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1508   const mlir::Location loc = cgf.getLoc(ops.loc);
1509   // The LHS is always a pointer if either side is.
1510   if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1511     if (ops.compType->isSignedIntegerOrEnumerationType()) {
         // Cases fall through when the signed-overflow sanitizer is
         // enabled; the sanitizer emission itself is NYI below.
1512       switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1513       case LangOptions::SOB_Defined: {
1514         if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1515           return builder.createSub(loc, ops.lhs, ops.rhs);
1516         [[fallthrough]];
1517       }
1518       case LangOptions::SOB_Undefined:
1519         if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1520           return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1521         [[fallthrough]];
1522       case LangOptions::SOB_Trapping:
1523         if (canElideOverflowCheck(cgf.getContext(), ops))
1524           return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1525         cgf.cgm.errorNYI("sanitizers");
1526       }
1527     }
1528 
1529     if (ops.fullType->isConstantMatrixType()) {
1530       assert(!cir::MissingFeatures::matrixType());
1531       cgf.cgm.errorNYI("matrix types");
1532       return nullptr;
1533     }
1534 
1535     if (ops.compType->isUnsignedIntegerType() &&
1536         cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1537         !canElideOverflowCheck(cgf.getContext(), ops))
1538       cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1539 
1540     if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1541       assert(!cir::MissingFeatures::cgFPOptionsRAII());
1542       return builder.createFSub(loc, ops.lhs, ops.rhs);
1543     }
1544 
1545     if (ops.isFixedPointOp()) {
1546       assert(!cir::MissingFeatures::fixedPointType());
1547       cgf.cgm.errorNYI("fixed point");
1548       return {};
1549     }
1550 
         // Default path: a plain cir.binop sub on the converted full type.
1551     return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1552                                       cgf.convertType(ops.fullType),
1553                                       cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1554   }
1555 
1556   // If the RHS is not a pointer, then we have normal pointer
1557   // arithmetic.
1558   if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1559     return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1560 
1561   // Otherwise, this is a pointer subtraction
1562 
1563   // Do the raw subtraction part.
1564   //
1565   // TODO(cir): note for LLVM lowering out of this; when expanding this into
1566   // LLVM we shall take VLA's, division by element size, etc.
1567   //
1568   // See more in `EmitSub` in CGExprScalar.cpp.
1569   assert(!cir::MissingFeatures::ptrDiffOp());
1570   cgf.cgm.errorNYI("ptrdiff");
1571   return {};
1572 }
1573 
emitShl(const BinOpInfo & ops)1574 mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1575   // TODO: This misses out on the sanitizer check below.
1576   if (ops.isFixedPointOp()) {
1577     assert(cir::MissingFeatures::fixedPointType());
1578     cgf.cgm.errorNYI("fixed point");
1579     return {};
1580   }
1581 
1582   // CIR accepts shift between different types, meaning nothing special
1583   // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1584   // promote or truncate the RHS to the same size as the LHS.
1585 
1586   bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1587                             ops.compType->hasSignedIntegerRepresentation() &&
1588                             !cgf.getLangOpts().isSignedOverflowDefined() &&
1589                             !cgf.getLangOpts().CPlusPlus20;
1590   bool sanitizeUnsignedBase =
1591       cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
1592       ops.compType->hasUnsignedIntegerRepresentation();
1593   bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
1594   bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
1595 
1596   // OpenCL 6.3j: shift values are effectively % word size of LHS.
1597   if (cgf.getLangOpts().OpenCL)
1598     cgf.cgm.errorNYI("opencl");
1599   else if ((sanitizeBase || sanitizeExponent) &&
1600            mlir::isa<cir::IntType>(ops.lhs.getType()))
1601     cgf.cgm.errorNYI("sanitizers");
1602 
1603   return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1604 }
1605 
emitShr(const BinOpInfo & ops)1606 mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
1607   // TODO: This misses out on the sanitizer check below.
1608   if (ops.isFixedPointOp()) {
1609     assert(cir::MissingFeatures::fixedPointType());
1610     cgf.cgm.errorNYI("fixed point");
1611     return {};
1612   }
1613 
1614   // CIR accepts shift between different types, meaning nothing special
1615   // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1616   // promote or truncate the RHS to the same size as the LHS.
1617 
1618   // OpenCL 6.3j: shift values are effectively % word size of LHS.
1619   if (cgf.getLangOpts().OpenCL)
1620     cgf.cgm.errorNYI("opencl");
1621   else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
1622            mlir::isa<cir::IntType>(ops.lhs.getType()))
1623     cgf.cgm.errorNYI("sanitizers");
1624 
1625   // Note that we don't need to distinguish unsigned treatment at this
1626   // point since it will be handled later by LLVM lowering.
1627   return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1628 }
1629 
emitAnd(const BinOpInfo & ops)1630 mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
1631   return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1632                                     cgf.convertType(ops.fullType),
1633                                     cir::BinOpKind::And, ops.lhs, ops.rhs);
1634 }
emitXor(const BinOpInfo & ops)1635 mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
1636   return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1637                                     cgf.convertType(ops.fullType),
1638                                     cir::BinOpKind::Xor, ops.lhs, ops.rhs);
1639 }
emitOr(const BinOpInfo & ops)1640 mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
1641   return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1642                                     cgf.convertType(ops.fullType),
1643                                     cir::BinOpKind::Or, ops.lhs, ops.rhs);
1644 }
1645 
1646 // Emit code for an explicit or implicit cast.  Implicit
1647 // casts have to handle a broader range of conversions than explicit
1648 // casts, as they handle things like function to ptr-to-function decay
1649 // etc.
VisitCastExpr(CastExpr * ce)1650 mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
1651   Expr *subExpr = ce->getSubExpr();
1652   QualType destTy = ce->getType();
1653   CastKind kind = ce->getCastKind();
1654 
1655   // These cases are generally not written to ignore the result of evaluating
1656   // their sub-expressions, so we clear this now.
1657   ignoreResultAssign = false;
1658 
       // Dispatch on the cast kind; unhandled kinds fall through to the
       // errorNYI at the bottom and return a null value.
1659   switch (kind) {
1660   case clang::CK_Dependent:
1661     llvm_unreachable("dependent cast kind in CIR gen!");
1662   case clang::CK_BuiltinFnToFnPtr:
1663     llvm_unreachable("builtin functions are handled elsewhere");
1664 
1665   case CK_CPointerToObjCPointerCast:
1666   case CK_BlockPointerToObjCPointerCast:
1667   case CK_AnyPointerToBlockPointerCast:
1668   case CK_BitCast: {
1669     mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1670     mlir::Type dstTy = cgf.convertType(destTy);
1671 
1672     assert(!cir::MissingFeatures::addressSpace());
1673 
1674     if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
1675       cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1676                                      "sanitizer support");
1677 
1678     if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1679       cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1680                                      "strict vtable pointers");
1681 
1682     // Update heapallocsite metadata when there is an explicit pointer cast.
1683     assert(!cir::MissingFeatures::addHeapAllocSiteMetadata());
1684 
1685     // If Src is a fixed vector and Dst is a scalable vector, and both have the
1686     // same element type, use the llvm.vector.insert intrinsic to perform the
1687     // bitcast.
1688     assert(!cir::MissingFeatures::scalableVectors());
1689 
1690     // If Src is a scalable vector and Dst is a fixed vector, and both have the
1691     // same element type, use the llvm.vector.extract intrinsic to perform the
1692     // bitcast.
1693     assert(!cir::MissingFeatures::scalableVectors());
1694 
1695     // Perform VLAT <-> VLST bitcast through memory.
1696     // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
1697     //       require the element types of the vectors to be the same, we
1698     //       need to keep this around for bitcasts between VLAT <-> VLST where
1699     //       the element types of the vectors are not the same, until we figure
1700     //       out a better way of doing these casts.
1701     assert(!cir::MissingFeatures::scalableVectors());
1702 
1703     return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
1704                                           src, dstTy);
1705   }
1706 
1707   case CK_AtomicToNonAtomic: {
         // NYI: report, then produce a dummy value so codegen can continue.
1708     cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1709                                    "CastExpr: ", ce->getCastKindName());
1710     mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
1711     return cgf.createDummyValue(loc, destTy);
1712   }
1713   case CK_NonAtomicToAtomic:
1714   case CK_UserDefinedConversion:
1715     return Visit(const_cast<Expr *>(subExpr));
1716   case CK_NoOp: {
1717     auto v = Visit(const_cast<Expr *>(subExpr));
1718     if (v) {
1719       // CK_NoOp can model a pointer qualification conversion, which can remove
1720       // an array bound and change the IR type.
1721       // FIXME: Once pointee types are removed from IR, remove this.
1722       mlir::Type t = cgf.convertType(destTy);
1723       if (t != v.getType())
1724         cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
1725     }
1726     return v;
1727   }
1728 
1729   case CK_ArrayToPointerDecay:
1730     return cgf.emitArrayToPointerDecay(subExpr).getPointer();
1731 
1732   case CK_NullToPointer: {
         // The operand may still have side effects that must be emitted.
1733     if (mustVisitNullValue(subExpr))
1734       cgf.emitIgnoredExpr(subExpr);
1735 
1736     // Note that DestTy is used as the MLIR type instead of a custom
1737     // nullptr type.
1738     mlir::Type ty = cgf.convertType(destTy);
1739     return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
1740   }
1741 
1742   case CK_LValueToRValue:
1743     assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
1744     assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1745     return Visit(const_cast<Expr *>(subExpr));
1746 
1747   case CK_IntegralCast: {
         // Only apply sanitizer-driven conversion options to casts the user
         // did not write explicitly.
1748     ScalarConversionOpts opts;
1749     if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1750       if (!ice->isPartOfExplicitCast())
1751         opts = ScalarConversionOpts(cgf.sanOpts);
1752     }
1753     return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1754                                 ce->getExprLoc(), opts);
1755   }
1756 
1757   case CK_FloatingRealToComplex:
1758   case CK_FloatingComplexCast:
1759   case CK_IntegralRealToComplex:
1760   case CK_IntegralComplexCast:
1761   case CK_IntegralComplexToFloatingComplex:
1762   case CK_FloatingComplexToIntegralComplex:
1763     llvm_unreachable("scalar cast to non-scalar value");
1764 
1765   case CK_PointerToIntegral: {
1766     assert(!destTy->isBooleanType() && "bool should use PointerToBool");
1767     if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1768       cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1769                                      "strict vtable pointers");
1770     return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
1771   }
1772   case CK_ToVoid:
1773     cgf.emitIgnoredExpr(subExpr);
1774     return {};
1775 
1776   case CK_IntegralToFloating:
1777   case CK_FloatingToIntegral:
1778   case CK_FloatingCast:
1779   case CK_FixedPointToFloating:
1780   case CK_FloatingToFixedPoint: {
1781     if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
1782       cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1783                                      "fixed point casts");
1784       return {};
1785     }
1786     assert(!cir::MissingFeatures::cgFPOptionsRAII());
1787     return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1788                                 ce->getExprLoc());
1789   }
1790 
1791   case CK_IntegralToBoolean:
1792     return emitIntToBoolConversion(Visit(subExpr),
1793                                    cgf.getLoc(ce->getSourceRange()));
1794 
1795   case CK_PointerToBoolean:
1796     return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
1797   case CK_FloatingToBoolean:
1798     return emitFloatToBoolConversion(Visit(subExpr),
1799                                      cgf.getLoc(subExpr->getExprLoc()));
1800   case CK_MemberPointerToBoolean: {
1801     mlir::Value memPtr = Visit(subExpr);
1802     return builder.createCast(cgf.getLoc(ce->getSourceRange()),
1803                               cir::CastKind::member_ptr_to_bool, memPtr,
1804                               cgf.convertType(destTy));
1805   }
1806 
1807   case CK_VectorSplat: {
1808     // Create a vector object and fill all elements with the same scalar value.
1809     assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
1810     return builder.create<cir::VecSplatOp>(
1811         cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
1812         Visit(subExpr));
1813   }
1814 
1815   default:
1816     cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1817                                    "CastExpr: ", ce->getCastKindName());
1818   }
1819   return {};
1820 }
1821 
VisitCallExpr(const CallExpr * e)1822 mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
1823   if (e->getCallReturnType(cgf.getContext())->isReferenceType())
1824     return emitLoadOfLValue(e);
1825 
1826   auto v = cgf.emitCallExpr(e).getValue();
1827   assert(!cir::MissingFeatures::emitLValueAlignmentAssumption());
1828   return v;
1829 }
1830 
VisitMemberExpr(MemberExpr * e)1831 mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
1832   // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
1833   // constants sound like work for MLIR optimizers, but we'll keep an assertion
1834   // for now.
1835   assert(!cir::MissingFeatures::tryEmitAsConstant());
1836   Expr::EvalResult result;
1837   if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
1838     cgf.cgm.errorNYI(e->getSourceRange(), "Constant interger member expr");
1839     // Fall through to emit this as a non-constant access.
1840   }
1841   return emitLoadOfLValue(e);
1842 }
1843 
VisitInitListExpr(InitListExpr * e)1844 mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
1845   const unsigned numInitElements = e->getNumInits();
1846 
1847   if (e->hadArrayRangeDesignator()) {
1848     cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
1849     return {};
1850   }
1851 
1852   if (e->getType()->isVectorType()) {
1853     const auto vectorType =
1854         mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1855 
1856     SmallVector<mlir::Value, 16> elements;
1857     for (Expr *init : e->inits()) {
1858       elements.push_back(Visit(init));
1859     }
1860 
1861     // Zero-initialize any remaining values.
1862     if (numInitElements < vectorType.getSize()) {
1863       const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
1864           vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
1865       std::fill_n(std::back_inserter(elements),
1866                   vectorType.getSize() - numInitElements, zeroValue);
1867     }
1868 
1869     return cgf.getBuilder().create<cir::VecCreateOp>(
1870         cgf.getLoc(e->getSourceRange()), vectorType, elements);
1871   }
1872 
1873   if (numInitElements == 0) {
1874     cgf.cgm.errorNYI(e->getSourceRange(),
1875                      "InitListExpr Non VectorType with 0 init elements");
1876     return {};
1877   }
1878 
1879   return Visit(e->getInit(0));
1880 }
1881 
emitScalarConversion(mlir::Value src,QualType srcTy,QualType dstTy,SourceLocation loc)1882 mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
1883                                                  QualType srcTy, QualType dstTy,
1884                                                  SourceLocation loc) {
1885   assert(CIRGenFunction::hasScalarEvaluationKind(srcTy) &&
1886          CIRGenFunction::hasScalarEvaluationKind(dstTy) &&
1887          "Invalid scalar expression to emit");
1888   return ScalarExprEmitter(*this, builder)
1889       .emitScalarConversion(src, srcTy, dstTy, loc);
1890 }
1891 
VisitUnaryLNot(const UnaryOperator * e)1892 mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
1893   // Perform vector logical not on comparison with zero vector.
1894   if (e->getType()->isVectorType() &&
1895       e->getType()->castAs<VectorType>()->getVectorKind() ==
1896           VectorKind::Generic) {
1897     assert(!cir::MissingFeatures::vectorType());
1898     cgf.cgm.errorNYI(e->getSourceRange(), "vector logical not");
1899     return {};
1900   }
1901 
1902   // Compare operand to zero.
1903   mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
1904 
1905   // Invert value.
1906   boolVal = builder.createNot(boolVal);
1907 
1908   // ZExt result to the expr type.
1909   return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
1910 }
1911 
VisitUnaryReal(const UnaryOperator * e)1912 mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
1913   // TODO(cir): handle scalar promotion.
1914   Expr *op = e->getSubExpr();
1915   if (op->getType()->isAnyComplexType()) {
1916     // If it's an l-value, load through the appropriate subobject l-value.
1917     // Note that we have to ask `e` because `op` might be an l-value that
1918     // this won't work for, e.g. an Obj-C property.
1919     if (e->isGLValue()) {
1920       mlir::Location loc = cgf.getLoc(e->getExprLoc());
1921       mlir::Value complex = cgf.emitComplexExpr(op);
1922       return cgf.builder.createComplexReal(loc, complex);
1923     }
1924 
1925     // Otherwise, calculate and project.
1926     cgf.cgm.errorNYI(e->getSourceRange(),
1927                      "VisitUnaryReal calculate and project");
1928   }
1929 
1930   return Visit(op);
1931 }
1932 
VisitUnaryImag(const UnaryOperator * e)1933 mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
1934   // TODO(cir): handle scalar promotion.
1935   Expr *op = e->getSubExpr();
1936   if (op->getType()->isAnyComplexType()) {
1937     // If it's an l-value, load through the appropriate subobject l-value.
1938     // Note that we have to ask `e` because `op` might be an l-value that
1939     // this won't work for, e.g. an Obj-C property.
1940     if (e->isGLValue()) {
1941       mlir::Location loc = cgf.getLoc(e->getExprLoc());
1942       mlir::Value complex = cgf.emitComplexExpr(op);
1943       return cgf.builder.createComplexImag(loc, complex);
1944     }
1945 
1946     // Otherwise, calculate and project.
1947     cgf.cgm.errorNYI(e->getSourceRange(),
1948                      "VisitUnaryImag calculate and project");
1949   }
1950 
1951   return Visit(op);
1952 }
1953 
1954 /// Return the size or alignment of the type of argument of the sizeof
1955 /// expression as an integer.
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr * e)1956 mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
1957     const UnaryExprOrTypeTraitExpr *e) {
1958   const QualType typeToSize = e->getTypeOfArgument();
1959   const mlir::Location loc = cgf.getLoc(e->getSourceRange());
1960   if (auto kind = e->getKind();
1961       kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
1962     if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
1963       cgf.getCIRGenModule().errorNYI(e->getSourceRange(),
1964                                      "sizeof operator for VariableArrayType",
1965                                      e->getStmtClassName());
1966       return builder.getConstant(
1967           loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
1968                                  llvm::APSInt(llvm::APInt(64, 1), true)));
1969     }
1970   } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
1971     cgf.getCIRGenModule().errorNYI(
1972         e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
1973         e->getStmtClassName());
1974     return builder.getConstant(
1975         loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
1976                                llvm::APSInt(llvm::APInt(64, 1), true)));
1977   }
1978 
1979   return builder.getConstant(
1980       loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
1981                              e->EvaluateKnownConstInt(cgf.getContext())));
1982 }
1983 
1984 /// Return true if the specified expression is cheap enough and side-effect-free
1985 /// enough to evaluate unconditionally instead of conditionally.  This is used
1986 /// to convert control flow into selects in some cases.
1987 /// TODO(cir): can be shared with LLVM codegen.
isCheapEnoughToEvaluateUnconditionally(const Expr * e,CIRGenFunction & cgf)1988 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
1989                                                    CIRGenFunction &cgf) {
1990   // Anything that is an integer or floating point constant is fine.
1991   return e->IgnoreParens()->isEvaluatable(cgf.getContext());
1992 
1993   // Even non-volatile automatic variables can't be evaluated unconditionally.
1994   // Referencing a thread_local may cause non-trivial initialization work to
1995   // occur. If we're inside a lambda and one of the variables is from the scope
1996   // outside the lambda, that function may have returned already. Reading its
1997   // locals is a bad idea. Also, these reads may introduce races there didn't
1998   // exist in the source-level program.
1999 }
2000 
// Emit a conditional operator (`cond ? lhs : rhs`). Strategy, in order:
// (1) if the condition constant-folds and the dead arm has no needed labels,
// emit only the live arm; (2) vector conditions use cir.vec.ternary;
// (3) if both arms are cheap and side-effect free, emit a cir.select;
// (4) otherwise emit a cir.ternary with one region per arm, patching in
// yields for arms that produced no value (throw/void expressions).
VisitAbstractConditionalOperator(const AbstractConditionalOperator * e)2001 mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2002     const AbstractConditionalOperator *e) {
2003   CIRGenBuilderTy &builder = cgf.getBuilder();
2004   mlir::Location loc = cgf.getLoc(e->getSourceRange());
2005   ignoreResultAssign = false;
2006 
2007   // Bind the common expression if necessary.
2008   CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2009 
2010   Expr *condExpr = e->getCond();
2011   Expr *lhsExpr = e->getTrueExpr();
2012   Expr *rhsExpr = e->getFalseExpr();
2013 
2014   // If the condition constant folds and can be elided, try to avoid emitting
2015   // the condition and the dead arm.
2016   bool condExprBool;
2017   if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2018     Expr *live = lhsExpr, *dead = rhsExpr;
2019     if (!condExprBool)
2020       std::swap(live, dead);
2021 
2022     // If the dead side doesn't have labels we need, just emit the Live part.
2023     if (!cgf.containsLabel(dead)) {
2024       if (condExprBool)
2025         assert(!cir::MissingFeatures::incrementProfileCounter());
2026       mlir::Value result = Visit(live);
2027 
2028       // If the live part is a throw expression, it acts like it has a void
2029       // type, so evaluating it returns a null Value.  However, a conditional
2030       // with non-void type must return a non-null Value.
2031       if (!result && !e->getType()->isVoidType()) {
2032         cgf.cgm.errorNYI(e->getSourceRange(),
2033                          "throw expression in conditional operator");
2034         result = {};
2035       }
2036 
2037       return result;
2038     }
2039   }
2040 
2041   QualType condType = condExpr->getType();
2042 
2043   // OpenCL: If the condition is a vector, we can treat this condition like
2044   // the select function.
2045   if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2046       condType->isExtVectorType()) {
2047     assert(!cir::MissingFeatures::vectorType());
2048     cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2049   }
2050 
2051   if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2052     if (!condType->isVectorType()) {
2053       assert(!cir::MissingFeatures::vecTernaryOp());
2054       cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2055       return {};
2056     }
2057 
         // Vector condition: element-wise select, no control flow needed.
2058     mlir::Value condValue = Visit(condExpr);
2059     mlir::Value lhsValue = Visit(lhsExpr);
2060     mlir::Value rhsValue = Visit(rhsExpr);
2061     return builder.create<cir::VecTernaryOp>(loc, condValue, lhsValue,
2062                                              rhsValue);
2063   }
2064 
2065   // If this is a really simple expression (like x ? 4 : 5), emit this as a
2066   // select instead of as control flow.  We can only do this if it is cheap
2067   // and safe to evaluate the LHS and RHS unconditionally.
2068   if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2069       isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2070     bool lhsIsVoid = false;
2071     mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2072     assert(!cir::MissingFeatures::incrementProfileCounter());
2073 
         // A void arm (e.g. a throw) yields no value; substitute a void null
         // so both select operands exist.
2074     mlir::Value lhs = Visit(lhsExpr);
2075     if (!lhs) {
2076       lhs = builder.getNullValue(cgf.VoidTy, loc);
2077       lhsIsVoid = true;
2078     }
2079 
2080     mlir::Value rhs = Visit(rhsExpr);
2081     if (lhsIsVoid) {
2082       assert(!rhs && "lhs and rhs types must match");
2083       rhs = builder.getNullValue(cgf.VoidTy, loc);
2084     }
2085 
2086     return builder.createSelect(loc, condV, lhs, rhs);
2087   }
2088 
       // General case: real control flow via cir.ternary regions.
2089   mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2090   CIRGenFunction::ConditionalEvaluation eval(cgf);
2091   SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2092   mlir::Type yieldTy{};
2093 
       // Emits one arm inside its own lexical scope; arms that produce no
       // value record their insert point so a yield can be patched in later.
2094   auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2095     CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2096     cgf.curLexScope->setAsTernary();
2097 
2098     assert(!cir::MissingFeatures::incrementProfileCounter());
2099     eval.beginEvaluation();
2100     mlir::Value branch = Visit(expr);
2101     eval.endEvaluation();
2102 
2103     if (branch) {
2104       yieldTy = branch.getType();
2105       b.create<cir::YieldOp>(loc, branch);
2106     } else {
2107       // If LHS or RHS is a throw or void expression we need to patch
2108       // arms as to properly match yield types.
2109       insertPoints.push_back(b.saveInsertionPoint());
2110     }
2111   };
2112 
2113   mlir::Value result = builder
2114                            .create<cir::TernaryOp>(
2115                                loc, condV,
2116                                /*trueBuilder=*/
2117                                [&](mlir::OpBuilder &b, mlir::Location loc) {
2118                                  emitBranch(b, loc, lhsExpr);
2119                                },
2120                                /*falseBuilder=*/
2121                                [&](mlir::OpBuilder &b, mlir::Location loc) {
2122                                  emitBranch(b, loc, rhsExpr);
2123                                })
2124                            .getResult();
2125 
2126   if (!insertPoints.empty()) {
2127     // If both arms are void, so be it.
2128     if (!yieldTy)
2129       yieldTy = cgf.VoidTy;
2130 
2131     // Insert required yields.
2132     for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2133       mlir::OpBuilder::InsertionGuard guard(builder);
2134       builder.restoreInsertionPoint(toInsert);
2135 
2136       // Block does not return: build empty yield.
2137       if (mlir::isa<cir::VoidType>(yieldTy)) {
2138         builder.create<cir::YieldOp>(loc);
2139       } else { // Block returns: set null yield value.
2140         mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2141         builder.create<cir::YieldOp>(loc, op0);
2142       }
2143     }
2144   }
2145 
2146   return result;
2147 }
2148 
emitScalarPrePostIncDec(const UnaryOperator * e,LValue lv,bool isInc,bool isPre)2149 mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2150                                                     LValue lv, bool isInc,
2151                                                     bool isPre) {
2152   return ScalarExprEmitter(*this, builder)
2153       .emitScalarPrePostIncDec(e, lv, isInc, isPre);
2154 }
2155