//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as CIR code.
//
//===----------------------------------------------------------------------===//

#include "Address.h"
#include "CIRGenConstantEmitter.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
#include "CIRGenValue.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Value.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
#include <optional>

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

/// Get the address of a field within a record. The resulting address doesn't
/// necessarily have the right type.
Address CIRGenFunction::emitAddrOfFieldStorage(Address base,
                                               const FieldDecl *field,
                                               llvm::StringRef fieldName,
                                               unsigned fieldIndex) {
  if (field->isZeroSize(getContext())) {
    cgm.errorNYI(field->getSourceRange(),
                 "emitAddrOfFieldStorage: zero-sized field");
    return Address::invalid();
  }

  mlir::Location loc = getLoc(field->getLocation());

  mlir::Type fieldType = convertType(field->getType());
  auto fieldPtr = cir::PointerType::get(fieldType);
  // In most cases fieldName is the same as field->getName(), but for lambdas,
  // which do not currently carry the name, it can be passed down from the
  // CaptureStmt.
  cir::GetMemberOp memberAddr = builder.createGetMember(
      loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);

  // Retrieve layout information, compute alignment and return the final
  // address.
  const RecordDecl *rec = field->getParent();
  const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
  unsigned idx = layout.getCIRFieldNo(field);
  CharUnits offset = CharUnits::fromQuantity(
      layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
  return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
}
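
// Illustrative example (hypothetical input): for
//   struct S { int a; int b; };
//   int f(S &s) { return s.b; }
// the access to `s.b` goes through emitAddrOfFieldStorage with the CIR field
// index of `b`; the resulting cir.get_member pointer gets the base alignment
// adjusted by `b`'s byte offset in the record layout.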

/// Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
                                                 LValueBaseInfo *baseInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(expr->getType()->isPointerType() ||
         expr->getType()->isObjCObjectPointerType());
  expr = expr->IgnoreParens();

  // Casts:
  if (auto const *ce = dyn_cast<CastExpr>(expr)) {
    if (isa<ExplicitCastExpr>(ce)) {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: explicit cast");
      return Address::invalid();
    }

    switch (ce->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: noop cast");
      return Address::invalid();
    } break;

    // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
    case CK_ArrayToPointerDecay: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: array-to-pointer decay");
      return Address::invalid();
    }

    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      assert(!cir::MissingFeatures::opTBAA());
      assert(!cir::MissingFeatures::addressIsKnownNonNull());
      Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
      const CXXRecordDecl *derived =
          ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return getAddressOfBaseClass(addr, derived, ce->path(),
                                   shouldNullCheckClassCastValue(ce),
                                   ce->getExprLoc());
    }

    case CK_AnyPointerToBlockPointerCast:
    case CK_BaseToDerived:
    case CK_BaseToDerivedMemberPointer:
    case CK_BlockPointerToObjCPointerCast:
    case CK_BuiltinFnToFnPtr:
    case CK_CPointerToObjCPointerCast:
    case CK_DerivedToBaseMemberPointer:
    case CK_Dynamic:
    case CK_FunctionToPointerDecay:
    case CK_IntegralToPointer:
    case CK_LValueToRValue:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_NullToPointer:
    case CK_ReinterpretMemberPointer:
      // Common pointer conversions, nothing to do here.
      // TODO: Is there any reason to treat base-to-derived conversions
      // specially?
      break;

    case CK_ARCConsumeObject:
    case CK_ARCExtendBlockObject:
    case CK_ARCProduceObject:
    case CK_ARCReclaimReturnedObject:
    case CK_AtomicToNonAtomic:
    case CK_BooleanToSignedIntegral:
    case CK_ConstructorConversion:
    case CK_CopyAndAutoreleaseBlockObject:
    case CK_Dependent:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToFloating:
    case CK_FixedPointToIntegral:
    case CK_FloatingCast:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexToIntegralComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingRealToComplex:
    case CK_FloatingToBoolean:
    case CK_FloatingToFixedPoint:
    case CK_FloatingToIntegral:
    case CK_HLSLAggregateSplatCast:
    case CK_HLSLArrayRValue:
    case CK_HLSLElementwiseCast:
    case CK_HLSLVectorTruncation:
    case CK_IntToOCLSampler:
    case CK_IntegralCast:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexToFloatingComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralRealToComplex:
    case CK_IntegralToBoolean:
    case CK_IntegralToFixedPoint:
    case CK_IntegralToFloating:
    case CK_LValueBitCast:
    case CK_MatrixCast:
    case CK_MemberPointerToBoolean:
    case CK_NonAtomicToAtomic:
    case CK_ObjCObjectLValueCast:
    case CK_PointerToBoolean:
    case CK_PointerToIntegral:
    case CK_ToUnion:
    case CK_ToVoid:
    case CK_UserDefinedConversion:
    case CK_VectorSplat:
    case CK_ZeroToOCLOpaqueType:
      llvm_unreachable("unexpected cast for emitPointerWithAlignment");
    }
  }

  // Unary &
  if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
    // TODO(cir): maybe we should use cir.unary for pointers here instead.
    if (uo->getOpcode() == UO_AddrOf) {
      cgm.errorNYI(expr->getSourceRange(), "emitPointerWithAlignment: unary &");
      return Address::invalid();
    }
  }

  // std::addressof and variants.
  if (auto const *call = dyn_cast<CallExpr>(expr)) {
    switch (call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: builtin addressof");
      return Address::invalid();
    }
    }
  }

  // Otherwise, use the alignment of the type.
  return makeNaturalAddressForPointer(
      emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
      /*forPointeeType=*/true, baseInfo);
}

void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
                                            bool isInit) {
  if (!dst.isSimple()) {
    if (dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element
      const mlir::Location loc = dst.getVectorPointer().getLoc();
      const mlir::Value vector =
          builder.createLoad(loc, dst.getVectorAddress());
      const mlir::Value newVector = builder.create<cir::VecInsertOp>(
          loc, vector, src.getValue(), dst.getVectorIdx());
      builder.createStore(loc, newVector, dst.getVectorAddress());
      return;
    }

    assert(dst.isBitField() && "Unknown LValue type");
    emitStoreThroughBitfieldLValue(src, dst);
    return;
  }

  assert(!cir::MissingFeatures::opLoadStoreObjC());

  assert(src.isScalar() && "Can't emit an aggregate store with this method");
  emitStoreOfScalar(src.getValue(), dst, isInit);
}

static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                      const VarDecl *vd) {
  QualType t = e->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  assert(!cir::MissingFeatures::opGlobalThreadLocal());
  if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "emitGlobalVarDeclLValue: thread_local variable");

  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");

  // Traditional LLVM codegen handles thread-local variables separately; CIR
  // handles them as part of getAddrOfGlobalVar.
  mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);

  assert(!cir::MissingFeatures::addressSpace());
  mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
  cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
  if (realPtrTy != v.getType())
    v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);

  CharUnits alignment = cgf.getContext().getDeclAlign(vd);
  Address addr(v, realVarTy, alignment);
  LValue lv;
  if (vd->getType()->isReferenceType())
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "emitGlobalVarDeclLValue: reference type");
  else
    lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
  assert(!cir::MissingFeatures::setObjCGCLValueClass());
  return lv;
}

void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
                                       bool isVolatile, QualType ty,
                                       bool isInit, bool isNontemporal) {
  assert(!cir::MissingFeatures::opLoadStoreThreadLocal());

  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (clangVecTy->isExtVectorBoolType())
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar ExtVectorBoolType");

    // Handle vectors of size 3 like size 4 for better performance.
    const mlir::Type elementType = addr.getElementType();
    const auto vecTy = cast<cir::VectorType>(elementType);

    // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it is
    // upstreamed.
    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
  }

  value = emitToMemory(value, ty);

  assert(!cir::MissingFeatures::opLoadStoreAtomic());

  // Update the alloca with more info on initialization.
  assert(addr.getPointer() && "expected pointer to exist");
  auto srcAlloca =
      dyn_cast_or_null<cir::AllocaOp>(addr.getPointer().getDefiningOp());
  if (currVarDecl && srcAlloca) {
    const VarDecl *vd = currVarDecl;
    assert(vd && "VarDecl expected");
    if (vd->hasInit())
      srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  }

  assert(currSrcLoc && "must pass in source location");
  builder.createStore(*currSrcLoc, value, addr /*, isVolatile*/);

  if (isNontemporal) {
    cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
    return;
  }

  assert(!cir::MissingFeatures::opTBAA());
}

mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
                                                           LValue dst) {

  assert(!cir::MissingFeatures::armComputeVolatileBitfields());

  const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
  mlir::Type resLTy = convertTypeForMem(dst.getType());
  Address ptr = dst.getBitFieldAddress();

  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
  const bool useVolatile = false;

  mlir::Value dstAddr = dst.getAddress().getPointer();

  return builder.createSetBitfield(dstAddr.getLoc(), resLTy, dstAddr,
                                   ptr.getElementType(), src.getValue(), info,
                                   dst.isVolatileQualified(), useVolatile);
}
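
// Illustrative example (hypothetical input): for
//   struct S { unsigned x : 3; unsigned y : 5; };
//   void f(S &s) { s.y = 7; }
// the assignment to `s.y` is a bit-field store: emitStoreThroughBitfieldLValue
// writes the value with createSetBitfield, using the CIRGenBitFieldInfo
// (bit offset and width within the storage unit) from the record layout.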

RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
  const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();

  // Get the output type.
  mlir::Type resLTy = convertType(lv.getType());
  Address ptr = lv.getBitFieldAddress();

  assert(!cir::MissingFeatures::armComputeVolatileBitfields());

  mlir::Value field = builder.createGetBitfield(
      getLoc(loc), resLTy, ptr.getPointer(), ptr.getElementType(), info,
      lv.isVolatile(), false);
  assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI");
  return RValue::get(field);
}

Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
                                                 const FieldDecl *field,
                                                 mlir::Type fieldType,
                                                 unsigned index) {
  mlir::Location loc = getLoc(field->getLocation());
  cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
  cir::GetMemberOp sea = getBuilder().createGetMember(
      loc, fieldPtr, base.getPointer(), field->getName(), index);
  return Address(sea, CharUnits::One());
}

LValue CIRGenFunction::emitLValueForBitField(LValue base,
                                             const FieldDecl *field) {
  LValueBaseInfo baseInfo = base.getBaseInfo();
  const CIRGenRecordLayout &layout =
      cgm.getTypes().getCIRGenRecordLayout(field->getParent());
  const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
  assert(!cir::MissingFeatures::preservedAccessIndexRegion());
  unsigned idx = layout.getCIRFieldNo(field);

  Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);

  mlir::Location loc = getLoc(field->getLocation());
  if (addr.getElementType() != info.storageType)
    addr = builder.createElementBitCast(loc, addr, info.storageType);

  QualType fieldType =
      field->getType().withCVRQualifiers(base.getVRQualifiers());
  // TODO(cir): Support TBAA for bit fields.
  assert(!cir::MissingFeatures::opTBAA());
  LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
  return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
}

LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
  LValueBaseInfo baseInfo = base.getBaseInfo();

  if (field->isBitField())
    return emitLValueForBitField(base, field);

  QualType fieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
  assert(!cir::MissingFeatures::opTBAA());

  Address addr = base.getAddress();
  if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
    if (cgm.getCodeGenOpts().StrictVTablePointers &&
        classDecl->isDynamicClass()) {
      cgm.errorNYI(field->getSourceRange(),
                   "emitLValueForField: strict vtable for dynamic class");
    }
  }

  unsigned recordCVR = base.getVRQualifiers();

  llvm::StringRef fieldName = field->getName();
  unsigned fieldIndex;
  assert(!cir::MissingFeatures::lambdaFieldToName());

  if (rec->isUnion())
    fieldIndex = field->getFieldIndex();
  else {
    const CIRGenRecordLayout &layout =
        cgm.getTypes().getCIRGenRecordLayout(field->getParent());
    fieldIndex = layout.getCIRFieldNo(field);
  }

  addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
  assert(!cir::MissingFeatures::preservedAccessIndexRegion());

  // If this is a reference field, load the reference right now.
  if (fieldType->isReferenceType()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: reference type");
    return LValue();
  }

  if (field->hasAttr<AnnotateAttr>()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
    return LValue();
  }

  LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
  lv.getQuals().addCVRQualifiers(recordCVR);

  // __weak attribute on a field is ignored.
  if (lv.getQuals().getObjCGCAttr() == Qualifiers::Weak) {
    cgm.errorNYI(field->getSourceRange(),
                 "emitLValueForField: __weak attribute");
    return LValue();
  }

  return lv;
}

LValue CIRGenFunction::emitLValueForFieldInitialization(
    LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
  QualType fieldType = field->getType();

  if (!fieldType->isReferenceType())
    return emitLValueForField(base, field);

  const CIRGenRecordLayout &layout =
      cgm.getTypes().getCIRGenRecordLayout(field->getParent());
  unsigned fieldIndex = layout.getCIRFieldNo(field);

  Address v =
      emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);

  // Make sure that the address is pointing to the right type.
  mlir::Type memTy = convertTypeForMem(fieldType);
  v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo baseInfo = base.getBaseInfo();
  AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
  assert(!cir::MissingFeatures::opTBAA());
  return makeAddrLValue(v, fieldType, fieldBaseInfo);
}

mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
  // Bool has a different representation in memory than in registers,
  // but in ClangIR, it is simply represented as a cir.bool value.
  // This function is here as a placeholder for possible future changes.
  return value;
}

void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
                                       bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    assert(0 && "NYI: emitStoreOfScalar constant matrix type");
    return;
  }

  emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), isInit, /*isNontemporal=*/false);
}

mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
                                             SourceLocation loc) {
  assert(!cir::MissingFeatures::opLoadStoreThreadLocal());
  assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck());
  assert(!cir::MissingFeatures::opLoadBooleanRepresentation());

  Address addr = lvalue.getAddress();
  mlir::Type eltTy = addr.getElementType();

  if (mlir::isa<cir::VoidType>(eltTy))
    cgm.errorNYI(loc, "emitLoadOfScalar: void type");

  mlir::Value loadOp = builder.createLoad(getLoc(loc), addr);

  return loadOp;
}

/// Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
  assert(!lv.getType()->isFunctionType());
  assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");

  if (lv.isBitField())
    return emitLoadOfBitfieldLValue(lv, loc);

  if (lv.isSimple())
    return RValue::get(emitLoadOfScalar(lv, loc));

  if (lv.isVectorElt()) {
    const mlir::Value load =
        builder.createLoad(getLoc(loc), lv.getVectorAddress());
    return RValue::get(builder.create<cir::VecExtractOp>(getLoc(loc), load,
                                                         lv.getVectorIdx()));
  }

  cgm.errorNYI(loc, "emitLoadOfLValue");
  return RValue::get(nullptr);
}

LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
  const NamedDecl *nd = e->getDecl();
  QualType ty = e->getType();

  assert(e->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
    // Checks for omitted feature handling
    assert(!cir::MissingFeatures::opAllocaStaticLocal());
    assert(!cir::MissingFeatures::opAllocaNonGC());
    assert(!cir::MissingFeatures::opAllocaImpreciseLifetime());
    assert(!cir::MissingFeatures::opAllocaTLS());
    assert(!cir::MissingFeatures::opAllocaOpenMPThreadPrivate());
    assert(!cir::MissingFeatures::opAllocaEscapeByReference());

    // Check if this is a global variable
    if (vd->hasLinkage() || vd->isStaticDataMember())
      return emitGlobalVarDeclLValue(*this, e, vd);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = localDeclMap.find(vd);
    if (iter != localDeclMap.end()) {
      addr = iter->second;
    } else {
      // Otherwise, it might be a static local we haven't emitted yet for some
      // reason; most likely, because it's in an outer function.
      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
    }

    // Drill into reference types.
    LValue lv =
        vd->getType()->isReferenceType()
            ? emitLoadOfReferenceLValue(addr, getLoc(e->getSourceRange()),
                                        vd->getType(), AlignmentSource::Decl)
            : makeAddrLValue(addr, ty, AlignmentSource::Decl);
    return lv;
  }

  cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
  return LValue();
}

mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) {
  QualType boolTy = getContext().BoolTy;
  SourceLocation loc = e->getExprLoc();

  assert(!cir::MissingFeatures::pgoUse());
  if (e->getType()->getAs<MemberPointerType>()) {
    cgm.errorNYI(e->getSourceRange(),
                 "evaluateExprAsBool: member pointer type");
    return createDummyValue(getLoc(loc), boolTy);
  }

  assert(!cir::MissingFeatures::cgFPOptionsRAII());
  if (!e->getType()->isAnyComplexType())
    return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);

  cgm.errorNYI(e->getSourceRange(), "evaluateExprAsBool: complex type");
  return createDummyValue(getLoc(loc), boolTy);
}
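
// Illustrative example (hypothetical input): for a pointer `p`, the condition
// of `if (p)` reaches evaluateExprAsBool, which emits `p` as a scalar and
// converts it to bool via emitScalarConversion; member-pointer and complex
// operands are still NYI and produce a dummy value instead.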

LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
  UnaryOperatorKind op = e->getOpcode();

  // __extension__ doesn't affect lvalue-ness.
  if (op == UO_Extension)
    return emitLValue(e->getSubExpr());

  switch (op) {
  case UO_Deref: {
    QualType t = e->getSubExpr()->getType()->getPointeeType();
    assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    assert(!cir::MissingFeatures::opTBAA());
    LValueBaseInfo baseInfo;
    Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);

    // Tag 'load' with deref attribute.
    // FIXME: This misses some dereference cases and has problematic interactions
    // with other operators.
    if (auto loadOp =
            dyn_cast<cir::LoadOp>(addr.getPointer().getDefiningOp())) {
      loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
    }

    LValue lv = makeAddrLValue(addr, t, baseInfo);
    assert(!cir::MissingFeatures::addressSpace());
    assert(!cir::MissingFeatures::setNonGC());
    return lv;
  }
  case UO_Real:
  case UO_Imag: {
    LValue lv = emitLValue(e->getSubExpr());
    assert(lv.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (e->getOpcode() == UO_Real &&
        !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
      assert(e->getSubExpr()->getType()->isArithmeticType());
      return lv;
    }

    QualType exprTy = getContext().getCanonicalType(e->getSubExpr()->getType());
    QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
    mlir::Location loc = getLoc(e->getExprLoc());
    Address component =
        e->getOpcode() == UO_Real
            ? builder.createComplexRealPtr(loc, lv.getAddress())
            : builder.createComplexImagPtr(loc, lv.getAddress());
    assert(!cir::MissingFeatures::opTBAA());
    LValue elemLV = makeAddrLValue(component, elemTy);
    elemLV.getQuals().addQualifiers(lv.getQuals());
    return elemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    bool isInc = e->isIncrementOp();
    LValue lv = emitLValue(e->getSubExpr());

    assert(e->isPrefix() && "Prefix operator in unexpected state!");

    if (e->getType()->isAnyComplexType()) {
      cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec");
      lv = LValue();
    } else {
      emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true);
    }

    return lv;
  }
  case UO_Extension:
    llvm_unreachable("UnaryOperator extension should be handled above!");
  case UO_Plus:
  case UO_Minus:
  case UO_Not:
  case UO_LNot:
  case UO_AddrOf:
  case UO_PostInc:
  case UO_PostDec:
  case UO_Coawait:
    llvm_unreachable("UnaryOperator of non-lvalue kind!");
  }
  llvm_unreachable("Unknown unary operator kind!");
}

/// If the specified expr is a simple decay from an array to pointer,
/// return the array subexpression.
/// FIXME: this could be abstracted into a common AST helper.
static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *castExpr = dyn_cast<CastExpr>(e);
  if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from variable width array, bail out.
  const Expr *subExpr = castExpr->getSubExpr();
  if (subExpr->getType()->isVariableArrayType())
    return nullptr;

  return subExpr;
}

static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
  // TODO(cir): should we consider using MLIR's IndexType instead of IntegerAttr?
  if (auto constantOp = dyn_cast<cir::ConstantOp>(idx.getDefiningOp()))
    return mlir::dyn_cast<cir::IntAttr>(constantOp.getValue());
  return {};
}

static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  const cir::IntAttr constantIdx = getConstantIndexOrNull(idx);
  if (constantIdx) {
    const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);
  }
  // Otherwise, use the worst-case alignment for any element.
  return arrayAlign.alignmentOfArrayElement(eltSize);
}

static QualType getFixedSizeElementType(const ASTContext &astContext,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = astContext.getAsVariableArrayType(eltType)));
  return eltType;
}

static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf,
                                         mlir::Location beginLoc,
                                         mlir::Location endLoc, mlir::Value ptr,
                                         mlir::Type eltTy, mlir::Value idx,
                                         bool shouldDecay) {
  CIRGenModule &cgm = cgf.getCIRGenModule();
  // TODO(cir): LLVM codegen emits an in-bounds GEP check here; is there
  // anything that would enhance tracking this later in CIR?
  assert(!cir::MissingFeatures::emitCheckedInBoundsGEP());
  return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
                                          shouldDecay);
}

static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
                                     mlir::Location beginLoc,
                                     mlir::Location endLoc, Address addr,
                                     QualType eltType, mlir::Value idx,
                                     mlir::Location loc, bool shouldDecay) {

  // Determine the element size of the statically-sized base. This is
  // the thing that the indices are expressed in terms of.
  if (const VariableArrayType *vla =
          cgf.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(cgf.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
  const CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), idx, eltSize);

  assert(!cir::MissingFeatures::preservedAccessIndexRegion());
  const mlir::Value eltPtr =
      emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
                            addr.getElementType(), idx, shouldDecay);
  const mlir::Type elementType = cgf.convertTypeForMem(eltType);
  return Address(eltPtr, elementType, eltAlign);
}
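
// Illustrative example for getArrayElementAlign above (hypothetical values):
// for an `int a[8]` at a 16-byte aligned address (element size 4), a constant
// index of 2 gives an exact offset of 8 and therefore 8-byte alignment via
// alignmentAtOffset, while a runtime index must use the worst-case
// alignmentOfArrayElement result of 4.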

LValue
CIRGenFunction::emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e) {
  if (isa<ExtVectorElementExpr>(e->getBase())) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitArraySubscriptExpr: ExtVectorElementExpr");
    return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
  }

  if (getContext().getAsVariableArrayType(e->getType())) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitArraySubscriptExpr: VariableArrayType");
    return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
  }

  if (e->getType()->getAs<ObjCObjectType>()) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
    return LValue::makeAddr(Address::invalid(), e->getType(), LValueBaseInfo());
  }

  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
         "index was neither LHS nor RHS");

  auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
    const mlir::Value idx = emitScalarExpr(e->getIdx());

    // Extend or truncate the index type to 32 or 64 bits.
    auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
    if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
      cgm.errorNYI(e->getSourceRange(),
                   "emitArraySubscriptExpr: index type cast");
    return idx;
  };

  // If the base is a vector type, then we are forming a vector element
  // with this subscript.
  if (e->getBase()->getType()->isVectorType() &&
      !isa<ExtVectorElementExpr>(e->getBase())) {
    const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
    const LValue lhs = emitLValue(e->getBase());
    return LValue::makeVectorElt(lhs.getAddress(), idx, e->getBase()->getType(),
                                 lhs.getBaseInfo());
  }

  const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
  if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
    LValue arrayLV;
    if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
      arrayLV = emitArraySubscriptExpr(ase);
    else
      arrayLV = emitLValue(array);

    // Propagate the alignment from the array itself to the result.
    const Address addr = emitArraySubscriptPtr(
        *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
        arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
        /*shouldDecay=*/true);

    const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());

    if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
      cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
    }

    return lv;
  }

  // The base must be a pointer; emit it with an estimate of its alignment.
  assert(e->getBase()->getType()->isPointerType() &&
         "The base must be a pointer");

  LValueBaseInfo eltBaseInfo;
  const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
  // Propagate the alignment from the array itself to the result.
  const Address addr = emitArraySubscriptPtr(
      *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
      e->getType(), idx, cgm.getLoc(e->getExprLoc()),
      /*shouldDecay=*/false);

  const LValue lv = LValue::makeAddr(addr, e->getType(), eltBaseInfo);

  if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
  }

  return lv;
}

LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *e) {
  cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e);
  assert(globalOp.getAlignment() && "expected alignment for string literal");
  unsigned align = *(globalOp.getAlignment());
  mlir::Value addr =
      builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
  return makeAddrLValue(
      Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
      e->getType(), AlignmentSource::Decl);
}

/// Casts are never lvalues unless that cast is to a reference type. If the cast
/// is to a reference, we can have the usual lvalue result, otherwise if a cast
/// is needed by the code generator in an lvalue context, then it must mean that
/// we need the address of an aggregate in order to access one of its members.
/// This can happen for all the reasons that casts are permitted with aggregate
/// result, including noop aggregate casts, and cast from scalar to union.
LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
  switch (e->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    llvm_unreachable("unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_BaseToDerived:
  case CK_LValueBitCast:
  case CK_AddressSpaceConversion:
  case CK_ObjCObjectLValueCast:
  case CK_VectorSplat:
  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue: {
    cgm.errorNYI(e->getSourceRange(),
                 std::string("emitCastLValue for unhandled cast kind: ") +
                     e->getCastKindName());

    return {};
  }

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    LValue lv = emitLValue(e->getSubExpr());
    // Propagate the volatile qualifier to LValue, if it exists in e.
    if (e->changesVolatileQualification())
      cgm.errorNYI(e->getSourceRange(),
                   "emitCastLValue: NoOp changes volatile qual");
    if (lv.isSimple()) {
      Address v = lv.getAddress();
      if (v.isValid()) {
        mlir::Type ty = convertTypeForMem(e->getType());
        if (v.getElementType() != ty)
          cgm.errorNYI(e->getSourceRange(),
                       "emitCastLValue: NoOp needs bitcast");
      }
    }
    return lv;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *derivedClassTy =
        e->getSubExpr()->getType()->castAs<clang::RecordType>();
    auto *derivedClassDecl = cast<CXXRecordDecl>(derivedClassTy->getDecl());

    LValue lv = emitLValue(e->getSubExpr());
    Address thisAddr = lv.getAddress();

    // Perform the derived-to-base conversion
    Address baseAddr =
        getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
                              /*NullCheckValue=*/false, e->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    assert(!cir::MissingFeatures::opTBAA());
    return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
  }

  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
  }

  llvm_unreachable("Invalid cast kind");
}

LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
  if (isa<VarDecl>(e->getMemberDecl())) {
    cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: VarDecl");
    return LValue();
  }

  Expr *baseExpr = e->getBase();
  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  LValue baseLV;
  if (e->isArrow()) {
    LValueBaseInfo baseInfo;
    assert(!cir::MissingFeatures::opTBAA());
    Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
    QualType ptrTy = baseExpr->getType()->getPointeeType();
    assert(!cir::MissingFeatures::typeChecks());
    baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
  } else {
    assert(!cir::MissingFeatures::typeChecks());
    baseLV = emitLValue(baseExpr);
  }

  const NamedDecl *nd = e->getMemberDecl();
  if (auto *field = dyn_cast<FieldDecl>(nd)) {
    LValue lv = emitLValueForField(baseLV, field);
    assert(!cir::MissingFeatures::setObjCGCLValueClass());
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
    }
    return lv;
  }

  if (isa<FunctionDecl>(nd)) {
    cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
    return LValue();
  }

  llvm_unreachable("Unhandled member declaration!");
}

LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) {
  RValue rv = emitCallExpr(e);

  if (!rv.isScalar()) {
    cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
    return {};
  }

  assert(e->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return makeNaturalAlignPointeeAddrLValue(rv.getValue(), e->getType());
}

LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (e->getOpcode() == BO_Comma) {
    emitIgnoredExpr(e->getLHS());
    return emitLValue(e->getRHS());
  }

  if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) {
    cgm.errorNYI(e->getSourceRange(), "member pointers");
    return {};
  }

  assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (CIRGenFunction::getEvaluationKind(e->getType())) {
  case cir::TEK_Scalar: {
    assert(!cir::MissingFeatures::objCLifetime());
    if (e->getLHS()->getType().getObjCLifetime() !=
        clang::Qualifiers::ObjCLifetime::OCL_None) {
      cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
      return {};
    }

    RValue rv = emitAnyExpr(e->getRHS());
    LValue lv = emitLValue(e->getLHS());

    SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
    if (lv.isBitField())
      emitStoreThroughBitfieldLValue(rv, lv);
    else
      emitStoreThroughLValue(rv, lv);

    if (getLangOpts().OpenMP) {
      cgm.errorNYI(e->getSourceRange(), "openmp");
      return {};
    }

    return lv;
  }

  case cir::TEK_Complex: {
    return emitComplexAssignmentLValue(e);
  }

  case cir::TEK_Aggregate:
    cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
    return {};
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
  switch (CIRGenFunction::getEvaluationKind(e->getType())) {
  case cir::TEK_Scalar:
    return RValue::get(emitScalarExpr(e));
  case cir::TEK_Complex:
    return RValue::getComplex(emitComplexExpr(e));
  case cir::TEK_Aggregate: {
    if (aggSlot.isIgnored())
      aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
                              getCounterAggTmpAsString());
    emitAggExpr(e, aggSlot);
    return aggSlot.asRValue();
  }
  }
  llvm_unreachable("bad evaluation kind");
}
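
// Illustrative example: in emitAnyExpr above, an `int` expression takes the
// TEK_Scalar path, a `_Complex double` expression takes TEK_Complex, and a
// struct-typed expression takes TEK_Aggregate, materializing a temporary slot
// when the caller passed an ignored AggValueSlot.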

static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
  assert(!cir::MissingFeatures::weakRefReference());
  return cgm.getAddrOfFunction(gd);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd) {
  for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
    if (!pd->isInlineBuiltinDeclaration())
      return false;
  return true;
}

CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());

  if (unsigned builtinID = fd->getBuiltinID()) {
    if (fd->getAttr<AsmLabelAttr>()) {
      cgm.errorNYI("AsmLabelAttr");
    }

    StringRef ident = fd->getName();
    std::string fdInlineName = (ident + ".inline").str();

    bool isPredefinedLibFunction =
        cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    // Assume nobuiltins everywhere until we actually read the attributes.
    bool hasAttributeNoBuiltin = true;
    assert(!cir::MissingFeatures::attributeNoBuiltin());

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    auto fn = cast<cir::FuncOp>(curFn);
    if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
      cgm.errorNYI("Inline only builtin function calls");
    }

    // Replaceable builtins provide their own implementation of a builtin. If we
    // are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is
    // not a predefined library function which means we must generate the
    // builtin no matter what.
    else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
      return CIRGenCallee::forBuiltin(builtinID, fd);
  }

  cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);

  assert(!cir::MissingFeatures::hip());

  return CIRGenCallee::forDirect(callee, gd);
}

RValue CIRGenFunction::getUndefRValue(QualType ty) {
  if (ty->isVoidType())
    return RValue::get(nullptr);

  cgm.errorNYI("unsupported type for undef rvalue");
  return RValue::get(nullptr);
}

RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
                                const CIRGenCallee &callee,
                                const clang::CallExpr *e,
                                ReturnValueSlot returnValue) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(calleeTy->isFunctionPointerType() &&
         "Callee must have function pointer type!");

  calleeTy = getContext().getCanonicalType(calleeTy);
  auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();

  if (getLangOpts().CPlusPlus)
    assert(!cir::MissingFeatures::sanitizers());

  const auto *fnType = cast<FunctionType>(pointeeTy);

  assert(!cir::MissingFeatures::sanitizers());

  CallArgList args;
  assert(!cir::MissingFeatures::opCallArgEvaluationOrder());

  emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
               e->getDirectCallee());

  const CIRGenFunctionInfo &funcInfo =
      cgm.getTypes().arrangeFreeFunctionCall(args, fnType);

  assert(!cir::MissingFeatures::opCallNoPrototypeFunc());
  assert(!cir::MissingFeatures::opCallFnInfoOpts());
  assert(!cir::MissingFeatures::hip());
  assert(!cir::MissingFeatures::opCallMustTail());

  cir::CIRCallOpInterface callOp;
  RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
                               getLoc(e->getExprLoc()));

  assert(!cir::MissingFeatures::generateDebugInfo());

  return callResult;
}

CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
  e = e->IgnoreParens();

  // Look through function-to-pointer decay.
  if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
    if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
        implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
      return emitCallee(implicitCast->getSubExpr());
    }
    // When performing an indirect call through a function pointer lvalue, the
    // function pointer lvalue is implicitly converted to an rvalue through an
    // lvalue-to-rvalue conversion.
    assert(implicitCast->getCastKind() == CK_LValueToRValue &&
           "unexpected implicit cast on function pointers");
  } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
    // Resolve direct calls.
    const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
    return emitDirectCallee(funcDecl);
  } else if (isa<MemberExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCallee: call to member function is NYI");
    return {};
  }

  assert(!cir::MissingFeatures::opCallPseudoDtor());

  // Otherwise, we have an indirect reference.
  mlir::Value calleePtr;
  QualType functionType;
  if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
    calleePtr = emitScalarExpr(e);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = e->getType();
    calleePtr = emitLValue(e).getPointer();
  }
  assert(functionType->isFunctionType());

  GlobalDecl gd;
  if (const auto *vd =
          dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
    gd = GlobalDecl(vd);

  CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
  CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
  return callee;
}

RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
                                    ReturnValueSlot returnValue) {
  assert(!cir::MissingFeatures::objCBlocks());

  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
    return emitCXXMemberCallExpr(ce, returnValue);

  if (isa<CUDAKernelCallExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
    return RValue::get(nullptr);
  }

  if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
    // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
    // operator member call.
    if (const CXXMethodDecl *md =
            dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
      return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
    // A CXXOperatorCallExpr is created even for explicit object methods, but
    // these should be treated like static function calls. Fall through to do
    // that.
  }

  CIRGenCallee callee = emitCallee(e->getCallee());

  if (callee.isBuiltin())
    return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
                           returnValue);

  if (isa<CXXPseudoDestructorExpr>(e->getCallee())) {
    cgm.errorNYI(e->getSourceRange(), "call to pseudo destructor");
  }
  assert(!cir::MissingFeatures::opCallPseudoDtor());

  return emitCall(e->getCallee()->getType(), callee, e, returnValue);
}

/// Emit code to compute the specified expression, ignoring the result.
void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
  if (e->isPRValue()) {
    assert(!cir::MissingFeatures::aggValueSlot());
    emitAnyExpr(e);
    return;
  }

  // Just emit it as an l-value and drop the result.
  emitLValue(e);
}
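
// Illustrative example (hypothetical input): for
//   int (*fp)(int) = &f;
//   fp(1);
// emitCallee does not find a direct callee: the function pointer rvalue is
// emitted with emitScalarExpr and wrapped as an indirect CIRGenCallee, while
// a plain `f(1)` looks through the function-to-pointer decay and resolves the
// DeclRefExpr to emitDirectCallee.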

Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
  assert(e->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue lv = emitLValue(e);
  Address addr = lv.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());

  if (e->getType()->isVariableArrayType())
    return addr;

  auto pointeeTy = mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());

  mlir::Type arrayTy = convertType(e->getType());
  assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
  assert(pointeeTy == arrayTy);

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType eltType = e->getType()->castAsArrayTypeUnsafe()->getElementType();
  assert(!cir::MissingFeatures::opTBAA());

  mlir::Value ptr = builder.maybeBuildArrayDecay(
      cgm.getLoc(e->getSourceRange()), addr.getPointer(),
      convertTypeForMem(eltType));
  return Address(ptr, addr.getAlignment());
}

/// Given the address of a temporary variable, produce an r-value of its type.
RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type,
                                           clang::SourceLocation loc) {
  LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case cir::TEK_Complex:
    cgm.errorNYI(loc, "convertTempToRValue: complex type");
    return RValue::get(nullptr);
  case cir::TEK_Aggregate:
    cgm.errorNYI(loc, "convertTempToRValue: aggregate type");
    return RValue::get(nullptr);
  case cir::TEK_Scalar:
    return RValue::get(emitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// the appropriate regions.
mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
                                                     const Stmt *thenS,
                                                     const Stmt *elseS) {
  mlir::Location thenLoc = getLoc(thenS->getSourceRange());
  std::optional<mlir::Location> elseLoc;
  if (elseS)
    elseLoc = getLoc(elseS->getSourceRange());

  mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
  emitIfOnBoolExpr(
      cond, /*thenBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
        resThen = emitStmt(thenS, /*useCurrentScope=*/true);
      },
      thenLoc,
      /*elseBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        assert(elseLoc && "Invalid location for elseS.");
        LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
        resElse = emitStmt(elseS, /*useCurrentScope=*/true);
      },
      elseLoc);

  return mlir::LogicalResult::success(resThen.succeeded() &&
                                      resElse.succeeded());
}

/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// the appropriate regions.
cir::IfOp CIRGenFunction::emitIfOnBoolExpr(
    const clang::Expr *cond, BuilderCallbackRef thenBuilder,
    mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
    std::optional<mlir::Location> elseLoc) {
  // Attempt to be as accurate as possible with IfOp location, generate
  // one fused location that has either 2 or 4 total locations, depending
  // on else's availability.
  SmallVector<mlir::Location, 2> ifLocs{thenLoc};
  if (elseLoc)
    ifLocs.push_back(*elseLoc);
  mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);

  // Emit the code with the fully general case.
  mlir::Value condV = emitOpOnBoolExpr(loc, cond);
  return builder.create<cir::IfOp>(loc, condV, elseLoc.has_value(),
                                   /*thenBuilder=*/thenBuilder,
                                   /*elseBuilder=*/elseBuilder);
}
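
// Illustrative example: `if (x) foo(); else bar();` is emitted through the
// overloads above as a single cir.if whose location fuses the then/else
// source ranges; the condition is first narrowed to a bool value by
// emitOpOnBoolExpr.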

/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
                                             const Expr *cond) {
  assert(!cir::MissingFeatures::pgoUse());
  assert(!cir::MissingFeatures::generateDebugInfo());
  cond = cond->IgnoreParens();

  // In LLVM the condition is reversed here for efficient codegen.
  // This should be done in CIR prior to LLVM lowering; if we do it now
  // we can make CIR-based diagnostics misleading.
  // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
  assert(!cir::MissingFeatures::shouldReverseUnaryCondOnBoolExpr());

  if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
    Expr *trueExpr = condOp->getTrueExpr();
    Expr *falseExpr = condOp->getFalseExpr();
    mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());

    mlir::Value ternaryOpRes =
        builder
            .create<cir::TernaryOp>(
                loc, condV, /*thenBuilder=*/
                [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
                  mlir::Value lhs = emitScalarExpr(trueExpr);
                  b.create<cir::YieldOp>(loc, lhs);
                },
                /*elseBuilder=*/
                [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
                  mlir::Value rhs = emitScalarExpr(falseExpr);
                  b.create<cir::YieldOp>(loc, rhs);
                })
            .getResult();

    return emitScalarConversion(ternaryOpRes, condOp->getType(),
                                getContext().BoolTy, condOp->getExprLoc());
  }

  if (isa<CXXThrowExpr>(cond)) {
    cgm.errorNYI("NYI");
    return createDummyValue(loc, cond->getType());
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  assert(!cir::MissingFeatures::insertBuiltinUnpredictable());

  // Emit the code with the fully general case.
  return evaluateExprAsBool(cond);
}

mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       bool insertIntoFnEntryBlock,
                                       mlir::Value arraySize) {
  mlir::Block *entryBlock = insertIntoFnEntryBlock
                                ? getCurFunctionEntryBlock()
                                : curLexScope->getEntryBlock();

  // If this is an alloca in the entry basic block of a cir.try and there's
  // a surrounding cir.scope, make sure the alloca ends up in the surrounding
  // scope instead. This is necessary in order to guarantee all SSA values are
  // reachable during cleanups.
  assert(!cir::MissingFeatures::tryOp());

  return emitAlloca(name, ty, loc, alignment,
                    builder.getBestAllocaInsertPoint(entryBlock), arraySize);
}
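
// Illustrative example: a local declaration such as `int i;` allocates
// through the overloads above; the cir.alloca is placed at the best alloca
// insertion point of the enclosing lexical scope's entry block (or of the
// function entry block when insertIntoFnEntryBlock is set), carrying the
// alignment passed by the caller as an attribute.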

mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       mlir::OpBuilder::InsertPoint ip,
                                       mlir::Value arraySize) {
  // CIR uses its own alloca address space rather than following the target
  // data layout like original CodeGen. The data layout awareness should be
  // done in the lowering pass instead.
  assert(!cir::MissingFeatures::addressSpace());
  cir::PointerType localVarPtrTy = builder.getPointerTo(ty);
  mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);

  mlir::Value addr;
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(ip);
    addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
                                /*var type*/ ty, name, alignIntAttr);
    assert(!cir::MissingFeatures::astVarDeclInterface());
  }
  return addr;
}

// Note: this function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *ce,
                                             ReturnValueSlot returnValue) {
  const Expr *callee = ce->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee)) {
    cgm.errorNYI(ce->getSourceRange(),
                 "emitCXXMemberCallExpr: C++ binary operator");
    return RValue::get(nullptr);
  }

  const auto *me = cast<MemberExpr>(callee);
  const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());

  if (md->isStatic()) {
    cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
    return RValue::get(nullptr);
  }

  bool hasQualifier = me->hasQualifier();
  NestedNameSpecifier *qualifier = hasQualifier ? me->getQualifier() : nullptr;
  bool isArrow = me->isArrow();
  const Expr *base = me->getBase();

  return emitCXXMemberOrOperatorMemberCallExpr(
      ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
}
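
// For orientation, a hypothetical example (not taken from a specific test):
// given
//
//   struct S { void f(int); };
//   void use(S s, S *p) { s.f(1); p->f(2); }
//
// both calls are handled by emitCXXMemberCallExpr above. `s.f(1)` sees
// isArrow == false with base `s`, while `p->f(2)` sees isArrow == true with
// base `p`; both are forwarded to emitCXXMemberOrOperatorMemberCallExpr.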

void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
                                          AggValueSlot dest) {
  assert(!dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *cd = e->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (e->requiresZeroInitialization() && !dest.isZeroed()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCXXConstructExpr: requires initialization");
    return;
  }

  // If this is a call to a trivial default constructor:
  // In LLVM: do nothing.
  // In CIR: emit as a regular call; later passes should lower the ctor call
  // into trivial initialization.

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && e->isElidable()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCXXConstructExpr: elidable constructor");
    return;
  }

  if (getContext().getAsArrayType(e->getType())) {
    cgm.errorNYI(e->getSourceRange(), "emitCXXConstructExpr: array type");
    return;
  }

  clang::CXXCtorType type = Ctor_Complete;
  bool forVirtualBase = false;
  bool delegating = false;

  switch (e->getConstructionKind()) {
  case CXXConstructionKind::Complete:
    type = Ctor_Complete;
    break;
  case CXXConstructionKind::Delegating:
    // We should be emitting a constructor; GlobalDecl will assert this.
    type = curGD.getCtorType();
    delegating = true;
    break;
  case CXXConstructionKind::VirtualBase:
    // This should just set 'forVirtualBase' to true and fall through, but
    // virtual base class support is otherwise missing, so this needs to wait
    // until it can be tested.
    cgm.errorNYI(e->getSourceRange(),
                 "emitCXXConstructExpr: virtual base constructor");
    return;
  case CXXConstructionKind::NonVirtualBase:
    type = Ctor_Base;
    break;
  }

  emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
}

RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
  // Emit the expression as an lvalue.
  LValue lv = emitLValue(e);
  assert(lv.isSimple());
  mlir::Value value = lv.getPointer();

  assert(!cir::MissingFeatures::sanitizers());

  return RValue::get(value);
}

Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc,
                                            LValueBaseInfo *pointeeBaseInfo) {
  if (refLVal.isVolatile())
    cgm.errorNYI(loc, "load of volatile reference");

  cir::LoadOp load =
      builder.create<cir::LoadOp>(loc, refLVal.getAddress().getElementType(),
                                  refLVal.getAddress().getPointer());

  assert(!cir::MissingFeatures::opTBAA());

  QualType pointeeType = refLVal.getType()->getPointeeType();
  CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
  return Address(load, convertTypeForMem(pointeeType), align);
}

LValue CIRGenFunction::emitLoadOfReferenceLValue(Address refAddr,
                                                 mlir::Location loc,
                                                 QualType refTy,
                                                 AlignmentSource source) {
  LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
  LValueBaseInfo pointeeBaseInfo;
  assert(!cir::MissingFeatures::opTBAA());
  Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
  return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
                        pointeeBaseInfo);
}

mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
                                             clang::QualType qt) {
  mlir::Type t = convertType(qt);
  CharUnits alignment = getContext().getTypeAlignInChars(qt);
  return builder.createDummyValue(loc, t, alignment);
}

//===----------------------------------------------------------------------===//
// CIR builder helpers
//===----------------------------------------------------------------------===//

Address CIRGenFunction::createMemTemp(QualType ty, mlir::Location loc,
                                      const Twine &name, Address *alloca,
                                      mlir::OpBuilder::InsertPoint ip) {
  // FIXME: Should we prefer the preferred type alignment here?
  return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
                       alloca, ip);
}

Address CIRGenFunction::createMemTemp(QualType ty, CharUnits align,
                                      mlir::Location loc, const Twine &name,
                                      Address *alloca,
                                      mlir::OpBuilder::InsertPoint ip) {
  Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
                                    /*ArraySize=*/nullptr, alloca, ip);
  if (ty->isConstantMatrixType()) {
    assert(!cir::MissingFeatures::matrixType());
    cgm.errorNYI(loc, "temporary matrix value");
  }
  return result;
}
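
// Hypothetical usage sketch (names are illustrative, and the trailing
// insertion-point argument is assumed to keep its default): materialize a
// scratch slot for an aggregate and also retrieve the raw alloca address:
//
//   Address rawAddr = Address::invalid();
//   Address tmp = createMemTemp(recordTy, loc, "agg.tmp", &rawAddr);
//
// `tmp` is the naturally aligned address to use, while `rawAddr` receives the
// alloca address before any address-space cast is applied.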

/// This creates an alloca and inserts it into the entry block of the
/// current region.
Address CIRGenFunction::createTempAllocaWithoutCast(
    mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
    mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
  cir::AllocaOp alloca = ip.isSet()
                             ? createTempAlloca(ty, loc, name, ip, arraySize)
                             : createTempAlloca(ty, loc, name, arraySize);
  alloca.setAlignmentAttr(cgm.getSize(align));
  return Address(alloca, ty, align);
}

/// This creates an alloca and inserts it into the entry block. The alloca is
/// cast to the default address space if necessary.
Address CIRGenFunction::createTempAlloca(mlir::Type ty, CharUnits align,
                                         mlir::Location loc, const Twine &name,
                                         mlir::Value arraySize,
                                         Address *allocaAddr,
                                         mlir::OpBuilder::InsertPoint ip) {
  Address alloca =
      createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
  if (allocaAddr)
    *allocaAddr = alloca;
  mlir::Value v = alloca.getPointer();
  // Alloca always returns a pointer in the alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  assert(!cir::MissingFeatures::addressSpace());
  return Address(v, ty, align);
}

/// This creates an alloca and inserts it into the entry block if \p ArraySize
/// is nullptr, otherwise inserts it at the current insertion point of the
/// builder.
cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
                                               mlir::Location loc,
                                               const Twine &name,
                                               mlir::Value arraySize,
                                               bool insertIntoFnEntryBlock) {
  return cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
                                        insertIntoFnEntryBlock, arraySize)
                                 .getDefiningOp());
}

/// This creates an alloca and inserts it into the provided insertion point.
cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
                                               mlir::Location loc,
                                               const Twine &name,
                                               mlir::OpBuilder::InsertPoint ip,
                                               mlir::Value arraySize) {
  assert(ip.isSet() && "Insertion point is not set");
  return cast<cir::AllocaOp>(
      emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
          .getDefiningOp());
}
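
// Illustrative sketch for tryEmitAsConstant below (hypothetical input):
//
//   enum { BufSize = 64 };
//   int n = BufSize;
//
// The DeclRefExpr naming `BufSize` refers to an EnumConstantDecl, so it is
// evaluated to the constant 64 and returned as a ConstantEmission;
// emitScalarConstant then materializes it with builder.getConstant instead of
// emitting an l-value load.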

/// Try to emit a reference to the given value without producing it as
/// an l-value. For many cases, this is just an optimization, but it avoids
/// us needing to emit global copies of variables if they're named without
/// triggering a formal use in a context where we can't emit a direct
/// reference to them, for instance if a block or lambda or a member of a
/// local class uses a const int variable or constexpr variable from an
/// enclosing function.
///
/// For named members of enums, this is the only way they are emitted.
CIRGenFunction::ConstantEmission
CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // There is a lot more to do here, but for now only EnumConstantDecl is
  // supported.
  assert(!cir::MissingFeatures::tryEmitAsConstant());

  // The value needs to be an enum constant or a constant variable.
  if (!isa<EnumConstantDecl>(value))
    return ConstantEmission();

  Expr::EvalResult result;
  if (!refExpr->EvaluateAsRValue(result, getContext()))
    return ConstantEmission();

  QualType resultType = refExpr->getType();

  // As long as we're only handling EnumConstantDecl, there should be no
  // side-effects.
  assert(!result.HasSideEffects);

  // Emit as a constant.
  // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
  // somewhat heavy refactoring...)
  mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
      refExpr->getLocation(), result.Val, resultType);
  mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
  assert(cstToEmit && "expected a typed attribute");

  assert(!cir::MissingFeatures::generateDebugInfo());

  return ConstantEmission::forValue(cstToEmit);
}

mlir::Value CIRGenFunction::emitScalarConstant(
    const CIRGenFunction::ConstantEmission &constant, Expr *e) {
  assert(constant && "not a constant");
  if (constant.isReference()) {
    cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
    return {};
  }
  return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CIRGenFunction::isLValueSuitableForInlineAtomic(LValue lv) {
  if (!cgm.getLangOpts().MSVolatile)
    return false;

  cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
  return false;
}