//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

#include "CIRGenCXXABI.h"
#include "CIRGenCall.h"
#include "CIRGenValue.h"
#include "mlir/IR/Location.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CIR/MissingFeatures.h"

#include <cassert>

namespace clang::CIRGen {

CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                               bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {}

CIRGenFunction::~CIRGenFunction() {}

// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
  return cgm.getTypes().convertTypeForMem(t);
}

mlir::Type CIRGenFunction::convertType(QualType t) {
  return cgm.getTypes().convertType(t);
}

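// Note: the getLoc() overloads below translate Clang source locations into
// MLIR locations. A valid SourceLocation is mapped to an mlir::FileLineColLoc
// based on its presumed location (so #line directives are honored), printed
// roughly as loc("foo.cpp":4:10); a SourceRange is mapped to an mlir::FusedLoc
// of its begin and end locations. Invalid locations fall back to the currently
// tracked source location, or to an unknown location as a last resort.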
mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    const SourceManager &sm = getContext().getSourceManager();
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}

mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    mlir::Location beg = getLoc(srcLoc.getBegin());
    mlir::Location end = getLoc(srcLoc.getEnd());
    SmallVector<mlir::Location, 2> locs = {beg, end};
    mlir::Attribute metadata;
    return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
  }
  if (currSrcLoc) {
    return *currSrcLoc;
  }
  // We're brave, but time to give up.
  return builder.getUnknownLoc();
}

mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
  SmallVector<mlir::Location, 2> locs = {lhs, rhs};
  mlir::Attribute metadata;
  return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}

bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
  // Null statement, not a label!
  if (!s)
    return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(s))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(s) && !ignoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore case statements when we
  // recursively process the sub-statements of the switch. If we haven't
  // encountered a switch statement, we treat case statements like labels, but
  // if we are processing a switch statement, case statements are expected.
  if (isa<SwitchStmt>(s))
    ignoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  return std::any_of(s->child_begin(), s->child_end(),
                     [=](const Stmt *subStmt) {
                       return containsLabel(subStmt, ignoreCaseStmts);
                     });
}

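// Note: the constant-folding helpers below are used by branch emission to
// detect conditions that fold to a constant, so dead arms can be skipped. For
// example, the condition in `if (sizeof(int) == 4)` constant-folds and the
// untaken arm can be dropped (unless it contains a label that is the target
// of a goto), whereas `if (x)` with a non-constant `x` does not fold and must
// be emitted as a real branch.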
/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds, return true and set
/// the boolean result in resultBool.
bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
                                         bool allowLabels) {
  llvm::APSInt resultInt;
  if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
    return false;

  resultBool = resultInt.getBoolValue();
  return true;
}

/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value in resultInt.
bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
                                                  llvm::APSInt &resultInt,
                                                  bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not an integer, or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}

void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
                                            CharUnits alignment) {
  if (!type->isVoidType()) {
    fnRetAlloca = emitAlloca("__retval", convertType(type), loc, alignment,
                             /*insertIntoFnEntryBlock=*/false);
  }
}

void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                             mlir::Location loc, CharUnits alignment,
                             bool isParam) {
  const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
  assert(namedVar && "Needs a named decl");
  assert(!cir::MissingFeatures::cgfSymbolTable());

  auto allocaOp = cast<cir::AllocaOp>(addrVal.getDefiningOp());
  if (isParam)
    allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  if (ty->isReferenceType() || ty.isConstQualified())
    allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
}

void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  if (returnBlock != nullptr) {
    // Write out the return block, which loads the value from `__retval` and
    // issues the `cir.return`.
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(returnBlock);
    (void)emitReturn(*returnLoc);
  }

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    if (returnBlock != nullptr && returnBlock->getUses().empty())
      returnBlock->erase();
    return;
  }

  // Reached the end of the scope.
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(curBlock);

    if (localScope->depth == 0) {
      // Reached the end of the function.
      if (returnBlock != nullptr) {
        if (returnBlock->getUses().empty())
          returnBlock->erase();
        else {
          builder.create<cir::BrOp>(*returnLoc, returnBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }
    // Reached the end of a non-function scope. Some scopes, such as those
    // used with the ?: operator, can return a value.
    if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
      !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
              : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
    }
  }
}

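/// Emit the return for the current function. For a non-void function this
/// loads the value stored in the `__retval` alloca and returns it;
/// schematically (names and types are illustrative only), the generated CIR
/// looks like:
///   %v = cir.load %retval_addr : !cir.ptr<!s32i>, !s32i
///   cir.return %v : !s32i
/// For a void function a bare `cir.return` is emitted instead.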
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  if (!cgf.curFn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = builder.create<cir::LoadOp>(
        loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return builder.create<cir::ReturnOp>(loc,
                                         llvm::ArrayRef(value.getResult()));
  }
  return builder.create<cir::ReturnOp>(loc);
}

// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
                                  QualType returnType) {
  // We can't just discard the return value for a record type with a complex
  // destructor or a non-trivially copyable type.
  if (const RecordType *recordType =
          returnType.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl()))
      return classDecl->hasTrivialDestructor();
  }
  return returnType.isTriviallyCopyableType(astContext);
}

void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        builder.create<cir::TrapOp>(localScope->endLoc);
      else
        builder.create<cir::UnreachableOp>(localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}

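/// Set up the state needed to emit the body of the given function: record the
/// current cir::FuncOp, create an alloca for each parameter and store the
/// corresponding entry-block argument into it, create the `__retval` slot for
/// non-void return types, and emit the C++ instance-method prologue when
/// required.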
void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
                                   cir::FuncOp fn, cir::FuncType funcType,
                                   FunctionArgList args, SourceLocation loc,
                                   SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
  curFuncDecl = d->getNonClosureContext();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // TODO(cir): this should live in `emitFunctionProlog`.
  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    assert(!cir::MissingFeatures::constructABIArgDirectExtend());
    if (isPromoted)
      cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");

    // The location of the store to the param storage is tracked as the
    // beginning of the function body.
    mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType())
    emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
                           getContext().getTypeAlignInChars(returnType));

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      cgm.errorNYI(loc, "lambda call operator");
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
      cxxThisValue = cxxabiThisValue;
    }

    assert(!cir::MissingFeatures::sanitizers());
    assert(!cir::MissingFeatures::emitTypeCheck());
  }
}

void CIRGenFunction::finishFunction(SourceLocation endLoc) {}

mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  auto result = mlir::LogicalResult::success();
  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    emitCompoundStmtWithoutScope(*block);
  else
    result = emitStmt(body, /*useCurrentScope=*/true);

  return result;
}

static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
  // Remove any leftover blocks that are unreachable and empty, since they
  // neither represent unreachable code useful for warnings nor anything else
  // deemed useful in general.
  SmallVector<mlir::Block *> blocksToDelete;
  for (mlir::Block &block : func.getBlocks()) {
    if (block.empty() && block.getUses().empty())
      blocksToDelete.push_back(&block);
  }
  for (mlir::Block *block : blocksToDelete)
    block->erase();
}

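/// Emit CIR for the body of the given function declaration: add the entry
/// block, set up the prologue via startFunction, dispatch to the specialized
/// constructor/destructor/assignment emitters (or to emitFunctionBody for an
/// ordinary body), and finally erase any leftover empty, unused blocks.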
cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      &getMLIRContext(),
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
      emitConstructorBody(args);
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker");
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
      emitImplicitAssignmentOperatorBody(args);
    } else if (body) {
      if (mlir::failed(emitFunctionBody(body))) {
        fn.erase();
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  eraseEmptyAndUnusedBlocks(fn);
  return fn;
}

void CIRGenFunction::emitConstructorBody(FunctionArgList &args) {
  assert(!cir::MissingFeatures::sanitizers());
  const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
  CXXCtorType ctorType = curGD.getCtorType();

  assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
          ctorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
      cgm.getTarget().getCXXABI().hasConstructorVariants()) {
    emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
    return;
  }

  const FunctionDecl *definition = nullptr;
  Stmt *body = ctor->getBody(definition);
  assert(definition == ctor && "emitting wrong constructor body");

  if (isa_and_nonnull<CXXTryStmt>(body)) {
    cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
    return;
  }

  assert(!cir::MissingFeatures::incrementProfileCounter());
  assert(!cir::MissingFeatures::runCleanupsScope());

  // TODO: in restricted cases, we can emit the vbase initializers of a
  // complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member initializers.
  emitCtorPrologue(ctor, ctorType, args);

  // TODO(cir): propagate this result via mlir::LogicalResult. For now, just
  // report an NYI error so the failure is handled.
  if (mlir::failed(emitStmt(body, true))) {
    cgm.errorNYI(ctor->getSourceRange(),
                 "emitConstructorBody: emit body statement failed.");
    return;
  }
}

/// Emits the body of the current destructor.
void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
    return;
  }

  Stmt *body = dtor->getBody();
  assert(body && !cir::MissingFeatures::incrementProfileCounter());

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting) {
    cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  assert(!cir::MissingFeatures::sanitizers());
  assert(!cir::MissingFeatures::dtorCleanups());

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    assert(!cir::MissingFeatures::dtorCleanups());

    // TODO(cir): A complete destructor is supposed to call the base
    // destructor. Since we have to emit both dtor kinds, we just fall through
    // for now. As long as we don't support virtual bases, this should be
    // functionally equivalent.
    assert(!cir::MissingFeatures::completeDtors());

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    assert(!cir::MissingFeatures::dtorCleanups());
    assert(!cir::MissingFeatures::vtableInitialization());

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // Nothing to do besides what's in the epilogue.
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    assert(!cir::MissingFeatures::appleKext());

    break;
  }

  assert(!cir::MissingFeatures::dtorCleanups());

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
                                                         QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  assert(!cir::MissingFeatures::opTBAA());
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}

LValue CIRGenFunction::makeNaturalAlignAddrLValue(mlir::Value val,
                                                  QualType ty) {
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);
  assert(!cir::MissingFeatures::opTBAA());
  return makeAddrLValue(addr, ty, baseInfo);
}

clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl gd,
                                                     FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (cd->getInheritedConstructor())
      cgm.errorNYI(fd->getSourceRange(),
                   "buildFunctionArgList: inherited constructor");

  for (auto *param : fd->parameters())
    args.push_back(param);

  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    assert(!cir::MissingFeatures::cxxabiStructorImplicitParam());

  return retTy;
}

/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue CIRGenFunction::emitLValue(const Expr *e) {
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
    getCIRGenModule().errorNYI(e->getSourceRange(),
                               std::string("l-value not implemented for '") +
                                   e->getStmtClassName() + "'");
    return LValue();
  case Expr::ArraySubscriptExprClass:
    return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
  case Expr::UnaryOperatorClass:
    return emitUnaryOpLValue(cast<UnaryOperator>(e));
  case Expr::StringLiteralClass:
    return emitStringLiteralLValue(cast<StringLiteral>(e));
  case Expr::MemberExprClass:
    return emitMemberExpr(cast<MemberExpr>(e));
  case Expr::BinaryOperatorClass:
    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
    cgm.errorNYI(e->getSourceRange(),
                 "CompoundAssignOperator with ComplexType");
    return LValue();
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return emitCallExprLValue(cast<CallExpr>(e));
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::DeclRefExprClass:
    return emitDeclRefLValue(cast<DeclRefExpr>(e));
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
    return emitCastLValue(cast<CastExpr>(e));
  }
}

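/// Return a versioned temporary name by appending the counter to the given
/// base name, e.g. getVersionedTmpName("agg.tmp", 1) yields "agg.tmp1".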
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
  SmallString<256> buffer;
  llvm::raw_svector_ostream out(buffer);
  out << name << cnt;
  return std::string(out.str());
}

std::string CIRGenFunction::getCounterAggTmpAsString() {
  return getVersionedTmpName("agg.tmp", counterAggTmp++);
}

void CIRGenFunction::emitNullInitialization(mlir::Location loc,
                                            Address destPtr, QualType ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *rt = ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(rt->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}

// TODO(cir): should be shared with LLVM codegen.
bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
  const Expr *e = ce->getSubExpr();

  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}

} // namespace clang::CIRGen