//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
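  // (Illustrative note: under -fopenmp-simd, a combined directive such as
  // '#pragma omp parallel for simd' keeps only its simd semantics; the
  // parallel/worksharing parts are dropped.)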
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    llvm_unreachable("teams loop directive not supported yet.");
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    llvm_unreachable("target teams loop directive not supported yet.");
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    llvm_unreachable("parallel loop directive not supported yet.");
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    llvm_unreachable("target parallel loop directive not supported yet.");
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    llvm_unreachable("parallel masked directive not supported yet.");
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail:
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
      break;
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<bool> save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore<bool> save_alwaysinline(InAlwaysInlineAttributedStmt,
                                         alwaysinline);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the conditional branch, the LoopHeader is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the do.cond block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the conditional branch, the do.cond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
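///
/// An illustrative (GNU C) example of both forms:
///   void g(void);
///   void f(void) { return g(); }   // operand although 'f' returns void
///   int h(void) { return; }        // no operand although 'h' returns int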
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized).
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big, add
/// multiple cases to the switch instruction, one for each value within
/// the range. If the range is too big, emit an "if" condition check instead.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If the range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
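    // (Illustrative: the GNU range 'case 1 ... 8:' becomes eight individual
    // switch cases, all targeting the same destination block.)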
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit an "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
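  // (Illustrative: for 'case 1: break;' the case value can branch straight
  // to the break destination instead of an otherwise-empty "sw.bb" block.)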
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //   case 1:
  //     case 2:
  //       case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO: When the next case has a likelihood attribute the code returns to
  // the recursive algorithm. Maybe improve this case if it becomes common
  // practice to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes,
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
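  // (Illustrative, by analogy with the EmitCaseStmt example above:
  //  switch (4) { case 4: do { default:; } while (0); })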
1597 if (!SwitchInsn) {
1598 EmitStmt(S.getSubStmt());
1599 return;
1600 }
1601
1602 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1603 assert(DefaultBlock->empty() &&
1604 "EmitDefaultStmt: Default block already defined?");
1605
1606 if (SwitchLikelihood)
1607 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1608
1609 EmitBlockWithFallThrough(DefaultBlock, &S);
1610
1611 EmitStmt(S.getSubStmt());
1612 }
1613
1614 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1615 /// constant value that is being switched on, see if we can dead code eliminate
1616 /// the body of the switch to a simple series of statements to emit. Basically,
1617 /// on a switch (5) we want to find these statements:
1618 /// case 5:
1619 /// printf(...); <--
1620 /// ++i; <--
1621 /// break;
1622 ///
1623 /// and add them to the ResultStmts vector. If it is unsafe to do this
1624 /// transformation (for example, one of the elided statements contains a label
1625 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1626 /// should include statements after it (e.g. the printf() line is a substmt of
1627 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1628 /// statement, then return CSFC_Success.
1629 ///
1630 /// If Case is non-null, then we are looking for the specified case, checking
1631 /// that nothing we jump over contains labels. If Case is null, then we found
1632 /// the case and are looking for the break.
1633 ///
1634 /// If the recursive walk actually finds our Case, then we set FoundCase to
1635 /// true.
1636 ///
1637 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1638 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1639 const SwitchCase *Case,
1640 bool &FoundCase,
1641 SmallVectorImpl<const Stmt*> &ResultStmts) {
1642 // If this is a null statement, just succeed.
1643 if (!S)
1644 return Case ? CSFC_Success : CSFC_FallThrough;
1645
1646 // If this is the switchcase (case 4: or default) that we're looking for, then
1647 // we're in business. Just add the substatement.
1648 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1649 if (S == Case) {
1650 FoundCase = true;
1651 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1652 ResultStmts);
1653 }
1654
1655 // Otherwise, this is some other case or default statement, just ignore it.
1656 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1657 ResultStmts);
1658 }
1659
1660 // If we are in the live part of the code and we found our break statement,
1661 // return a success!
1662 if (!Case && isa<BreakStmt>(S))
1663 return CSFC_Success;
1664
1665 // If this is a compound statement, then it might contain the SwitchCase, the
1666 // break, or neither.
1667 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1668 // Handle this as two cases: we might be looking for the SwitchCase (if so
1669 // the skipped statements must be skippable) or we might already have it.
1670 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1671 bool StartedInLiveCode = FoundCase;
1672 unsigned StartSize = ResultStmts.size();
1673
1674 // If we've not found the case yet, scan through looking for it.
1675 if (Case) {
1676 // Keep track of whether we see a skipped declaration. The code could be
1677 // using the declaration even if it is skipped, so we can't optimize out
1678 // the decl if the kept statements might refer to it.
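// Illustrative example of the hazard being tracked here:
//   switch (n) {
//     int x;       // skipped when jumping to the case label,
//   case 0:
//     return x;    // but still referenced by the kept statements
//   }
// If a skipped declaration might be used, the fold must be given up.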
1679 bool HadSkippedDecl = false;
1680
1681 // If we're looking for the case, just see if we can skip each of the
1682 // substatements.
1683 for (; Case && I != E; ++I) {
1684 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1685
1686 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1687 case CSFC_Failure: return CSFC_Failure;
1688 case CSFC_Success:
1689 // A successful result means either that 1) the statement doesn't
1690 // have the case and is skippable, or 2) it does contain the case value
1691 // and also contains the break to exit the switch. In the latter case,
1692 // we just verify the rest of the statements are elidable.
1693 if (FoundCase) {
1694 // If we found the case and skipped declarations, we can't do the
1695 // optimization.
1696 if (HadSkippedDecl)
1697 return CSFC_Failure;
1698
1699 for (++I; I != E; ++I)
1700 if (CodeGenFunction::ContainsLabel(*I, true))
1701 return CSFC_Failure;
1702 return CSFC_Success;
1703 }
1704 break;
1705 case CSFC_FallThrough:
1706 // If we have a fallthrough condition, then we must have found the
1707 // case and started to include statements. Consider the rest of the
1708 // statements in the compound statement as candidates for inclusion.
1709 assert(FoundCase && "Didn't find case but returned fallthrough?");
1710 // We recursively found Case, so we're not looking for it anymore.
1711 Case = nullptr;
1712
1713 // If we found the case and skipped declarations, we can't do the
1714 // optimization.
1715 if (HadSkippedDecl)
1716 return CSFC_Failure;
1717 break;
1718 }
1719 }
1720
1721 if (!FoundCase)
1722 return CSFC_Success;
1723
1724 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1725 }
1726
1727 // If we have statements in our range, then we know that the statements are
1728 // live and need to be added to the set of statements we're tracking.
1729 bool AnyDecls = false;
1730 for (; I != E; ++I) {
1731 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1732
1733 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1734 case CSFC_Failure: return CSFC_Failure;
1735 case CSFC_FallThrough:
1736 // A fallthrough result means that the statement was simple and just
1737 // included in ResultStmts; keep adding subsequent statements.
1738 break;
1739 case CSFC_Success:
1740 // A successful result means that we found the break statement and
1741 // stopped statement inclusion. We just ensure that any leftover stmts
1742 // are skippable and return success ourselves.
1743 for (++I; I != E; ++I)
1744 if (CodeGenFunction::ContainsLabel(*I, true))
1745 return CSFC_Failure;
1746 return CSFC_Success;
1747 }
1748 }
1749
1750 // If we're about to fall out of a scope without hitting a 'break;', we
1751 // can't perform the optimization if there were any decls in that scope
1752 // (we'd lose their end-of-lifetime).
1753 if (AnyDecls) {
1754 // If the entire compound statement was live, there's one more thing we
1755 // can try before giving up: emit the whole thing as a single statement.
1756 // We can do that unless the statement contains a 'break;'.
1757 // FIXME: Such a break must be at the end of a construct within this one.
1758 // We could emit this by just ignoring the BreakStmts entirely.
1759 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) { 1760 ResultStmts.resize(StartSize); 1761 ResultStmts.push_back(S); 1762 } else { 1763 return CSFC_Failure; 1764 } 1765 } 1766 1767 return CSFC_FallThrough; 1768 } 1769 1770 // Okay, this is some other statement that we don't handle explicitly, like a 1771 // for statement or increment etc. If we are skipping over this statement, 1772 // just verify it doesn't have labels, which would make it invalid to elide. 1773 if (Case) { 1774 if (CodeGenFunction::ContainsLabel(S, true)) 1775 return CSFC_Failure; 1776 return CSFC_Success; 1777 } 1778 1779 // Otherwise, we want to include this statement. Everything is cool with that 1780 // so long as it doesn't contain a break out of the switch we're in. 1781 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure; 1782 1783 // Otherwise, everything is great. Include the statement and tell the caller 1784 // that we fall through and include the next statement as well. 1785 ResultStmts.push_back(S); 1786 return CSFC_FallThrough; 1787 } 1788 1789 /// FindCaseStatementsForValue - Find the case statement being jumped to and 1790 /// then invoke CollectStatementsForCase to find the list of statements to emit 1791 /// for a switch on constant. See the comment above CollectStatementsForCase 1792 /// for more details. 1793 static bool FindCaseStatementsForValue(const SwitchStmt &S, 1794 const llvm::APSInt &ConstantCondValue, 1795 SmallVectorImpl<const Stmt*> &ResultStmts, 1796 ASTContext &C, 1797 const SwitchCase *&ResultCase) { 1798 // First step, find the switch case that is being branched to. We can do this 1799 // efficiently by scanning the SwitchCase list. 1800 const SwitchCase *Case = S.getSwitchCaseList(); 1801 const DefaultStmt *DefaultCase = nullptr; 1802 1803 for (; Case; Case = Case->getNextSwitchCase()) { 1804 // It's either a default or case. Just remember the default statement in 1805 // case we're not jumping to any numbered cases. 1806 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) { 1807 DefaultCase = DS; 1808 continue; 1809 } 1810 1811 // Check to see if this case is the one we're looking for. 1812 const CaseStmt *CS = cast<CaseStmt>(Case); 1813 // Don't handle case ranges yet. 1814 if (CS->getRHS()) return false; 1815 1816 // If we found our case, remember it as 'case'. 1817 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue) 1818 break; 1819 } 1820 1821 // If we didn't find a matching case, we use a default if it exists, or we 1822 // elide the whole switch body! 1823 if (!Case) { 1824 // It is safe to elide the body of the switch if it doesn't contain labels 1825 // etc. If it is safe, return successfully with an empty ResultStmts list. 1826 if (!DefaultCase) 1827 return !CodeGenFunction::ContainsLabel(&S); 1828 Case = DefaultCase; 1829 } 1830 1831 // Ok, we know which case is being jumped to, try to collect all the 1832 // statements that follow it. This can fail for a variety of reasons. Also, 1833 // check to see that the recursive walk actually found our case statement. 1834 // Insane cases like this can fail to find it in the recursive walk since we 1835 // don't handle every stmt kind: 1836 // switch (4) { 1837 // while (1) { 1838 // case 4: ... 
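// (Illustrative note: this is the Duff's-device shape — the case label sits
// inside a statement kind the walk does not descend into, so FoundCase
// stays false and the caller rejects the fold.)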
1839 bool FoundCase = false; 1840 ResultCase = Case; 1841 return CollectStatementsForCase(S.getBody(), Case, FoundCase, 1842 ResultStmts) != CSFC_Failure && 1843 FoundCase; 1844 } 1845 1846 static Optional<SmallVector<uint64_t, 16>> 1847 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) { 1848 // Are there enough branches to weight them? 1849 if (Likelihoods.size() <= 1) 1850 return None; 1851 1852 uint64_t NumUnlikely = 0; 1853 uint64_t NumNone = 0; 1854 uint64_t NumLikely = 0; 1855 for (const auto LH : Likelihoods) { 1856 switch (LH) { 1857 case Stmt::LH_Unlikely: 1858 ++NumUnlikely; 1859 break; 1860 case Stmt::LH_None: 1861 ++NumNone; 1862 break; 1863 case Stmt::LH_Likely: 1864 ++NumLikely; 1865 break; 1866 } 1867 } 1868 1869 // Is there a likelihood attribute used? 1870 if (NumUnlikely == 0 && NumLikely == 0) 1871 return None; 1872 1873 // When multiple cases share the same code they can be combined during 1874 // optimization. In that case the weights of the branch will be the sum of 1875 // the individual weights. Make sure the combined sum of all neutral cases 1876 // doesn't exceed the value of a single likely attribute. 1877 // The additions both avoid divisions by 0 and make sure the weights of None 1878 // don't exceed the weight of Likely. 1879 const uint64_t Likely = INT32_MAX / (NumLikely + 2); 1880 const uint64_t None = Likely / (NumNone + 1); 1881 const uint64_t Unlikely = 0; 1882 1883 SmallVector<uint64_t, 16> Result; 1884 Result.reserve(Likelihoods.size()); 1885 for (const auto LH : Likelihoods) { 1886 switch (LH) { 1887 case Stmt::LH_Unlikely: 1888 Result.push_back(Unlikely); 1889 break; 1890 case Stmt::LH_None: 1891 Result.push_back(None); 1892 break; 1893 case Stmt::LH_Likely: 1894 Result.push_back(Likely); 1895 break; 1896 } 1897 } 1898 1899 return Result; 1900 } 1901 1902 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { 1903 // Handle nested switch statements. 1904 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn; 1905 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights; 1906 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood; 1907 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock; 1908 1909 // See if we can constant fold the condition of the switch and therefore only 1910 // emit the live case statement (if any) of the switch. 1911 llvm::APSInt ConstantCondValue; 1912 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) { 1913 SmallVector<const Stmt*, 4> CaseStmts; 1914 const SwitchCase *Case = nullptr; 1915 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts, 1916 getContext(), Case)) { 1917 if (Case) 1918 incrementProfileCounter(Case); 1919 RunCleanupsScope ExecutedScope(*this); 1920 1921 if (S.getInit()) 1922 EmitStmt(S.getInit()); 1923 1924 // Emit the condition variable if needed inside the entire cleanup scope 1925 // used by this special case for constant folded switches. 1926 if (S.getConditionVariable()) 1927 EmitDecl(*S.getConditionVariable()); 1928 1929 // At this point, we are no longer "within" a switch instance, so 1930 // we can temporarily enforce this to ensure that any embedded case 1931 // statements are not emitted. 1932 SwitchInsn = nullptr; 1933 1934 // Okay, we can dead code eliminate everything except this case. Emit the 1935 // specified series of statements and we're good. 
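// e.g. (illustrative): for switch (5) { case 5: printf(...); break; } the
// CaseStmts vector now holds just the printf call, which is emitted
// straight-line below with no switch instruction at all.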
1936 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1937 EmitStmt(CaseStmts[i]);
1938 incrementProfileCounter(&S);
1939
1940 // Now we want to restore the saved switch instance so that nested
1941 // switches continue to function properly.
1942 SwitchInsn = SavedSwitchInsn;
1943
1944 return;
1945 }
1946 }
1947
1948 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1949
1950 RunCleanupsScope ConditionScope(*this);
1951
1952 if (S.getInit())
1953 EmitStmt(S.getInit());
1954
1955 if (S.getConditionVariable())
1956 EmitDecl(*S.getConditionVariable());
1957 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1958
1959 // Create basic block to hold stuff that comes after switch
1960 // statement. We also need to create a default block now so that
1961 // explicit case range tests can have a place to jump to on
1962 // failure.
1963 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1964 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1965 if (PGO.haveRegionCounts()) {
1966 // Walk the SwitchCase list to find how many there are.
1967 uint64_t DefaultCount = 0;
1968 unsigned NumCases = 0;
1969 for (const SwitchCase *Case = S.getSwitchCaseList();
1970 Case;
1971 Case = Case->getNextSwitchCase()) {
1972 if (isa<DefaultStmt>(Case))
1973 DefaultCount = getProfileCount(Case);
1974 NumCases += 1;
1975 }
1976 SwitchWeights = new SmallVector<uint64_t, 16>();
1977 SwitchWeights->reserve(NumCases);
1978 // The default needs to be first. We store the edge count, so we already
1979 // know the right weight.
1980 SwitchWeights->push_back(DefaultCount);
1981 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1982 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1983 // Initialize the default case.
1984 SwitchLikelihood->push_back(Stmt::LH_None);
1985 }
1986
1987 CaseRangeBlock = DefaultBlock;
1988
1989 // Clear the insertion point to indicate we are in unreachable code.
1990 Builder.ClearInsertionPoint();
1991
1992 // All break statements jump to SwitchExit. If BreakContinueStack is
1993 // non-empty then reuse the last ContinueBlock.
1994 JumpDest OuterContinue;
1995 if (!BreakContinueStack.empty())
1996 OuterContinue = BreakContinueStack.back().ContinueBlock;
1997
1998 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1999
2000 // Emit switch body.
2001 EmitStmt(S.getBody());
2002
2003 BreakContinueStack.pop_back();
2004
2005 // Update the default block in case explicit case range tests have
2006 // been chained on top.
2007 SwitchInsn->setDefaultDest(CaseRangeBlock);
2008
2009 // If a default was never emitted:
2010 if (!DefaultBlock->getParent()) {
2011 // If we have cleanups, emit the default block so that there's a
2012 // place to jump through the cleanups from.
2013 if (ConditionScope.requiresCleanups()) {
2014 EmitBlock(DefaultBlock);
2015
2016 // Otherwise, just forward the default block to the switch end.
2017 } else {
2018 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2019 delete DefaultBlock;
2020 }
2021 }
2022
2023 ConditionScope.ForceCleanup();
2024
2025 // Emit continuation.
2026 EmitBlock(SwitchExit.getBlock(), true);
2027 incrementProfileCounter(&S);
2028
2029 // If the switch has a condition wrapped by __builtin_unpredictable,
2030 // create metadata that specifies that the switch is unpredictable.
2031 // Don't bother if not optimizing because that metadata would not be used.
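// e.g. (illustrative): switch (__builtin_unpredictable(x)) { ... } — the
// check below recognizes the builtin call in the condition and tags the
// emitted switch instruction with 'unpredictable' metadata.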
2032 auto *Call = dyn_cast<CallExpr>(S.getCond()); 2033 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { 2034 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl()); 2035 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) { 2036 llvm::MDBuilder MDHelper(getLLVMContext()); 2037 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable, 2038 MDHelper.createUnpredictable()); 2039 } 2040 } 2041 2042 if (SwitchWeights) { 2043 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() && 2044 "switch weights do not match switch cases"); 2045 // If there's only one jump destination there's no sense weighting it. 2046 if (SwitchWeights->size() > 1) 2047 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof, 2048 createProfileWeights(*SwitchWeights)); 2049 delete SwitchWeights; 2050 } else if (SwitchLikelihood) { 2051 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() && 2052 "switch likelihoods do not match switch cases"); 2053 Optional<SmallVector<uint64_t, 16>> LHW = 2054 getLikelihoodWeights(*SwitchLikelihood); 2055 if (LHW) { 2056 llvm::MDBuilder MDHelper(CGM.getLLVMContext()); 2057 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof, 2058 createProfileWeights(*LHW)); 2059 } 2060 delete SwitchLikelihood; 2061 } 2062 SwitchInsn = SavedSwitchInsn; 2063 SwitchWeights = SavedSwitchWeights; 2064 SwitchLikelihood = SavedSwitchLikelihood; 2065 CaseRangeBlock = SavedCRBlock; 2066 } 2067 2068 static std::string 2069 SimplifyConstraint(const char *Constraint, const TargetInfo &Target, 2070 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) { 2071 std::string Result; 2072 2073 while (*Constraint) { 2074 switch (*Constraint) { 2075 default: 2076 Result += Target.convertConstraint(Constraint); 2077 break; 2078 // Ignore these 2079 case '*': 2080 case '?': 2081 case '!': 2082 case '=': // Will see this and the following in mult-alt constraints. 2083 case '+': 2084 break; 2085 case '#': // Ignore the rest of the constraint alternative. 2086 while (Constraint[1] && Constraint[1] != ',') 2087 Constraint++; 2088 break; 2089 case '&': 2090 case '%': 2091 Result += *Constraint; 2092 while (Constraint[1] && Constraint[1] == *Constraint) 2093 Constraint++; 2094 break; 2095 case ',': 2096 Result += "|"; 2097 break; 2098 case 'g': 2099 Result += "imr"; 2100 break; 2101 case '[': { 2102 assert(OutCons && 2103 "Must pass output names to constraints with a symbolic name"); 2104 unsigned Index; 2105 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index); 2106 assert(result && "Could not resolve symbolic name"); (void)result; 2107 Result += llvm::utostr(Index); 2108 break; 2109 } 2110 } 2111 2112 Constraint++; 2113 } 2114 2115 return Result; 2116 } 2117 2118 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared 2119 /// as using a particular register add that as a constraint that will be used 2120 /// in this asm stmt. 
2121 static std::string 2122 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, 2123 const TargetInfo &Target, CodeGenModule &CGM, 2124 const AsmStmt &Stmt, const bool EarlyClobber, 2125 std::string *GCCReg = nullptr) { 2126 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr); 2127 if (!AsmDeclRef) 2128 return Constraint; 2129 const ValueDecl &Value = *AsmDeclRef->getDecl(); 2130 const VarDecl *Variable = dyn_cast<VarDecl>(&Value); 2131 if (!Variable) 2132 return Constraint; 2133 if (Variable->getStorageClass() != SC_Register) 2134 return Constraint; 2135 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>(); 2136 if (!Attr) 2137 return Constraint; 2138 StringRef Register = Attr->getLabel(); 2139 assert(Target.isValidGCCRegisterName(Register)); 2140 // We're using validateOutputConstraint here because we only care if 2141 // this is a register constraint. 2142 TargetInfo::ConstraintInfo Info(Constraint, ""); 2143 if (Target.validateOutputConstraint(Info) && 2144 !Info.allowsRegister()) { 2145 CGM.ErrorUnsupported(&Stmt, "__asm__"); 2146 return Constraint; 2147 } 2148 // Canonicalize the register here before returning it. 2149 Register = Target.getNormalizedGCCRegisterName(Register); 2150 if (GCCReg != nullptr) 2151 *GCCReg = Register.str(); 2152 return (EarlyClobber ? "&{" : "{") + Register.str() + "}"; 2153 } 2154 2155 std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue( 2156 const TargetInfo::ConstraintInfo &Info, LValue InputValue, 2157 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) { 2158 if (Info.allowsRegister() || !Info.allowsMemory()) { 2159 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) 2160 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr}; 2161 2162 llvm::Type *Ty = ConvertType(InputType); 2163 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); 2164 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) || 2165 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { 2166 Ty = llvm::IntegerType::get(getLLVMContext(), Size); 2167 2168 return {Builder.CreateLoad(Builder.CreateElementBitCast( 2169 InputValue.getAddress(*this), Ty)), 2170 nullptr}; 2171 } 2172 } 2173 2174 Address Addr = InputValue.getAddress(*this); 2175 ConstraintStr += '*'; 2176 return {Addr.getPointer(), Addr.getElementType()}; 2177 } 2178 2179 std::pair<llvm::Value *, llvm::Type *> 2180 CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info, 2181 const Expr *InputExpr, 2182 std::string &ConstraintStr) { 2183 // If this can't be a register or memory, i.e., has to be a constant 2184 // (immediate or symbolic), try to emit it as such. 
2185 if (!Info.allowsRegister() && !Info.allowsMemory()) { 2186 if (Info.requiresImmediateConstant()) { 2187 Expr::EvalResult EVResult; 2188 InputExpr->EvaluateAsRValue(EVResult, getContext(), true); 2189 2190 llvm::APSInt IntResult; 2191 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(), 2192 getContext())) 2193 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr}; 2194 } 2195 2196 Expr::EvalResult Result; 2197 if (InputExpr->EvaluateAsInt(Result, getContext())) 2198 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()), 2199 nullptr}; 2200 } 2201 2202 if (Info.allowsRegister() || !Info.allowsMemory()) 2203 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType())) 2204 return {EmitScalarExpr(InputExpr), nullptr}; 2205 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) 2206 return {EmitScalarExpr(InputExpr), nullptr}; 2207 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); 2208 LValue Dest = EmitLValue(InputExpr); 2209 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, 2210 InputExpr->getExprLoc()); 2211 } 2212 2213 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline 2214 /// asm call instruction. The !srcloc MDNode contains a list of constant 2215 /// integers which are the source locations of the start of each line in the 2216 /// asm. 2217 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str, 2218 CodeGenFunction &CGF) { 2219 SmallVector<llvm::Metadata *, 8> Locs; 2220 // Add the location of the first line to the MDNode. 2221 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( 2222 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding()))); 2223 StringRef StrVal = Str->getString(); 2224 if (!StrVal.empty()) { 2225 const SourceManager &SM = CGF.CGM.getContext().getSourceManager(); 2226 const LangOptions &LangOpts = CGF.CGM.getLangOpts(); 2227 unsigned StartToken = 0; 2228 unsigned ByteOffset = 0; 2229 2230 // Add the location of the start of each subsequent line of the asm to the 2231 // MDNode. 2232 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) { 2233 if (StrVal[i] != '\n') continue; 2234 SourceLocation LineLoc = Str->getLocationOfByte( 2235 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset); 2236 Locs.push_back(llvm::ConstantAsMetadata::get( 2237 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding()))); 2238 } 2239 } 2240 2241 return llvm::MDNode::get(CGF.getLLVMContext(), Locs); 2242 } 2243 2244 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, 2245 bool HasUnwindClobber, bool ReadOnly, 2246 bool ReadNone, bool NoMerge, const AsmStmt &S, 2247 const std::vector<llvm::Type *> &ResultRegTypes, 2248 const std::vector<llvm::Type *> &ArgElemTypes, 2249 CodeGenFunction &CGF, 2250 std::vector<llvm::Value *> &RegResults) { 2251 if (!HasUnwindClobber) 2252 Result.addFnAttr(llvm::Attribute::NoUnwind); 2253 2254 if (NoMerge) 2255 Result.addFnAttr(llvm::Attribute::NoMerge); 2256 // Attach readnone and readonly attributes. 2257 if (!HasSideEffect) { 2258 if (ReadNone) 2259 Result.addFnAttr(llvm::Attribute::ReadNone); 2260 else if (ReadOnly) 2261 Result.addFnAttr(llvm::Attribute::ReadOnly); 2262 } 2263 2264 // Add elementtype attribute for indirect constraints. 
2265 for (auto Pair : llvm::enumerate(ArgElemTypes)) { 2266 if (Pair.value()) { 2267 auto Attr = llvm::Attribute::get( 2268 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value()); 2269 Result.addParamAttr(Pair.index(), Attr); 2270 } 2271 } 2272 2273 // Slap the source location of the inline asm into a !srcloc metadata on the 2274 // call. 2275 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) 2276 Result.setMetadata("srcloc", 2277 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF)); 2278 else { 2279 // At least put the line number on MS inline asm blobs. 2280 llvm::Constant *Loc = 2281 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding()); 2282 Result.setMetadata("srcloc", 2283 llvm::MDNode::get(CGF.getLLVMContext(), 2284 llvm::ConstantAsMetadata::get(Loc))); 2285 } 2286 2287 if (CGF.getLangOpts().assumeFunctionsAreConvergent()) 2288 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as 2289 // convergent (meaning, they may call an intrinsically convergent op, such 2290 // as bar.sync, and so can't have certain optimizations applied around 2291 // them). 2292 Result.addFnAttr(llvm::Attribute::Convergent); 2293 // Extract all of the register value results from the asm. 2294 if (ResultRegTypes.size() == 1) { 2295 RegResults.push_back(&Result); 2296 } else { 2297 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) { 2298 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult"); 2299 RegResults.push_back(Tmp); 2300 } 2301 } 2302 } 2303 2304 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { 2305 // Pop all cleanup blocks at the end of the asm statement. 2306 CodeGenFunction::RunCleanupsScope Cleanups(*this); 2307 2308 // Assemble the final asm string. 2309 std::string AsmString = S.generateAsmString(getContext()); 2310 2311 // Get all the output and input constraints together. 2312 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos; 2313 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos; 2314 2315 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 2316 StringRef Name; 2317 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) 2318 Name = GAS->getOutputName(i); 2319 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name); 2320 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid; 2321 assert(IsValid && "Failed to parse output constraint"); 2322 OutputConstraintInfos.push_back(Info); 2323 } 2324 2325 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 2326 StringRef Name; 2327 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) 2328 Name = GAS->getInputName(i); 2329 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name); 2330 bool IsValid = 2331 getTarget().validateInputConstraint(OutputConstraintInfos, Info); 2332 assert(IsValid && "Failed to parse input constraint"); (void)IsValid; 2333 InputConstraintInfos.push_back(Info); 2334 } 2335 2336 std::string Constraints; 2337 2338 std::vector<LValue> ResultRegDests; 2339 std::vector<QualType> ResultRegQualTys; 2340 std::vector<llvm::Type *> ResultRegTypes; 2341 std::vector<llvm::Type *> ResultTruncRegTypes; 2342 std::vector<llvm::Type *> ArgTypes; 2343 std::vector<llvm::Type *> ArgElemTypes; 2344 std::vector<llvm::Value*> Args; 2345 llvm::BitVector ResultTypeRequiresCast; 2346 2347 // Keep track of inout constraints. 
2348 std::string InOutConstraints; 2349 std::vector<llvm::Value*> InOutArgs; 2350 std::vector<llvm::Type*> InOutArgTypes; 2351 std::vector<llvm::Type*> InOutArgElemTypes; 2352 2353 // Keep track of out constraints for tied input operand. 2354 std::vector<std::string> OutputConstraints; 2355 2356 // Keep track of defined physregs. 2357 llvm::SmallSet<std::string, 8> PhysRegOutputs; 2358 2359 // An inline asm can be marked readonly if it meets the following conditions: 2360 // - it doesn't have any sideeffects 2361 // - it doesn't clobber memory 2362 // - it doesn't return a value by-reference 2363 // It can be marked readnone if it doesn't have any input memory constraints 2364 // in addition to meeting the conditions listed above. 2365 bool ReadOnly = true, ReadNone = true; 2366 2367 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 2368 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; 2369 2370 // Simplify the output constraint. 2371 std::string OutputConstraint(S.getOutputConstraint(i)); 2372 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, 2373 getTarget(), &OutputConstraintInfos); 2374 2375 const Expr *OutExpr = S.getOutputExpr(i); 2376 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); 2377 2378 std::string GCCReg; 2379 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, 2380 getTarget(), CGM, S, 2381 Info.earlyClobber(), 2382 &GCCReg); 2383 // Give an error on multiple outputs to same physreg. 2384 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second) 2385 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg); 2386 2387 OutputConstraints.push_back(OutputConstraint); 2388 LValue Dest = EmitLValue(OutExpr); 2389 if (!Constraints.empty()) 2390 Constraints += ','; 2391 2392 // If this is a register output, then make the inline asm return it 2393 // by-value. If this is a memory result, return the value by-reference. 2394 QualType QTy = OutExpr->getType(); 2395 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) || 2396 hasAggregateEvaluationKind(QTy); 2397 if (!Info.allowsMemory() && IsScalarOrAggregate) { 2398 2399 Constraints += "=" + OutputConstraint; 2400 ResultRegQualTys.push_back(QTy); 2401 ResultRegDests.push_back(Dest); 2402 2403 llvm::Type *Ty = ConvertTypeForMem(QTy); 2404 const bool RequiresCast = Info.allowsRegister() && 2405 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) || 2406 Ty->isAggregateType()); 2407 2408 ResultTruncRegTypes.push_back(Ty); 2409 ResultTypeRequiresCast.push_back(RequiresCast); 2410 2411 if (RequiresCast) { 2412 unsigned Size = getContext().getTypeSize(QTy); 2413 Ty = llvm::IntegerType::get(getLLVMContext(), Size); 2414 } 2415 ResultRegTypes.push_back(Ty); 2416 // If this output is tied to an input, and if the input is larger, then 2417 // we need to set the actual result type of the inline asm node to be the 2418 // same as the input type. 
2419 if (Info.hasMatchingInput()) {
2420 unsigned InputNo;
2421 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2422 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2423 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2424 break;
2425 }
2426 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2427
2428 QualType InputTy = S.getInputExpr(InputNo)->getType();
2429 QualType OutputType = OutExpr->getType();
2430
2431 uint64_t InputSize = getContext().getTypeSize(InputTy);
2432 if (getContext().getTypeSize(OutputType) < InputSize) {
2433 // Form the asm to return the value as a larger integer or fp type.
2434 ResultRegTypes.back() = ConvertType(InputTy);
2435 }
2436 }
2437 if (llvm::Type* AdjTy =
2438 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2439 ResultRegTypes.back()))
2440 ResultRegTypes.back() = AdjTy;
2441 else {
2442 CGM.getDiags().Report(S.getAsmLoc(),
2443 diag::err_asm_invalid_type_in_input)
2444 << OutExpr->getType() << OutputConstraint;
2445 }
2446
2447 // Update largest vector width for any vector types.
2448 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2449 LargestVectorWidth =
2450 std::max((uint64_t)LargestVectorWidth,
2451 VT->getPrimitiveSizeInBits().getKnownMinSize());
2452 } else {
2453 Address DestAddr = Dest.getAddress(*this);
2454 // Matrix types in memory are represented by arrays, but accessed through
2455 // vector pointers, with the alignment specified on the access operation.
2456 // For inline assembly, update pointer arguments to use vector pointers.
2457 // Otherwise there will be a mismatch if the matrix is also an
2458 // input argument, which is represented as a vector.
2459 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2460 DestAddr = Builder.CreateElementBitCast(
2461 DestAddr, ConvertType(OutExpr->getType()));
2462
2463 ArgTypes.push_back(DestAddr.getType());
2464 ArgElemTypes.push_back(DestAddr.getElementType());
2465 Args.push_back(DestAddr.getPointer());
2466 Constraints += "=*";
2467 Constraints += OutputConstraint;
2468 ReadOnly = ReadNone = false;
2469 }
2470
2471 if (Info.isReadWrite()) {
2472 InOutConstraints += ',';
2473
2474 const Expr *InputExpr = S.getOutputExpr(i);
2475 llvm::Value *Arg;
2476 llvm::Type *ArgElemType;
2477 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2478 Info, Dest, InputExpr->getType(), InOutConstraints,
2479 InputExpr->getExprLoc());
2480
2481 if (llvm::Type* AdjTy =
2482 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2483 Arg->getType()))
2484 Arg = Builder.CreateBitCast(Arg, AdjTy);
2485
2486 // Update largest vector width for any vector types.
2487 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2488 LargestVectorWidth =
2489 std::max((uint64_t)LargestVectorWidth,
2490 VT->getPrimitiveSizeInBits().getKnownMinSize());
2491 // Only tie earlyclobber physregs.
2492 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2493 InOutConstraints += llvm::utostr(i);
2494 else
2495 InOutConstraints += OutputConstraint;
2496
2497 InOutArgTypes.push_back(Arg->getType());
2498 InOutArgElemTypes.push_back(ArgElemType);
2499 InOutArgs.push_back(Arg);
2500 }
2501 }
2502
2503 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2504 // to the return value slot. Only do this when returning in registers.
2505 if (isa<MSAsmStmt>(&S)) { 2506 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); 2507 if (RetAI.isDirect() || RetAI.isExtend()) { 2508 // Make a fake lvalue for the return value slot. 2509 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy); 2510 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs( 2511 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, 2512 ResultRegDests, AsmString, S.getNumOutputs()); 2513 SawAsmBlock = true; 2514 } 2515 } 2516 2517 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 2518 const Expr *InputExpr = S.getInputExpr(i); 2519 2520 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; 2521 2522 if (Info.allowsMemory()) 2523 ReadNone = false; 2524 2525 if (!Constraints.empty()) 2526 Constraints += ','; 2527 2528 // Simplify the input constraint. 2529 std::string InputConstraint(S.getInputConstraint(i)); 2530 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), 2531 &OutputConstraintInfos); 2532 2533 InputConstraint = AddVariableConstraints( 2534 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), 2535 getTarget(), CGM, S, false /* No EarlyClobber */); 2536 2537 std::string ReplaceConstraint (InputConstraint); 2538 llvm::Value *Arg; 2539 llvm::Type *ArgElemType; 2540 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints); 2541 2542 // If this input argument is tied to a larger output result, extend the 2543 // input to be the same size as the output. The LLVM backend wants to see 2544 // the input and output of a matching constraint be the same size. Note 2545 // that GCC does not define what the top bits are here. We use zext because 2546 // that is usually cheaper, but LLVM IR should really get an anyext someday. 2547 if (Info.hasTiedOperand()) { 2548 unsigned Output = Info.getTiedOperand(); 2549 QualType OutputType = S.getOutputExpr(Output)->getType(); 2550 QualType InputTy = InputExpr->getType(); 2551 2552 if (getContext().getTypeSize(OutputType) > 2553 getContext().getTypeSize(InputTy)) { 2554 // Use ptrtoint as appropriate so that we can do our extension. 2555 if (isa<llvm::PointerType>(Arg->getType())) 2556 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); 2557 llvm::Type *OutputTy = ConvertType(OutputType); 2558 if (isa<llvm::IntegerType>(OutputTy)) 2559 Arg = Builder.CreateZExt(Arg, OutputTy); 2560 else if (isa<llvm::PointerType>(OutputTy)) 2561 Arg = Builder.CreateZExt(Arg, IntPtrTy); 2562 else if (OutputTy->isFloatingPointTy()) 2563 Arg = Builder.CreateFPExt(Arg, OutputTy); 2564 } 2565 // Deal with the tied operands' constraint code in adjustInlineAsmType. 2566 ReplaceConstraint = OutputConstraints[Output]; 2567 } 2568 if (llvm::Type* AdjTy = 2569 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint, 2570 Arg->getType())) 2571 Arg = Builder.CreateBitCast(Arg, AdjTy); 2572 else 2573 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) 2574 << InputExpr->getType() << InputConstraint; 2575 2576 // Update largest vector width for any vector types. 2577 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) 2578 LargestVectorWidth = 2579 std::max((uint64_t)LargestVectorWidth, 2580 VT->getPrimitiveSizeInBits().getKnownMinSize()); 2581 2582 ArgTypes.push_back(Arg->getType()); 2583 ArgElemTypes.push_back(ArgElemType); 2584 Args.push_back(Arg); 2585 Constraints += InputConstraint; 2586 } 2587 2588 // Append the "input" part of inout constraints. 
2589 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { 2590 ArgTypes.push_back(InOutArgTypes[i]); 2591 ArgElemTypes.push_back(InOutArgElemTypes[i]); 2592 Args.push_back(InOutArgs[i]); 2593 } 2594 Constraints += InOutConstraints; 2595 2596 // Labels 2597 SmallVector<llvm::BasicBlock *, 16> Transfer; 2598 llvm::BasicBlock *Fallthrough = nullptr; 2599 bool IsGCCAsmGoto = false; 2600 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) { 2601 IsGCCAsmGoto = GS->isAsmGoto(); 2602 if (IsGCCAsmGoto) { 2603 for (const auto *E : GS->labels()) { 2604 JumpDest Dest = getJumpDestForLabel(E->getLabel()); 2605 Transfer.push_back(Dest.getBlock()); 2606 if (!Constraints.empty()) 2607 Constraints += ','; 2608 Constraints += "!i"; 2609 } 2610 Fallthrough = createBasicBlock("asm.fallthrough"); 2611 } 2612 } 2613 2614 bool HasUnwindClobber = false; 2615 2616 // Clobbers 2617 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { 2618 StringRef Clobber = S.getClobber(i); 2619 2620 if (Clobber == "memory") 2621 ReadOnly = ReadNone = false; 2622 else if (Clobber == "unwind") { 2623 HasUnwindClobber = true; 2624 continue; 2625 } else if (Clobber != "cc") { 2626 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); 2627 if (CGM.getCodeGenOpts().StackClashProtector && 2628 getTarget().isSPRegName(Clobber)) { 2629 CGM.getDiags().Report(S.getAsmLoc(), 2630 diag::warn_stack_clash_protection_inline_asm); 2631 } 2632 } 2633 2634 if (isa<MSAsmStmt>(&S)) { 2635 if (Clobber == "eax" || Clobber == "edx") { 2636 if (Constraints.find("=&A") != std::string::npos) 2637 continue; 2638 std::string::size_type position1 = 2639 Constraints.find("={" + Clobber.str() + "}"); 2640 if (position1 != std::string::npos) { 2641 Constraints.insert(position1 + 1, "&"); 2642 continue; 2643 } 2644 std::string::size_type position2 = Constraints.find("=A"); 2645 if (position2 != std::string::npos) { 2646 Constraints.insert(position2 + 1, "&"); 2647 continue; 2648 } 2649 } 2650 } 2651 if (!Constraints.empty()) 2652 Constraints += ','; 2653 2654 Constraints += "~{"; 2655 Constraints += Clobber; 2656 Constraints += '}'; 2657 } 2658 2659 assert(!(HasUnwindClobber && IsGCCAsmGoto) && 2660 "unwind clobber can't be used with asm goto"); 2661 2662 // Add machine specific clobbers 2663 std::string MachineClobbers = getTarget().getClobbers(); 2664 if (!MachineClobbers.empty()) { 2665 if (!Constraints.empty()) 2666 Constraints += ','; 2667 Constraints += MachineClobbers; 2668 } 2669 2670 llvm::Type *ResultType; 2671 if (ResultRegTypes.empty()) 2672 ResultType = VoidTy; 2673 else if (ResultRegTypes.size() == 1) 2674 ResultType = ResultRegTypes[0]; 2675 else 2676 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes); 2677 2678 llvm::FunctionType *FTy = 2679 llvm::FunctionType::get(ResultType, ArgTypes, false); 2680 2681 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; 2682 2683 llvm::InlineAsm::AsmDialect GnuAsmDialect = 2684 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT 2685 ? llvm::InlineAsm::AD_ATT 2686 : llvm::InlineAsm::AD_Intel; 2687 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ? 
2688 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2689
2690 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2691 FTy, AsmString, Constraints, HasSideEffect,
2692 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2693 std::vector<llvm::Value*> RegResults;
2694 if (IsGCCAsmGoto) {
2695 llvm::CallBrInst *Result =
2696 Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2697 EmitBlock(Fallthrough);
2698 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2699 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2700 ResultRegTypes, ArgElemTypes, *this, RegResults);
2701 } else if (HasUnwindClobber) {
2702 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2703 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2704 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2705 *this, RegResults);
2706 } else {
2707 llvm::CallInst *Result =
2708 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2709 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2710 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2711 ResultRegTypes, ArgElemTypes, *this, RegResults);
2712 }
2713
2714 assert(RegResults.size() == ResultRegTypes.size());
2715 assert(RegResults.size() == ResultTruncRegTypes.size());
2716 assert(RegResults.size() == ResultRegDests.size());
2717 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2718 // in which case its size may grow.
2719 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2720 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2721 llvm::Value *Tmp = RegResults[i];
2722 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2723
2724 // If the result type of the LLVM IR asm doesn't match the result type of
2725 // the expression, do the conversion.
2726 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2727
2728 // Truncate the integer result to the right size; note that TruncTy can be
2729 // a pointer.
2730 if (TruncTy->isFloatingPointTy())
2731 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2732 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2733 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2734 Tmp = Builder.CreateTrunc(Tmp,
2735 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2736 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2737 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2738 uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2739 Tmp = Builder.CreatePtrToInt(Tmp,
2740 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2741 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2742 } else if (TruncTy->isIntegerTy()) {
2743 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2744 } else if (TruncTy->isVectorTy()) {
2745 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2746 }
2747 }
2748
2749 LValue Dest = ResultRegDests[i];
2750 // ResultTypeRequiresCast elements correspond to the first
2751 // ResultTypeRequiresCast.size() elements of RegResults.
2752 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) { 2753 unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]); 2754 Address A = Builder.CreateElementBitCast(Dest.getAddress(*this), 2755 ResultRegTypes[i]); 2756 if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) { 2757 Builder.CreateStore(Tmp, A); 2758 continue; 2759 } 2760 2761 QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false); 2762 if (Ty.isNull()) { 2763 const Expr *OutExpr = S.getOutputExpr(i); 2764 CGM.getDiags().Report(OutExpr->getExprLoc(), 2765 diag::err_store_value_to_reg); 2766 return; 2767 } 2768 Dest = MakeAddrLValue(A, Ty); 2769 } 2770 EmitStoreThroughLValue(RValue::get(Tmp), Dest); 2771 } 2772 } 2773 2774 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) { 2775 const RecordDecl *RD = S.getCapturedRecordDecl(); 2776 QualType RecordTy = getContext().getRecordType(RD); 2777 2778 // Initialize the captured struct. 2779 LValue SlotLV = 2780 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy); 2781 2782 RecordDecl::field_iterator CurField = RD->field_begin(); 2783 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(), 2784 E = S.capture_init_end(); 2785 I != E; ++I, ++CurField) { 2786 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField); 2787 if (CurField->hasCapturedVLAType()) { 2788 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV); 2789 } else { 2790 EmitInitializerForField(*CurField, LV, *I); 2791 } 2792 } 2793 2794 return SlotLV; 2795 } 2796 2797 /// Generate an outlined function for the body of a CapturedStmt, store any 2798 /// captured variables into the captured struct, and call the outlined function. 2799 llvm::Function * 2800 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) { 2801 LValue CapStruct = InitCapturedStruct(S); 2802 2803 // Emit the CapturedDecl 2804 CodeGenFunction CGF(CGM, true); 2805 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K)); 2806 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S); 2807 delete CGF.CapturedStmtInfo; 2808 2809 // Emit call to the helper function. 2810 EmitCallOrInvoke(F, CapStruct.getPointer(*this)); 2811 2812 return F; 2813 } 2814 2815 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) { 2816 LValue CapStruct = InitCapturedStruct(S); 2817 return CapStruct.getAddress(*this); 2818 } 2819 2820 /// Creates the outlined function for a CapturedStmt. 2821 llvm::Function * 2822 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) { 2823 assert(CapturedStmtInfo && 2824 "CapturedStmtInfo should be set when generating the captured function"); 2825 const CapturedDecl *CD = S.getCapturedDecl(); 2826 const RecordDecl *RD = S.getCapturedRecordDecl(); 2827 SourceLocation Loc = S.getBeginLoc(); 2828 assert(CD->hasBody() && "missing CapturedDecl body"); 2829 2830 // Build the argument list. 2831 ASTContext &Ctx = CGM.getContext(); 2832 FunctionArgList Args; 2833 Args.append(CD->param_begin(), CD->param_end()); 2834 2835 // Create the function declaration. 
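// A sketch of what gets built here (illustrative): the captured body
// becomes an internal helper along the lines of
//   define internal void @__captured_stmt(%struct.anon* %context)
// whose only parameters are the CapturedDecl's, typically the pointer to
// the capture struct.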
2836 const CGFunctionInfo &FuncInfo = 2837 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); 2838 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); 2839 2840 llvm::Function *F = 2841 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, 2842 CapturedStmtInfo->getHelperName(), &CGM.getModule()); 2843 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); 2844 if (CD->isNothrow()) 2845 F->addFnAttr(llvm::Attribute::NoUnwind); 2846 2847 // Generate the function. 2848 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(), 2849 CD->getBody()->getBeginLoc()); 2850 // Set the context parameter in CapturedStmtInfo. 2851 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam()); 2852 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr)); 2853 2854 // Initialize variable-length arrays. 2855 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(), 2856 Ctx.getTagDeclType(RD)); 2857 for (auto *FD : RD->fields()) { 2858 if (FD->hasCapturedVLAType()) { 2859 auto *ExprArg = 2860 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc()) 2861 .getScalarVal(); 2862 auto VAT = FD->getCapturedVLAType(); 2863 VLASizeMap[VAT->getSizeExpr()] = ExprArg; 2864 } 2865 } 2866 2867 // If 'this' is captured, load it into CXXThisValue. 2868 if (CapturedStmtInfo->isCXXThisExprCaptured()) { 2869 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl(); 2870 LValue ThisLValue = EmitLValueForField(Base, FD); 2871 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal(); 2872 } 2873 2874 PGO.assignRegionCounters(GlobalDecl(CD), F); 2875 CapturedStmtInfo->EmitBody(*this, CD->getBody()); 2876 FinishFunction(CD->getBodyRBrace()); 2877 2878 return F; 2879 } 2880