//===--- CGStmt.cpp - Emit LLVM Code from Statements ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO->setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be
      // in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      PGO->markStmtMaybeUsed(S);
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
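  // For example, under -fopenmp-simd a combined directive such as
  // '#pragma omp parallel for simd' is emitted as if it were a plain
  // '#pragma omp simd' loop, while directives with no simd component are
  // lowered to just their associated statement, if any.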
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
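
  // For example, for a statement like 'exit(1);' the call emitter creates a
  // fresh continuation block after the noreturn call; nothing branches to
  // that block, so the code above erases it and clears the insertion point.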

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  } break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPStripeDirectiveClass:
    EmitOMPStripeDirective(cast<OMPStripeDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    EmitOMPMaskedTaskLoopSimdDirective(
        cast<OMPMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    EmitOMPParallelMaskedTaskLoopDirective(
        cast<OMPParallelMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    EmitOMPParallelMaskedTaskLoopSimdDirective(
        cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  case Stmt::OpenACCSetConstructClass:
    EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
    break;
  case Stmt::OpenACCUpdateConstructClass:
    EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
    break;
  case Stmt::OpenACCAtomicConstructClass:
    EmitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*S));
    break;
  case Stmt::OpenACCCacheConstructClass:
    EmitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  case Stmt::SYCLKernelCallStmtClass:
    // SYCL kernel call statements are generated as wrappers around the body
    // of functions declared with the sycl_kernel_entry_point attribute. Such
    // functions are used to specify how a SYCL kernel (a function object) is
    // to be invoked; the SYCL kernel call statement contains a transformed
    // variation of the function body and is used to generate a SYCL kernel
    // caller function: a function that serves as the device side entry point
    // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
    // function is invoked by host code in order to trigger emission of the
    // device side SYCL kernel caller function and to generate metadata needed
    // by SYCL run-time library implementations; the function is otherwise
    // intended to have no effect. As such, the function body is not evaluated
    // as part of the invocation during host compilation (and the function
    // should not be called or emitted during device compilation); the SYCL
    // kernel call statement is thus handled as a null statement for the
    // purpose of code generation.
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when
      // put at the end of a statement expression, they yield the value of
      // their subexpression.  Handle this by walking through all labels we
      // encounter, emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the
        // result here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
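
/// Try to erase a basic block that does nothing but branch to its successor.
/// For example, the "while.cond" header of a 'while (1)' loop ends up holding
/// only an unconditional branch to the loop body; EmitWhileStmt uses this to
/// forward the header's uses to the body and erase the header.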
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

    // Otherwise, we need to give this label a target depth and remove
    // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (const LabelDecl *Label : Labels) {
    assert(CGF.LabelMap.count(Label));
    JumpDest &dest = CGF.LabelMap.find(Label)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}
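
/// Emit a statement carrying attributes, for example:
///   [[clang::musttail]] return fn(x);
///   [[clang::nomerge]] bar();
///   [[assume(n > 0)]];
/// The recognized attributes are recorded for the duration of the
/// sub-statement's emission and then restored.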
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  HLSLControlFlowHintAttr::Spelling flattenOrBranch =
      HLSLControlFlowHintAttr::SpellingNotCalculated;
  const CallExpr *musttail = nullptr;
  const AtomicAttr *AA = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    case attr::Atomic:
      AA = cast<AtomicAttr>(A);
      break;
    case attr::HLSLControlFlowHint: {
      flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
  CGAtomicOptionsRAII AORAII(CGM, AA);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  ApplyAtomGroup Grp(getDebugInfo());
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest;
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
  if (CurBB && CurBB->getTerminator())
    addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch
  // that can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
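    // For example, 'if (0) { f(); } else { g(); }' folds the condition and
    // emits only the call to g(); the then-arm is dropped entirely (after
    // being checked for labels that could make it reachable).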
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      PGO->markStmtMaybeUsed(Skipped);
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't
  // generate it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO.  Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior
  // to executing the body of the if.then or if.else.  This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the
  // MC/DC updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage) {
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
                         /*ConditionalOp=*/nullptr,
                         /*ConditionalDecl=*/S.getConditionVariable());
  } else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose condition can be
  // constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of
  // the following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}
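
// For example, given the rules above, 'while (1);' in C++ has a constant-true
// condition and a trivially empty body, so mustprogress is dropped from the
// enclosing function and the loop is not assumed to terminate; by contrast,
// 'while (foo());' in C11 has a non-constant condition and must make
// progress.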

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching
// one of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  MaybeEmitDeferredVarDeclInit(S.getConditionVariable());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to the loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate source
    // location atoms; otherwise we may omit a step onto the loop condition in
    // favour of the `while` keyword.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience.  Explore this
    // later.
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the conditional branch, the LoopHeader is just an
  // unconditional branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to the loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the do-while footer.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    auto *I = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Key Instructions: Emit the condition and branch as separate source
    // location atoms; otherwise we may omit a step onto the loop condition in
    // favour of the closing brace.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience (no jumping to
    // the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the conditional branch, the do.cond block is just
  // an unconditional branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition
  // as the continue block.  Otherwise, if there is no condition variable, we
  // can form the continue block now.  If there is a condition variable, we
  // can't form the continue block until after we've emitted the condition,
  // because the condition is in scope in the increment, but Sema's jump
  // diagnostics ensure that there are no continues from the condition
  // variable that jump to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to the loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

    MaybeEmitDeferredVarDeclInit(S.getConditionVariable());

    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate atoms to
    // match existing loop stepping behaviour.  FIXME: We could have the
    // branch as the backup location for the condition, which would probably
    // be a better experience (no jumping to the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // Key Instructions: We want the for closing brace to be step-able on to
    // match existing behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());
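
  // A range-based for statement
  //   for (auto v : range) body
  // has already been desugared by Sema into, roughly:
  //   auto &&__range = range;
  //   auto __begin = begin-expr;
  //   auto __end = end-expr;
  //   for (; __begin != __end; ++__begin) { auto v = *__begin; body }
  // so the range, begin, and end statements above are each emitted exactly
  // once, before the loop.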

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
  // Key Instructions: Emit the condition and branch as separate atoms to
  // match existing loop stepping behaviour.  FIXME: We could have the branch
  // as the backup location for the condition, which would probably be a
  // better experience.
  if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
    addInstToNewSourceAtom(CondI, nullptr);
  addInstToNewSourceAtom(I, nullptr);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment.  In case of a 'continue', we jump
  // there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }
  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  EmitStopPoint(&S);
  // Emit the increment next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // We want the for closing brace to be step-able on to match existing
    // behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  ApplyAtomGroup Grp(getDebugInfo());
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
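  // (For instance, in `return f();` from a function returning void, a GCC
  // extension, f() is still evaluated for its side effects.)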
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized).
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
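    // (For example, for `int &f() { static int g; return g; }` the return
    // slot receives the address of g, not a loaded value.)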
    RValue Result = EmitReferenceBindingToExpr(RV);
    auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
    addInstToCurrentSourceAtom(I, I->getValueOperand());
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      } else {
        auto *I = Builder.CreateStore(Ret, ReturnValue);
        addInstToCurrentSourceAtom(I, I->getValueOperand());
      }
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I, /*EvaluateConditionDecl=*/true);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big then
/// add multiple cases to the switch instruction, one for each value within
/// the range. If the range is too big then emit an "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If the range is empty, do nothing.
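  // (An empty range can be written, e.g., as `case 5 ... 4:`.)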
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit an "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit the range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
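  // A CaseStmt with an RHS is a GNU case range such as `case 1 ... 4:`.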
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // Emit debug info for the case value if it is an enum value.
  const ConstantExpr *CE;
  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
    CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
  else
    CE = dyn_cast<ConstantExpr>(S.getLHS());
  if (CE) {
    if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
      if (CGDebugInfo *Dbg = getDebugInfo())
        if (CGM.getCodeGenOpts().hasReducedDebugInfo())
          Dbg->EmitGlobalVariable(DE->getDecl(),
                                  APValue(llvm::APSInt(CaseVal->getValue())));
  }

  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try not to emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //   case 1:
  //   case 2:
  //   case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO: When the next case has a likelihood attribute the code returns to
  // the recursive algorithm. Maybe improve this case if it becomes common
  // practice to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes,
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit. Basically,
/// on a switch (5) we want to find these statements:
///   case 5:
///     printf(...);    <--
///     ++i;            <--
///     break;
///
/// and add them to the ResultStmts vector. If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough. If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels. If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for,
  // then we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
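  // (That is, the `break;` that terminates the run of statements being
  // collected.)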
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) the statement doesn't
          // have the case and is skippable, or 2) it does contain the case
          // value and also contains the break to exit the switch. In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to. We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
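    // (For example, `switch (0) { case 1: foo(); }` can be elided entirely;
    // foo here is just an illustrative call.)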
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

static std::optional<SmallVector<uint64_t, 16>>
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
  // Are there enough branches to weight them?
  if (Likelihoods.size() <= 1)
    return std::nullopt;

  uint64_t NumUnlikely = 0;
  uint64_t NumNone = 0;
  uint64_t NumLikely = 0;
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      ++NumUnlikely;
      break;
    case Stmt::LH_None:
      ++NumNone;
      break;
    case Stmt::LH_Likely:
      ++NumLikely;
      break;
    }
  }

  // Is there a likelihood attribute used?
  if (NumUnlikely == 0 && NumLikely == 0)
    return std::nullopt;

  // When multiple cases share the same code they can be combined during
  // optimization. In that case the weights of the branch will be the sum of
  // the individual weights. Make sure the combined sum of all neutral cases
  // doesn't exceed the value of a single likely attribute.
  // The additions both avoid divisions by 0 and make sure the weights of None
  // don't exceed the weight of Likely.
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
  const uint64_t None = Likely / (NumNone + 1);
  const uint64_t Unlikely = 0;

  SmallVector<uint64_t, 16> Result;
  Result.reserve(Likelihoods.size());
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      Result.push_back(Unlikely);
      break;
    case Stmt::LH_None:
      Result.push_back(None);
      break;
    case Stmt::LH_Likely:
      Result.push_back(Likely);
      break;
    }
  }

  return Result;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
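      // (For example, for `switch (int x = ...)` the variable x must still be
      // emitted and destroyed, even though only one case body survives.)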
      if (S.getConditionVariable())
        EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case. Emit the
      // specified series of statements and we're good.
      for (const Stmt *CaseStmt : CaseStmts)
        EmitStmt(CaseStmt);
      incrementProfileCounter(&S);
      PGO->markStmtMaybeUsed(S.getBody());

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
  MaybeEmitDeferredVarDeclInit(S.getConditionVariable());

  // Create a basic block to hold stuff that comes after the switch
  // statement. We also need to create a default block now so that
  // explicit case range tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  addInstToNewSourceAtom(SwitchInsn, CondV);

  if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    llvm::ConstantInt *BranchHintConstant =
        HLSLControlFlowAttr ==
                HLSLControlFlowHintAttr::Spelling::Microsoft_branch
            ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
            : llvm::ConstantInt::get(CGM.Int32Ty, 2);
    llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
                              MDHelper.createConstant(BranchHintConstant)};
    SwitchInsn->setMetadata("hlsl.controlflow.hint",
                            llvm::MDNode::get(CGM.getLLVMContext(), Vals));
  }

  if (PGO->haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first. We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
    // Initialize the default case.
    SwitchLikelihood->push_back(Stmt::LH_None);
  }

  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
  // then reuse the last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit the switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  // If the switch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the switch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  auto *Call = dyn_cast<CallExpr>(S.getCond());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
                              MDHelper.createUnpredictable());
    }
  }

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  } else if (SwitchLikelihood) {
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
           "switch likelihoods do not match switch cases");
    std::optional<SmallVector<uint64_t, 16>> LHW =
        getLikelihoodWeights(*SwitchLikelihood);
    if (LHW) {
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*LHW));
    }
    delete SwitchLikelihood;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  SwitchLikelihood = SavedSwitchLikelihood;
  CaseRangeBlock = SavedCRBlock;
}

static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these.
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
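      // (e.g. in "r#comment,m" everything from '#' up to the ',' is skipped.)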
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
/// as using a particular register, add that as a constraint that will be used
/// in this asm stmt.
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber,
                       std::string *GCCReg = nullptr) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) && !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  if (GCCReg != nullptr)
    *GCCReg = Register.str();
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}

std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
    const TargetInfo::ConstraintInfo &Info, LValue InputValue,
    QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType))
      return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};

    llvm::Type *Ty = ConvertType(InputType);
    uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
      Ty = llvm::IntegerType::get(getLLVMContext(), Size);

      return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
              nullptr};
    }
  }

  Address Addr = InputValue.getAddress();
  ConstraintStr += '*';
  return {InputValue.getPointer(*this), Addr.getElementType()};
}

std::pair<llvm::Value *, llvm::Type *>
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
                              const Expr *InputExpr,
                              std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
              nullptr};
  }

  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return {EmitScalarExpr(InputExpr), nullptr};
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return {EmitScalarExpr(InputExpr), nullptr};
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}

/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction. The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Metadata *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
    unsigned StartToken = 0;
    unsigned ByteOffset = 0;

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
      Locs.push_back(llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}

static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool HasUnwindClobber, bool ReadOnly,
                              bool ReadNone, bool NoMerge, bool NoConvergent,
                              const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              const std::vector<llvm::Type *> &ArgElemTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  if (!HasUnwindClobber)
    Result.addFnAttr(llvm::Attribute::NoUnwind);

  if (NoMerge)
    Result.addFnAttr(llvm::Attribute::NoMerge);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.setDoesNotAccessMemory();
    else if (ReadOnly)
      Result.setOnlyReadsMemory();
  }

  // Add elementtype attribute for indirect constraints.
  for (auto Pair : llvm::enumerate(ArgElemTypes)) {
    if (Pair.value()) {
      auto Attr = llvm::Attribute::get(
          CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
      Result.addParamAttr(Pair.index(), Attr);
    }
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  const StringLiteral *SL;
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
      gccAsmStmt &&
      (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
    Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
  } else {
    // At least put the line number on MS inline asm blobs and GCC asm
    // constexpr strings.
    llvm::Constant *Loc =
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  // Make inline-asm calls Key for the debug info feature Key Instructions.
  CGF.addInstToNewSourceAtom(&Result, nullptr);

  if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them) unless it's explicitly marked 'noconvergent'.
    Result.addFnAttr(llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
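  // (A single register output is the call's return value itself; multiple
  // outputs come back as an aggregate and are unpacked with extractvalue.)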
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(&Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}

static void
EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
              const llvm::ArrayRef<llvm::Value *> RegResults,
              const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
              const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
              const llvm::ArrayRef<LValue> ResultRegDests,
              const llvm::ArrayRef<QualType> ResultRegQualTys,
              const llvm::BitVector &ResultTypeRequiresCast,
              const llvm::BitVector &ResultRegIsFlagReg) {
  CGBuilderTy &Builder = CGF.Builder;
  CodeGenModule &CGM = CGF.CGM;
  llvm::LLVMContext &CTX = CGF.getLLVMContext();

  assert(RegResults.size() == ResultRegTypes.size());
  assert(RegResults.size() == ResultTruncRegTypes.size());
  assert(RegResults.size() == ResultRegDests.size());
  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
  // in which case its size may grow.
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
  assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());

  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];
    llvm::Type *TruncTy = ResultTruncRegTypes[i];

    if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
      // Target must guarantee the Value `Tmp` here is lowered to a boolean
      // value.
      llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
      llvm::Value *IsBooleanValue =
          Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
      llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
      Builder.CreateCall(FnAssume, IsBooleanValue);
    }

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != TruncTy) {

      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(
            Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize =
            CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(
            Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
      } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    ApplyAtomGroup Grp(CGF.getDebugInfo());
    LValue Dest = ResultRegDests[i];
    // ResultTypeRequiresCast elements correspond to the first
    // ResultTypeRequiresCast.size() elements of RegResults.
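    // (Entries beyond that prefix never require a cast, hence the index
    // guard below.)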
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
      unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
      Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
      if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
        llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
        CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
        continue;
      }

      QualType Ty =
          CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
      if (Ty.isNull()) {
        const Expr *OutExpr = S.getOutputExpr(i);
        CGM.getDiags().Report(OutExpr->getExprLoc(),
                              diag::err_store_value_to_reg);
        return;
      }
      Dest = CGF.MakeAddrLValue(A, Ty);
    }
    CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
  }
}

static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
                                        const AsmStmt &S) {
  constexpr auto Name = "__ASM__hipstdpar_unsupported";

  std::string Asm;
  if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
    Asm = GCCAsm->getAsmString();

  auto &Ctx = CGF->CGM.getLLVMContext();

  auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
  auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
                                      {StrTy->getType()}, false);
  auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);

  CGF->Builder.CreateCall(UBF, {StrTy});
}

void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Pop all cleanup blocks at the end of the asm statement.
  CodeGenFunction::RunCleanupsScope Cleanups(*this);

  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
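  // For a statement like `asm("..." : "=r"(x) : "r"(y))`, the loops below
  // validate the output constraint "=r" and the input constraint "r" against
  // the target before any code is emitted.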
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
  bool IsValidTargetAsm = true;
  for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    if (IsHipStdPar && !IsValid)
      IsValidTargetAsm = false;
    else
      assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
        getTarget().validateInputConstraint(OutputConstraintInfos, Info);
    if (IsHipStdPar && !IsValid)
      IsValidTargetAsm = false;
    else
      assert(IsValid && "Failed to parse input constraint");
    InputConstraintInfos.push_back(Info);
  }

  if (!IsValidTargetAsm)
    return EmitHipStdParUnsupportedAsm(this, S);

  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Type *> ArgElemTypes;
  std::vector<llvm::Value*> Args;
  llvm::BitVector ResultTypeRequiresCast;
  llvm::BitVector ResultRegIsFlagReg;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;
  std::vector<llvm::Type*> InOutArgElemTypes;

  // Keep track of out constraints for tied input operands.
  std::vector<std::string> OutputConstraints;

  // Keep track of defined physregs.
  llvm::SmallSet<std::string, 8> PhysRegOutputs;

  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any sideeffects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if it doesn't have any input memory constraints
  // in addition to meeting the conditions listed above.
  bool ReadOnly = true, ReadNone = true;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget(), &OutputConstraintInfos);

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    std::string GCCReg;
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S,
                                              Info.earlyClobber(),
                                              &GCCReg);
    // Give an error on multiple outputs to the same physreg.
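    // (This catches, e.g., two outputs both bound to the same register via
    // `register int r asm("eax")` variables.)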
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);

    OutputConstraints.push_back(OutputConstraint);
    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value. If this is a memory result, return the value by-reference.
    QualType QTy = OutExpr->getType();
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
                                     hasAggregateEvaluationKind(QTy);
    if (!Info.allowsMemory() && IsScalarOrAggregate) {

      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(QTy);
      ResultRegDests.push_back(Dest);

      bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
      ResultRegIsFlagReg.push_back(IsFlagReg);

      llvm::Type *Ty = ConvertTypeForMem(QTy);
      const bool RequiresCast = Info.allowsRegister() &&
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
           Ty->isAggregateType());

      ResultTruncRegTypes.push_back(Ty);
      ResultTypeRequiresCast.push_back(RequiresCast);

      if (RequiresCast) {
        unsigned Size = getContext().getTypeSize(QTy);
        if (Size)
          Ty = llvm::IntegerType::get(getLLVMContext(), Size);
        else
          CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
      }
      ResultRegTypes.push_back(Ty);
      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());
    } else {
      Address DestAddr = Dest.getAddress();
      // Matrix types in memory are represented by arrays, but accessed through
      // vector pointers, with the alignment specified on the access operation.
      // For inline assembly, update pointer arguments to use vector pointers.
      // Otherwise there will be a mismatch if the matrix is also an
      // input argument, which is represented as a vector.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());
    } else {
      Address DestAddr = Dest.getAddress();
      // Matrix types in memory are represented by arrays, but accessed through
      // vector pointers, with the alignment specified on the access operation.
      // For inline assembly, update pointer arguments to use vector pointers.
      // Otherwise there will be a mismatch if the matrix is also an input
      // argument, which is represented as a vector.
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
        DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));

      ArgTypes.push_back(DestAddr.getType());
      ArgElemTypes.push_back(DestAddr.getElementType());
      Args.push_back(DestAddr.emitRawPointer(*this));
      Constraints += "=*";
      Constraints += OutputConstraint;
      ReadOnly = ReadNone = false;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg;
      llvm::Type *ArgElemType;
      std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
          Info, Dest, InputExpr->getType(), InOutConstraints,
          InputExpr->getExprLoc());

      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());
      // Only tie earlyclobber physregs.
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgElemTypes.push_back(ArgElemType);
      InOutArgs.push_back(Arg);
    }
  }

  // If this is a Microsoft-style asm blob, store the return registers
  // (EAX:EDX) to the return value slot.  Only do this when returning in
  // registers.
  if (isa<MSAsmStmt>(&S)) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    if (RetAI.isDirect() || RetAI.isExtend()) {
      // Make a fake lvalue for the return value slot.
      LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
          ResultRegDests, AsmString, S.getNumOutputs());
      SawAsmBlock = true;
    }
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (Info.allowsMemory())
      ReadNone = false;

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint = AddVariableConstraints(
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
        getTarget(), CGM, S, false /* No EarlyClobber */);

    std::string ReplaceConstraint(InputConstraint);
    llvm::Value *Arg;
    llvm::Type *ArgElemType;
    std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output.  The LLVM backend wants to see
    // the input and output of a matching constraint be the same size.  Note
    // that GCC does not define what the top bits are here.  We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
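    // E.g. (illustrative): for `asm("..." : "=r"(Long) : "0"(Int))` on a
    // 64-bit target, the i32 input is zero-extended to i64 here so that both
    // sides of the tied constraint have the same type.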
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else if (OutputTy->isFloatingPointTy())
          Arg = Builder.CreateFPExt(Arg, OutputTy);
      }
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
      ReplaceConstraint = OutputConstraints[Output];
    }
    if (llvm::Type *AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
                                                 Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << InputExpr->getType() << InputConstraint;

    // Update largest vector width for any vector types.
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

    ArgTypes.push_back(Arg->getType());
    ArgElemTypes.push_back(ArgElemType);
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Append the "input" part of inout constraints.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    ArgElemTypes.push_back(InOutArgElemTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  // Labels
  SmallVector<llvm::BasicBlock *, 16> Transfer;
  llvm::BasicBlock *Fallthrough = nullptr;
  bool IsGCCAsmGoto = false;
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
    IsGCCAsmGoto = GS->isAsmGoto();
    if (IsGCCAsmGoto) {
      for (const auto *E : GS->labels()) {
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
        Transfer.push_back(Dest.getBlock());
        if (!Constraints.empty())
          Constraints += ',';
        Constraints += "!i";
      }
      Fallthrough = createBasicBlock("asm.fallthrough");
    }
  }

  bool HasUnwindClobber = false;

  // Clobbers
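  // Each clobber becomes a "~{...}" constraint; e.g. (illustrative) the GNU
  // clobber list `: "memory", "cc", "ebx"` contributes
  // "~{memory},~{cc},~{ebx}" ("ebx" being normalized first via
  // getNormalizedGCCRegisterName), while an "unwind" clobber is handled
  // separately below.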
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    std::string Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber == "unwind") {
      HasUnwindClobber = true;
      continue;
    } else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (isa<MSAsmStmt>(&S)) {
      if (Clobber == "eax" || Clobber == "edx") {
        if (Constraints.find("=&A") != std::string::npos)
          continue;
        std::string::size_type position1 =
            Constraints.find("={" + Clobber + "}");
        if (position1 != std::string::npos) {
          Constraints.insert(position1 + 1, "&");
          continue;
        }
        std::string::size_type position2 = Constraints.find("=A");
        if (position2 != std::string::npos) {
          Constraints.insert(position2 + 1, "&");
          continue;
        }
      }
    }
    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
         "unwind clobber can't be used with asm goto");

  // Add machine-specific clobbers.
  std::string_view MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;

  llvm::InlineAsm::AsmDialect GnuAsmDialect =
      CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
          ? llvm::InlineAsm::AD_ATT
          : llvm::InlineAsm::AD_Intel;
  llvm::InlineAsm::AsmDialect AsmDialect =
      isa<MSAsmStmt>(&S) ? llvm::InlineAsm::AD_Intel : GnuAsmDialect;

  llvm::InlineAsm *IA = llvm::InlineAsm::get(
      FTy, AsmString, Constraints, HasSideEffect,
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
  std::vector<llvm::Value *> RegResults;
  llvm::CallBrInst *CBR;
  llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
      CBRRegResults;
  if (IsGCCAsmGoto) {
    CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
                      ReadNone, InNoMergeAttributedStmt,
                      InNoConvergentAttributedStmt, S, ResultRegTypes,
                      ArgElemTypes, *this, RegResults);
    // Because we are emitting code top to bottom, we don't have enough
    // information at this point to know precisely whether we have a critical
    // edge.  If we have outputs, split all indirect destinations.
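    // E.g. (illustrative): with an output and an indirect destination
    // %indirect, the callbr gets a synthesized destination %indirect.split
    // that receives the extracted asm results and then branches on to
    // %indirect.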
    if (!RegResults.empty()) {
      unsigned i = 0;
      for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
        llvm::Twine SynthName = Dest->getName() + ".split";
        llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
        llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
        Builder.SetInsertPoint(SynthBB);

        if (ResultRegTypes.size() == 1) {
          CBRRegResults[SynthBB].push_back(CBR);
        } else {
          for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
            llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
            CBRRegResults[SynthBB].push_back(Tmp);
          }
        }

        EmitBranch(Dest);
        EmitBlock(SynthBB);
        CBR->setIndirectDest(i++, SynthBB);
      }
    }
  } else if (HasUnwindClobber) {
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
    UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
                      ReadOnly, ReadNone, InNoMergeAttributedStmt,
                      InNoConvergentAttributedStmt, S, ResultRegTypes,
                      ArgElemTypes, *this, RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
                      ReadOnly, ReadNone, InNoMergeAttributedStmt,
                      InNoConvergentAttributedStmt, S, ResultRegTypes,
                      ArgElemTypes, *this, RegResults);
  }

  EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
                ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
                ResultRegIsFlagReg);

  // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
  // different insertion point; one for each indirect destination and with
  // CBRRegResults rather than RegResults.
  if (IsGCCAsmGoto && !CBRRegResults.empty()) {
    for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
      llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
      Builder.SetInsertPoint(Succ, --(Succ->end()));
      EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
                    ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
                    ResultTypeRequiresCast, ResultRegIsFlagReg);
    }
  }
}

LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}
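// A sketch of what EmitCapturedStmt (below) produces at the call site,
// assuming the default "__captured_stmt" helper name:
//   %agg.captured = alloca %struct.anon   ; captures stored field by field
//   call void @__captured_stmt(ptr %agg.captured)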
/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined
/// function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl.
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}

Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress();
}

/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(),
                             &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
  LValue Base = MakeNaturalAlignRawAddrLValue(
      CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO->assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
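// A sketch of the outlined function created above (names are illustrative):
//   define internal void @__captured_stmt(ptr %context) {
//     ; loads VLA sizes and 'this' from %context, then emits the body
//   }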
// Returns the first convergence entry/loop/anchor instruction found in |BB|,
// or nullptr if there is none.
static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
  for (auto &I : *BB) {
    if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
      return CI;
  }
  return nullptr;
}

llvm::CallBase *
CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
  llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
  assert(ParentToken);

  llvm::Value *bundleArgs[] = {ParentToken};
  llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
  auto *Output = llvm::CallBase::addOperandBundle(
      Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
  Input->replaceAllUsesWith(Output);
  Input->eraseFromParent();
  return Output;
}

llvm::ConvergenceControlInst *
CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
  llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
  assert(ParentToken);
  return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
}

llvm::ConvergenceControlInst *
CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
  llvm::BasicBlock *BB = &F->getEntryBlock();
  llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
  if (Token)
    return Token;

  // Adding a convergence token requires the function to be marked as
  // convergent.
  F->setConvergent();
  return llvm::ConvergenceControlInst::CreateEntry(*BB);
}
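// E.g. (illustrative IR): a convergent call nested in a loop ends up as
//   %entry.tok = call token @llvm.experimental.convergence.entry()
//   ...
//   %loop.tok = call token @llvm.experimental.convergence.loop()
//                    [ "convergencectrl"(token %entry.tok) ]
//   call void @f() [ "convergencectrl"(token %loop.tok) ]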