//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <deque>
#include <iterator>

using namespace clang;

//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
class UnreachableCodeHandler : public reachable_code::Callback {
  Sema &S;
  SourceRange PreviousSilenceableCondVal;

public:
  UnreachableCodeHandler(Sema &s) : S(s) {}

  void HandleUnreachable(reachable_code::UnreachableKind UK,
                         SourceLocation L,
                         SourceRange SilenceableCondVal,
                         SourceRange R1,
                         SourceRange R2) override {
    // Avoid reporting multiple unreachable code diagnostics that are
    // triggered by the same conditional value.
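    // For example, in `if (0) { a(); b(); }` we want a single report keyed
    // on the condition's value, not one report per unreachable statement.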
    if (PreviousSilenceableCondVal.isValid() &&
        SilenceableCondVal.isValid() &&
        PreviousSilenceableCondVal == SilenceableCondVal)
      return;
    PreviousSilenceableCondVal = SilenceableCondVal;

    unsigned diag = diag::warn_unreachable;
    switch (UK) {
    case reachable_code::UK_Break:
      diag = diag::warn_unreachable_break;
      break;
    case reachable_code::UK_Return:
      diag = diag::warn_unreachable_return;
      break;
    case reachable_code::UK_Loop_Increment:
      diag = diag::warn_unreachable_loop_increment;
      break;
    case reachable_code::UK_Other:
      break;
    }

    S.Diag(L, diag) << R1 << R2;

    SourceLocation Open = SilenceableCondVal.getBegin();
    if (Open.isValid()) {
      SourceLocation Close = SilenceableCondVal.getEnd();
      Close = S.getLocForEndOfToken(Close);
      if (Close.isValid()) {
        S.Diag(Open, diag::note_unreachable_silence)
            << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (")
            << FixItHint::CreateInsertion(Close, ")");
      }
    }
  }
};
} // anonymous namespace

/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
  // As a heuristic, prune all diagnostics not in the main file. Currently
  // the majority of warnings in headers are false positives. These
  // are largely caused by configuration state, e.g. preprocessor
  // defined code, etc.
  //
  // Note that this is also a performance optimization. Analyzing
  // headers many times can be expensive.
  if (!S.getSourceManager().isInMainFile(AC.getDecl()->getBeginLoc()))
    return;

  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC);
}

namespace {
/// Warn on logical operator errors in CFGBuilder.
class LogicalErrorHandler : public CFGCallback {
  Sema &S;

public:
  LogicalErrorHandler(Sema &S) : S(S) {}

  static bool HasMacroID(const Expr *E) {
    if (E->getExprLoc().isMacroID())
      return true;

    // Recurse to children.
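    // Any subexpression spelled inside a macro suppresses the warnings
    // below, since the macro may expand differently in other contexts.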
    for (const Stmt *SubStmt : E->children())
      if (const Expr *SubExpr = dyn_cast_or_null<Expr>(SubStmt))
        if (HasMacroID(SubExpr))
          return true;

    return false;
  }

  void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_tautological_overlap_comparison)
        << DiagRange << isAlwaysTrue;
  }

  void compareBitwiseEquality(const BinaryOperator *B,
                              bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_always)
        << DiagRange << isAlwaysTrue;
  }

  void compareBitwiseOr(const BinaryOperator *B) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_or) << DiagRange;
  }

  static bool hasActiveDiagnostics(DiagnosticsEngine &Diags,
                                   SourceLocation Loc) {
    return !Diags.isIgnored(diag::warn_tautological_overlap_comparison, Loc) ||
           !Diags.isIgnored(diag::warn_comparison_bitwise_or, Loc);
  }
};
} // anonymous namespace

//===----------------------------------------------------------------------===//
// Check for infinite self-recursion in functions
//===----------------------------------------------------------------------===//

// Returns true if the function is called anywhere within the CFGBlock.
// For member functions, the additional condition of being called through
// the 'this' pointer is required.
static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
  // Process all the Stmt's in this block to find any calls to FD.
  for (const auto &B : Block) {
    if (B.getKind() != CFGElement::Statement)
      continue;

    const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
    if (!CE || !CE->getCalleeDecl() ||
        CE->getCalleeDecl()->getCanonicalDecl() != FD)
      continue;

    // Skip function calls which are qualified with a templated class.
    if (const DeclRefExpr *DRE =
            dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
      if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
        if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
            isa<TemplateSpecializationType>(NNS->getAsType())) {
          continue;
        }
      }
    }

    const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
    if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
        !MCE->getMethodDecl()->isVirtual())
      return true;
  }
  return false;
}

// Returns true if every path from the entry block passes through a call to FD.
static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {
  llvm::SmallPtrSet<CFGBlock *, 16> Visited;
  llvm::SmallVector<CFGBlock *, 16> WorkList;
  // Keep track of whether we found at least one recursive path.
  bool foundRecursion = false;

  const unsigned ExitID = cfg->getExit().getBlockID();

  // Seed the work list with the entry block.
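  // The worklist traversal below visits each block at most once; reaching
  // the exit block means some path returns without calling FD, so we can
  // answer 'false' immediately.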
  WorkList.push_back(&cfg->getEntry());

  while (!WorkList.empty()) {
    CFGBlock *Block = WorkList.pop_back_val();

    for (auto I = Block->succ_begin(), E = Block->succ_end(); I != E; ++I) {
      if (CFGBlock *SuccBlock = *I) {
        if (!Visited.insert(SuccBlock).second)
          continue;

        // Found a path to the exit node without a recursive call.
        if (ExitID == SuccBlock->getBlockID())
          return false;

        // If the successor block contains a recursive call, end analysis there.
        if (hasRecursiveCallInPath(FD, *SuccBlock)) {
          foundRecursion = true;
          continue;
        }

        WorkList.push_back(SuccBlock);
      }
    }
  }
  return foundRecursion;
}

static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
                                   const Stmt *Body, AnalysisDeclContext &AC) {
  FD = FD->getCanonicalDecl();

  // Only run on non-templated functions and non-templated members of
  // templated classes.
  if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate &&
      FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization)
    return;

  CFG *cfg = AC.getCFG();
  if (!cfg) return;

  // If the exit block is unreachable, skip processing the function.
  if (cfg->getExit().pred_empty())
    return;

  // Emit diagnostic if a recursive function call is detected for all paths.
  if (checkForRecursiveFunctionCall(FD, cfg))
    S.Diag(Body->getBeginLoc(), diag::warn_infinite_recursive_function);
}

//===----------------------------------------------------------------------===//
// Check for throw in a non-throwing function.
//===----------------------------------------------------------------------===//

/// Determine whether an exception thrown by E, unwinding from ThrowBlock,
/// can reach ExitBlock.
static bool throwEscapes(Sema &S, const CXXThrowExpr *E, CFGBlock &ThrowBlock,
                         CFG *Body) {
  SmallVector<CFGBlock *, 16> Stack;
  llvm::BitVector Queued(Body->getNumBlockIDs());

  Stack.push_back(&ThrowBlock);
  Queued[ThrowBlock.getBlockID()] = true;

  while (!Stack.empty()) {
    CFGBlock &UnwindBlock = *Stack.back();
    Stack.pop_back();

    for (auto &Succ : UnwindBlock.succs()) {
      if (!Succ.isReachable() || Queued[Succ->getBlockID()])
        continue;

      if (Succ->getBlockID() == Body->getExit().getBlockID())
        return true;

      if (auto *Catch =
              dyn_cast_or_null<CXXCatchStmt>(Succ->getLabel())) {
        QualType Caught = Catch->getCaughtType();
        if (Caught.isNull() || // catch (...) catches everything
            !E->getSubExpr() || // throw; is considered caught by any handler
            S.handlerCanCatch(Caught, E->getSubExpr()->getType()))
          // Exception doesn't escape via this path.
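          // The matching handler absorbs the exception, so stop following
          // this block's successors.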
          break;
      } else {
        Stack.push_back(Succ);
        Queued[Succ->getBlockID()] = true;
      }
    }
  }

  return false;
}

static void visitReachableThrows(
    CFG *BodyCFG,
    llvm::function_ref<void(const CXXThrowExpr *, CFGBlock &)> Visit) {
  llvm::BitVector Reachable(BodyCFG->getNumBlockIDs());
  clang::reachable_code::ScanReachableFromBlock(&BodyCFG->getEntry(), Reachable);
  for (CFGBlock *B : *BodyCFG) {
    if (!Reachable[B->getBlockID()])
      continue;
    for (CFGElement &E : *B) {
      Optional<CFGStmt> S = E.getAs<CFGStmt>();
      if (!S)
        continue;
      if (auto *Throw = dyn_cast<CXXThrowExpr>(S->getStmt()))
        Visit(Throw, *B);
    }
  }
}

static void EmitDiagForCXXThrowInNonThrowingFunc(Sema &S, SourceLocation OpLoc,
                                                 const FunctionDecl *FD) {
  if (!S.getSourceManager().isInSystemHeader(OpLoc) &&
      FD->getTypeSourceInfo()) {
    S.Diag(OpLoc, diag::warn_throw_in_noexcept_func) << FD;
    if (S.getLangOpts().CPlusPlus11 &&
        (isa<CXXDestructorDecl>(FD) ||
         FD->getDeclName().getCXXOverloadedOperator() == OO_Delete ||
         FD->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete)) {
      if (const auto *Ty =
              FD->getTypeSourceInfo()->getType()->getAs<FunctionProtoType>())
        S.Diag(FD->getLocation(), diag::note_throw_in_dtor)
            << !isa<CXXDestructorDecl>(FD) << !Ty->hasExceptionSpec()
            << FD->getExceptionSpecSourceRange();
    } else
      S.Diag(FD->getLocation(), diag::note_throw_in_function)
          << FD->getExceptionSpecSourceRange();
  }
}

static void checkThrowInNonThrowingFunc(Sema &S, const FunctionDecl *FD,
                                        AnalysisDeclContext &AC) {
  CFG *BodyCFG = AC.getCFG();
  if (!BodyCFG)
    return;
  if (BodyCFG->getExit().pred_empty())
    return;
  visitReachableThrows(BodyCFG, [&](const CXXThrowExpr *Throw, CFGBlock &Block) {
    if (throwEscapes(S, Throw, Block, BodyCFG))
      EmitDiagForCXXThrowInNonThrowingFunc(S, Throw->getThrowLoc(), FD);
  });
}

static bool isNoexcept(const FunctionDecl *FD) {
  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
  if (FPT->isNothrow() || FD->hasAttr<NoThrowAttr>())
    return true;
  return false;
}

//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,
  NeverFallThrough,
  MaybeFallThrough,
  AlwaysFallThrough,
  NeverFallThroughOrReturn
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (!cfg) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
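  // For example, statements after an unconditional 'return' still have CFG
  // blocks; without this scan they could masquerade as fall-through paths.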
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (const auto *B : *cfg) {
      if (!live[B->getBlockID()]) {
        if (B->pred_begin() == B->pred_end()) {
          const Stmt *Term = B->getTerminatorStmt();
          if (Term && isa<CXXTryStmt>(Term))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(B, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator I =
           cfg->getExit().filtered_pred_start_end(FO);
       I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG. This is
    // normal. We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (ri->getAs<CFGStmt>())
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      const Stmt *Term = B.getTerminatorStmt();
      if (Term && (isa<CXXTryStmt>(Term) || isa<ObjCAtTryStmt>(Term))) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = ri->castAs<CFGStmt>();
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S) || isa<CoreturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (!llvm::is_contained(B.succs(), &cfg->getExit())) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.
  // If people would like this warning to be more accurate, such functions
  // should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda, Coroutine } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn".
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
          diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForCoroutine(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn = 0;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_coroutine;
    D.diag_AlwaysFallThrough_HasNoReturn = 0;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_coroutine;
    D.funMode = Coroutine;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Block;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function,
                          FuncLoc)) &&
             (!HasNoReturn ||
              D.isIgnored(diag::warn_noreturn_function_has_return_expr,
                          FuncLoc)) &&
             (!ReturnsVoid ||
              D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc));
    }
    if (funMode == Coroutine) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function, FuncLoc) ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_coroutine,
                          FuncLoc)) &&
             (!HasNoReturn);
    }
    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn;
  }
};

} // anonymous namespace

/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    QualType BlockType,
                                    const CheckFallThroughDiagnostics &CD,
                                    AnalysisDeclContext &AC,
                                    sema::FunctionScopeInfo *FSI) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;
  bool IsCoroutine = FSI->isCoroutine();

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (const auto *CBody = dyn_cast<CoroutineBodyStmt>(Body))
      ReturnsVoid = CBody->getFallthroughHandler() != nullptr;
    else
      ReturnsVoid = FD->getReturnType()->isVoidType();
    HasNoReturn = FD->isNoReturn();
  }
  else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getReturnType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    if (const FunctionType *FT =
            BlockType->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getReturnType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;
  SourceLocation LBrace = Body->getBeginLoc(), RBrace = Body->getEndLoc();
  auto EmitDiag = [&](SourceLocation Loc, unsigned DiagID) {
    if (IsCoroutine)
      S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType();
    else
      S.Diag(Loc, DiagID);
  };

  // cpu_dispatch functions permit empty function bodies for ICC compatibility.
  if (D->getAsFunction() && D->getAsFunction()->isCPUDispatchMultiVersion())
    return;

  // Either in a function body compound statement, or a function-try-block.
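  // For example (assuming -Wreturn-type is enabled):
  //   int f(bool b) { if (b) return 1; }  // MaybeFallThrough -> warning
  //   int g() {}                          // AlwaysFallThrough -> warning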
  switch (CheckFallThrough(AC)) {
  case UnknownFallThrough:
    break;

  case MaybeFallThrough:
    if (HasNoReturn)
      EmitDiag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      EmitDiag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid);
    break;
  case AlwaysFallThrough:
    if (HasNoReturn)
      EmitDiag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      EmitDiag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid);
    break;
  case NeverFallThroughOrReturn:
    if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD;
      } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD;
      } else {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn);
      }
    }
    break;
  case NeverFallThrough:
    break;
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
class ContainsReference : public ConstEvaluatedExprVisitor<ContainsReference> {
  bool FoundReference;
  const DeclRefExpr *Needle;

public:
  typedef ConstEvaluatedExprVisitor<ContainsReference> Inherited;

  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
    : Inherited(Context), FoundReference(false), Needle(Needle) {}

  void VisitExpr(const Expr *E) {
    // Stop evaluating if we already have a reference.
    if (FoundReference)
      return;

    Inherited::VisitExpr(E);
  }

  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (E == Needle)
      FoundReference = true;
    else
      Inherited::VisitDeclRefExpr(E);
  }

  bool doesContainReference() const { return FoundReference; }
};
} // anonymous namespace

static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  QualType VariableTy = VD->getType().getCanonicalType();
  if (VariableTy->isBlockPointerType() &&
      !VD->hasAttr<BlocksAttr>()) {
    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization)
        << VD->getDeclName()
        << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
    return true;
  }

  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return false;

  // Don't suggest a fixit inside macros.
  if (VD->getEndLoc().isMacroID())
    return false;

  SourceLocation Loc = S.getLocForEndOfToken(VD->getEndLoc());

  // Suggest possible initialization (if any).
  std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
  if (Init.empty())
    return false;

  S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
      << FixItHint::CreateInsertion(Loc, Init);
  return true;
}

/// Create a fixit to remove an if-like statement, on the assumption that its
/// condition is CondVal.
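/// For example, with CondVal == true, `if (x) f(); else g();` is reduced to
/// `f();`; with CondVal == false, only `g();` is kept.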
static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
                          const Stmt *Else, bool CondVal,
                          FixItHint &Fixit1, FixItHint &Fixit2) {
  if (CondVal) {
    // If condition is always true, remove all but the 'then'.
    Fixit1 = FixItHint::CreateRemoval(
        CharSourceRange::getCharRange(If->getBeginLoc(), Then->getBeginLoc()));
    if (Else) {
      SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getEndLoc());
      Fixit2 =
          FixItHint::CreateRemoval(SourceRange(ElseKwLoc, Else->getEndLoc()));
    }
  } else {
    // If condition is always false, remove all but the 'else'.
    if (Else)
      Fixit1 = FixItHint::CreateRemoval(CharSourceRange::getCharRange(
          If->getBeginLoc(), Else->getBeginLoc()));
    else
      Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
  }
}

/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  switch (Use.getKind()) {
  case UninitUse::Always:
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
    return;

  case UninitUse::AfterDecl:
  case UninitUse::AfterCall:
    S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
        << const_cast<DeclContext*>(VD->getLexicalDeclContext())
        << VD->getSourceRange();
    S.Diag(Use.getUser()->getBeginLoc(), diag::note_uninit_var_use)
        << IsCapturedByBlock << Use.getUser()->getSourceRange();
    return;

  case UninitUse::Maybe:
  case UninitUse::Sometimes:
    // Carry on to report sometimes-uninitialized branches, if possible,
    // or a 'may be used uninitialized' diagnostic otherwise.
    break;
  }

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    StringRef Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. FIXME: Can this happen?
      continue;

    // "condition is true / condition is false".
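    // (DiagKind indexes the phrasing alternatives of the
    // warn_sometimes_uninit_var diagnostic emitted below.)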
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(
            SourceRange(BO->getBeginLoc(), BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::CXXForRangeStmtClass:
      if (I->Output == 1) {
        // The use occurs if a range-based for loop's body never executes.
        // That may be impossible, and there's no syntactic fix for this,
        // so treat it as a 'may be uninitialized' case.
        continue;
      }
      DiagKind = 1;
      Str = "for";
      Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange();
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock << DiagKind
        << Str << I->Output << Range;
    S.Diag(User->getBeginLoc(), diag::note_uninit_var_use)
        << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
          << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  if (!Diagnosed)
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
}

/// Diagnose uninitialized const reference usages.
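/// For example, given `void f(const int &);`, calling `f(x)` on a still
/// uninitialized `x` is reported here unconditionally.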
static bool DiagnoseUninitializedConstRefUse(Sema &S, const VarDecl *VD,
                                             const UninitUse &Use) {
  S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_const_reference)
      << VD->getDeclName() << Use.getUser()->getSourceRange();
  return true;
}

/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      ContainsReference CR(S.Context, DRE);
      CR.Visit(Initializer);
      if (CR.doesContainReference()) {
        S.Diag(DRE->getBeginLoc(), diag::warn_uninit_self_reference_in_init)
            << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getBeginLoc(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
          << VD->getDeclName()
          << VD->getType().getQualifiers().hasObjCLifetime();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getBeginLoc(), diag::note_var_declared_here)
        << VD->getDeclName();

  return true;
}

namespace {
class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
public:
  FallthroughMapper(Sema &S)
    : FoundSwitchStatements(false),
      S(S) {
  }

  bool foundSwitchStatements() const { return FoundSwitchStatements; }

  void markFallthroughVisited(const AttributedStmt *Stmt) {
    bool Found = FallthroughStmts.erase(Stmt);
    assert(Found);
    (void)Found;
  }

  typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;

  const AttrStmts &getFallthroughStmts() const {
    return FallthroughStmts;
  }

  void fillReachableBlocks(CFG *Cfg) {
    assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
    std::deque<const CFGBlock *> BlockQueue;

    ReachableBlocks.insert(&Cfg->getEntry());
    BlockQueue.push_back(&Cfg->getEntry());
    // Mark all case blocks reachable to avoid problems with switching on
    // constants, covered enums, etc.
    // These blocks can contain fall-through annotations, and we don't want to
    // issue a warn_unreachable_fallthrough_attr for them.
    for (const auto *B : *Cfg) {
      const Stmt *L = B->getLabel();
      if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
        BlockQueue.push_back(B);
    }

    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      for (const CFGBlock *B : P->succs()) {
        if (B && ReachableBlocks.insert(B).second)
          BlockQueue.push_back(B);
      }
    }
  }

  bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt,
                                 bool IsTemplateInstantiation) {
    assert(!ReachableBlocks.empty() && "ReachableBlocks empty");

    int UnannotatedCnt = 0;
    AnnotatedCnt = 0;

    std::deque<const CFGBlock*> BlockQueue(B.pred_begin(), B.pred_end());
    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      if (!P) continue;

      const Stmt *Term = P->getTerminatorStmt();
      if (Term && isa<SwitchStmt>(Term))
        continue; // Switch statement, good.

      const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
      if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Previous case label has no statements, good.

      const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
      if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Case label is preceded with a normal label, good.

      if (!ReachableBlocks.count(P)) {
        for (const CFGElement &Elem : llvm::reverse(*P)) {
          if (Optional<CFGStmt> CS = Elem.getAs<CFGStmt>()) {
            if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
              // Don't issue a warning for an unreachable fallthrough
              // attribute in template instantiations as it may not be
              // unreachable in all instantiations of the template.
              if (!IsTemplateInstantiation)
                S.Diag(AS->getBeginLoc(),
                       diag::warn_unreachable_fallthrough_attr);
              markFallthroughVisited(AS);
              ++AnnotatedCnt;
              break;
            }
            // Don't care about other unreachable statements.
          }
        }
        // If there are no unreachable statements, this may be a special
        // case in CFG:
        // case X: {
        //    A a;  // A has a destructor.
        //    break;
        // }
        // // <<<< This place is represented by a 'hanging' CFG block.
        // case Y:
        continue;
      }

      const Stmt *LastStmt = getLastStmt(*P);
      if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
        markFallthroughVisited(AS);
        ++AnnotatedCnt;
        continue; // Fallthrough annotation, good.
      }

      if (!LastStmt) { // This block contains no executable statements.
        // Traverse its predecessors.
        std::copy(P->pred_begin(), P->pred_end(),
                  std::back_inserter(BlockQueue));
        continue;
      }

      ++UnannotatedCnt;
    }
    return !!UnannotatedCnt;
  }

  // RecursiveASTVisitor setup.
  bool shouldWalkTypesOfTypeLocs() const { return false; }

  bool VisitAttributedStmt(AttributedStmt *S) {
    if (asFallThroughAttr(S))
      FallthroughStmts.insert(S);
    return true;
  }

  bool VisitSwitchStmt(SwitchStmt *S) {
    FoundSwitchStatements = true;
    return true;
  }

  // We don't want to traverse local type declarations. We analyze their
  // methods separately.
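  // Returning true without delegating to the base class prunes the
  // traversal at every nested declaration.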
  bool TraverseDecl(Decl *D) { return true; }

  // We analyze lambda bodies separately. Skip them here.
  bool TraverseLambdaExpr(LambdaExpr *LE) {
    // Traverse the captures, but not the body.
    for (const auto C : zip(LE->captures(), LE->capture_inits()))
      TraverseLambdaCapture(LE, &std::get<0>(C), std::get<1>(C));
    return true;
  }

private:

  static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
    if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
      if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
        return AS;
    }
    return nullptr;
  }

  static const Stmt *getLastStmt(const CFGBlock &B) {
    if (const Stmt *Term = B.getTerminatorStmt())
      return Term;
    for (const CFGElement &Elem : llvm::reverse(B))
      if (Optional<CFGStmt> CS = Elem.getAs<CFGStmt>())
        return CS->getStmt();
    // Workaround to detect a statement thrown out by CFGBuilder:
    //   case X: {} case Y:
    //   case X: ; case Y:
    if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
      if (!isa<SwitchCase>(SW->getSubStmt()))
        return SW->getSubStmt();

    return nullptr;
  }

  bool FoundSwitchStatements;
  AttrStmts FallthroughStmts;
  Sema &S;
  llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
};
} // anonymous namespace

static StringRef getFallthroughAttrSpelling(Preprocessor &PP,
                                            SourceLocation Loc) {
  TokenValue FallthroughTokens[] = {
    tok::l_square, tok::l_square,
    PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  TokenValue ClangFallthroughTokens[] = {
    tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
    tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  bool PreferClangAttr = !PP.getLangOpts().CPlusPlus17 && !PP.getLangOpts().C2x;

  StringRef MacroName;
  if (PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PP.getLastMacroWithSpelling(Loc, FallthroughTokens);
  if (MacroName.empty() && !PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty()) {
    if (!PreferClangAttr)
      MacroName = "[[fallthrough]]";
    else if (PP.getLangOpts().CPlusPlus)
      MacroName = "[[clang::fallthrough]]";
    else
      MacroName = "__attribute__((fallthrough))";
  }
  return MacroName;
}

static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerFunction) {
  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  if (PerFunction && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  FM.fillReachableBlocks(Cfg);

  for (const CFGBlock *B : llvm::reverse(*Cfg)) {
    const Stmt *Label = B->getLabel();

    if (!Label || !isa<SwitchCase>(Label))
      continue;

    int AnnotatedCnt;

    bool IsTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(AC.getDecl()))
      IsTemplateInstantiation = Function->isTemplateInstantiation();
    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt,
                                      IsTemplateInstantiation))
      continue;

    S.Diag(Label->getBeginLoc(),
           PerFunction ?
               diag::warn_unannotated_fallthrough_per_function
               : diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getBeginLoc();
      if (L.isMacroID())
        continue;

      const Stmt *Term = B->getTerminatorStmt();
      // Skip empty cases.
      while (B->empty() && !Term && B->succ_size() == 1) {
        B = *B->succ_begin();
        Term = B->getTerminatorStmt();
      }
      if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
        Preprocessor &PP = S.getPreprocessor();
        StringRef AnnotationSpelling = getFallthroughAttrSpelling(PP, L);
        SmallString<64> TextToInsert(AnnotationSpelling);
        TextToInsert += "; ";
        S.Diag(L, diag::note_insert_fallthrough_fixit)
            << AnnotationSpelling
            << FixItHint::CreateInsertion(L, TextToInsert);
      }
      S.Diag(L, diag::note_insert_break_fixit)
          << FixItHint::CreateInsertion(L, "break; ");
    }
  }

  for (const auto *F : FM.getFallthroughStmts())
    S.Diag(F->getBeginLoc(), diag::err_fallthrough_attr_invalid_placement);
}

static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
                     const Stmt *S) {
  assert(S);

  do {
    switch (S->getStmtClass()) {
    case Stmt::ForStmtClass:
    case Stmt::WhileStmtClass:
    case Stmt::CXXForRangeStmtClass:
    case Stmt::ObjCForCollectionStmtClass:
      return true;
    case Stmt::DoStmtClass: {
      Expr::EvalResult Result;
      if (!cast<DoStmt>(S)->getCond()->EvaluateAsInt(Result, Ctx))
        return true;
      return Result.Val.getInt().getBoolValue();
    }
    default:
      break;
    }
  } while ((S = PM.getParent(S)));

  return false;
}

static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
  typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
      StmtUsesPair;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are often
    // changed in loops.
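    // For example:
    //   use(self.weakProp);         // one read...
    //   self.weakProp = newValue;   // ...followed only by writes
    // is not reported unless the read happens inside a loop.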
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  SourceManager &SM = S.getSourceManager();
  llvm::sort(UsesByStmt,
             [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
               return SM.isBeforeInTranslationUnit(LHS.first->getBeginLoc(),
                                                   RHS.first->getBeginLoc());
             });

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (const auto &P : UsesByStmt) {
    const Stmt *FirstRead = P.first;
    const WeakObjectProfileTy &Key = P.second->first;
    const WeakUseVector &Uses = P.second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
    // may not contain enough information to determine that these are different
    // properties. We can only be 100% sure of a repeated use in certain cases,
    // and we adjust the diagnostic kind accordingly so that the less certain
    // case can be turned off if it is too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    const NamedDecl *KeyProp = Key.getProperty();
    if (isa<VarDecl>(KeyProp))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(KeyProp))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(KeyProp))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(KeyProp))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Do not warn about IBOutlet weak property receivers being set to null
    // since they are typically only used from the main thread.
    if (const ObjCPropertyDecl *Prop = dyn_cast<ObjCPropertyDecl>(KeyProp))
      if (Prop->hasAttr<IBOutletAttr>())
        continue;

    // Show the first time the object was read.
    S.Diag(FirstRead->getBeginLoc(), DiagKind)
        << int(ObjectKind) << KeyProp << int(FunctionKind)
        << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (const auto &Use : Uses) {
      if (Use.getUseExpr() == FirstRead)
        continue;
      S.Diag(Use.getUseExpr()->getBeginLoc(),
             diag::note_arc_weak_also_accessed_here)
          << Use.getUseExpr()->getSourceRange();
    }
  }
}

namespace clang {
namespace {
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
} // anonymous namespace
} // namespace clang

namespace {
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
  // Prefer using MapVector to DenseMap, so that iteration order will be
  // the same as insertion order. This is needed to obtain a deterministic
  // order of diagnostics when calling flushDiagnostics().
  typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
  UsesMap uses;
  UsesMap constRefUses;

public:
  UninitValsDiagReporter(Sema &S) : S(S) {}
  ~UninitValsDiagReporter() override { flushDiagnostics(); }

  MappedType &getUses(UsesMap &um, const VarDecl *vd) {
    MappedType &V = um[vd];
    if (!V.getPointer())
      V.setPointer(new UsesVec());
    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd,
                                 const UninitUse &use) override {
    getUses(uses, vd).getPointer()->push_back(use);
  }

  void handleConstRefUseOfUninitVariable(const VarDecl *vd,
                                         const UninitUse &use) override {
    getUses(constRefUses, vd).getPointer()->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) override {
    getUses(uses, vd).setInt(true);
    getUses(constRefUses, vd).setInt(true);
  }

  void flushDiagnostics() {
    for (const auto &P : uses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init. We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations. While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
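        // Combined with the early 'break' below, the sort makes the first
        // (most confident, earliest) use the only one reported per variable.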
        llvm::sort(vec->begin(), vec->end(),
                   [](const UninitUse &a, const UninitUse &b) {
          // Prefer a more confident report over a less confident one.
          if (a.getKind() != b.getKind())
            return a.getKind() > b.getKind();
          return a.getUser()->getBeginLoc() < b.getUser()->getBeginLoc();
        });

        for (const auto &U : *vec) {
          // If we have self-init, downgrade all uses to 'may be uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    uses.clear();

    // Flush all const reference uses diags.
    for (const auto &P : constRefUses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        for (const auto &U : *vec) {
          if (DiagnoseUninitializedConstRefUse(S, vd, U))
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    constRefUses.clear();
  }

private:
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
    return llvm::any_of(*vec, [](const UninitUse &U) {
      return U.getKind() == UninitUse::Always ||
             U.getKind() == UninitUse::AfterCall ||
             U.getKind() == UninitUse::AfterDecl;
    });
  }
};

/// Inter-procedural data for the called-once checker.
class CalledOnceInterProceduralData {
public:
  // Add the delayed warning for the given block.
  void addDelayedWarning(const BlockDecl *Block,
                         PartialDiagnosticAt &&Warning) {
    DelayedBlockWarnings[Block].emplace_back(std::move(Warning));
  }
  // Report all of the warnings we've gathered for the given block.
  void flushWarnings(const BlockDecl *Block, Sema &S) {
    for (const PartialDiagnosticAt &Delayed : DelayedBlockWarnings[Block])
      S.Diag(Delayed.first, Delayed.second);

    discardWarnings(Block);
  }
  // Discard all of the warnings we've gathered for the given block.
  void discardWarnings(const BlockDecl *Block) {
    DelayedBlockWarnings.erase(Block);
  }

private:
  using DelayedDiagnostics = SmallVector<PartialDiagnosticAt, 2>;
  llvm::DenseMap<const BlockDecl *, DelayedDiagnostics> DelayedBlockWarnings;
};

class CalledOnceCheckReporter : public CalledOnceCheckHandler {
public:
  CalledOnceCheckReporter(Sema &S, CalledOnceInterProceduralData &Data)
      : S(S), Data(Data) {}
  void handleDoubleCall(const ParmVarDecl *Parameter, const Expr *Call,
                        const Expr *PrevCall, bool IsCompletionHandler,
                        bool Poised) override {
    auto DiagToReport = IsCompletionHandler
    S.Diag(Call->getBeginLoc(), DiagToReport) << Parameter;
    S.Diag(PrevCall->getBeginLoc(), diag::note_called_once_gets_called_twice)
        << Poised;
  }

  void handleNeverCalled(const ParmVarDecl *Parameter,
                         bool IsCompletionHandler) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_never_called
                            : diag::warn_called_once_never_called;
    S.Diag(Parameter->getBeginLoc(), DiagToReport)
        << Parameter << /* Captured */ false;
  }

  void handleNeverCalled(const ParmVarDecl *Parameter, const Decl *Function,
                         const Stmt *Where, NeverCalledReason Reason,
                         bool IsCalledDirectly,
                         bool IsCompletionHandler) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_never_called_when
                            : diag::warn_called_once_never_called_when;
    PartialDiagnosticAt Warning(Where->getBeginLoc(), S.PDiag(DiagToReport)
                                                          << Parameter
                                                          << IsCalledDirectly
                                                          << (unsigned)Reason);

    if (const auto *Block = dyn_cast<BlockDecl>(Function)) {
      // We shouldn't report these warnings on blocks immediately.
      Data.addDelayedWarning(Block, std::move(Warning));
    } else {
      S.Diag(Warning.first, Warning.second);
    }
  }

  void handleCapturedNeverCalled(const ParmVarDecl *Parameter,
                                 const Decl *Where,
                                 bool IsCompletionHandler) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_never_called
                            : diag::warn_called_once_never_called;
    S.Diag(Where->getBeginLoc(), DiagToReport)
        << Parameter << /* Captured */ true;
  }

  void
  handleBlockThatIsGuaranteedToBeCalledOnce(const BlockDecl *Block) override {
    Data.flushWarnings(Block, S);
  }

  void handleBlockWithNoGuarantees(const BlockDecl *Block) override {
    Data.discardWarnings(Block);
  }

private:
  Sema &S;
  CalledOnceInterProceduralData &Data;
};

constexpr unsigned CalledOnceWarnings[] = {
    diag::warn_called_once_never_called,
    diag::warn_called_once_never_called_when,
    diag::warn_called_once_gets_called_twice};

constexpr unsigned CompletionHandlerWarnings[]{
    diag::warn_completion_handler_never_called,
    diag::warn_completion_handler_never_called_when,
    diag::warn_completion_handler_called_twice};

bool shouldAnalyzeCalledOnceImpl(llvm::ArrayRef<unsigned> DiagIDs,
                                 const DiagnosticsEngine &Diags,
                                 SourceLocation At) {
  return llvm::any_of(DiagIDs, [&Diags, At](unsigned DiagID) {
    return !Diags.isIgnored(DiagID, At);
  });
}

bool shouldAnalyzeCalledOnceConventions(const DiagnosticsEngine &Diags,
                                        SourceLocation At) {
  return shouldAnalyzeCalledOnceImpl(CompletionHandlerWarnings, Diags, At);
}

bool shouldAnalyzeCalledOnceParameters(const DiagnosticsEngine &Diags,
                                       SourceLocation At) {
  return shouldAnalyzeCalledOnceImpl(CalledOnceWarnings, Diags, At) ||
         shouldAnalyzeCalledOnceConventions(Diags, At);
}
} // anonymous namespace

//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace threadSafety {
namespace {
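/// Buffers all thread-safety diagnostics produced for a function and emits
/// them sorted by source location once the analysis completes, so that the
/// output is deterministic.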
class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  const FunctionDecl *CurrentFunction;
  bool Verbose;

  OptionalNotes getNotes() const {
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      return OptionalNotes(1, FNote);
    }
    return OptionalNotes();
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
    OptionalNotes ONS(1, Note);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
                         const PartialDiagnosticAt &Note2) const {
    OptionalNotes ONS;
    ONS.push_back(Note1);
    ONS.push_back(Note2);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes makeLockedHereNote(SourceLocation LocLocked, StringRef Kind) {
    return LocLocked.isValid()
               ? getNotes(PartialDiagnosticAt(
                     LocLocked, S.PDiag(diag::note_locked_here) << Kind))
               : getNotes();
  }

  OptionalNotes makeUnlockedHereNote(SourceLocation LocUnlocked,
                                     StringRef Kind) {
    return LocUnlocked.isValid()
               ? getNotes(PartialDiagnosticAt(
                     LocUnlocked, S.PDiag(diag::note_unlocked_here) << Kind))
               : getNotes();
  }

public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
      : S(S), FunLocation(FL), FunEndLocation(FEL),
        CurrentFunction(nullptr), Verbose(false) {}

  void setVerbose(bool b) { Verbose = b; }

  /// Emit all buffered diagnostics in order of source location.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function sorts the
  /// diagnostics by source location and then outputs them.
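  /// Each warning is emitted immediately followed by its associated notes.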
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
                                         << Loc);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc,
                             SourceLocation LocPreviousUnlock) override {
    if (Loc.isInvalid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock)
                                         << Kind << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeUnlockedHereNote(LocPreviousUnlock, Kind));
  }

  void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
                                 LockKind Expected, LockKind Received,
                                 SourceLocation LocLocked,
                                 SourceLocation LocUnlock) override {
    if (LocUnlock.isInvalid())
      LocUnlock = FunLocation;
    PartialDiagnosticAt Warning(
        LocUnlock, S.PDiag(diag::warn_unlock_kind_mismatch)
                       << Kind << LockName << Received << Expected);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleDoubleLock(StringRef Kind, Name LockName,
                        SourceLocation LocLocked,
                        SourceLocation LocDoubleLock) override {
    if (LocDoubleLock.isInvalid())
      LocDoubleLock = FunLocation;
    PartialDiagnosticAt Warning(LocDoubleLock, S.PDiag(diag::warn_double_lock)
                                                   << Kind << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
                                 SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK) override {
    unsigned DiagID = 0;
    switch (LEK) {
    case LEK_LockedSomePredecessors:
      DiagID = diag::warn_lock_some_predecessors;
      break;
    case LEK_LockedSomeLoopIterations:
      DiagID = diag::warn_expecting_lock_held_on_loop;
      break;
    case LEK_LockedAtEndOfFunction:
      DiagID = diag::warn_no_unlock;
      break;
    case LEK_NotLockedAtEndOfFunction:
      DiagID = diag::warn_expecting_locked;
      break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope,
                                S.PDiag(DiagID) << Kind << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleExclusiveAndShared(StringRef Kind, Name LockName,
                                SourceLocation Loc1,
                                SourceLocation Loc2) override {
    PartialDiagnosticAt Warning(Loc1,
                                S.PDiag(diag::warn_lock_exclusive_and_shared)
                                    << Kind << LockName);
    PartialDiagnosticAt Note(Loc2,
                             S.PDiag(diag::note_lock_exclusive_and_shared)
                                 << Kind << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes(Note));
  }

  void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
                         ProtectedOperationKind POK, AccessKind AK,
                         SourceLocation Loc) override {
    assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
           "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess
                          ? diag::warn_variable_requires_any_lock
                          : diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
                                         << D << getLockKindFromAccessKind(AK));
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
                          ProtectedOperationKind POK, Name LockName,
                          LockKind LK, SourceLocation Loc,
                          Name *PossibleMatch) override {
    unsigned DiagID = 0;
    if (PossibleMatch) {
      switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock_precise;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock_precise;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock_precise;
        break;
      case POK_PassByRef:
        DiagID = diag::warn_guarded_pass_by_reference;
        break;
      case POK_PtPassByRef:
        DiagID = diag::warn_pt_guarded_pass_by_reference;
        break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << D
                                                       << LockName << LK);
      PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
                                        << *PossibleMatch);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt VNote(D->getLocation(),
                                  S.PDiag(diag::note_guarded_by_declared_here)
                                      << D->getDeclName());
        Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
    } else {
      switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock;
        break;
      case POK_PassByRef:
        DiagID = diag::warn_guarded_pass_by_reference;
        break;
      case POK_PtPassByRef:
        DiagID = diag::warn_pt_guarded_pass_by_reference;
        break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << D
                                                       << LockName << LK);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt Note(D->getLocation(),
                                 S.PDiag(diag::note_guarded_by_declared_here));
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes());
    }
  }

  void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_acquire_requires_negative_cap)
                 << Kind << LockName << Neg);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleNegativeNotHeld(const NamedDecl *D, Name LockName,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_fun_requires_negative_cap) << D << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
                                         << Kind << FunName << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

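  // The next two handlers report violations of lock acquisition order, as
  // expressed with the acquired_before/acquired_after attributes.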
  void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name,
                                SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void enterFunction(const FunctionDecl *FD) override {
    CurrentFunction = FD;
  }

  void leaveFunction(const FunctionDecl *FD) override {
    CurrentFunction = nullptr;
  }
};
} // anonymous namespace
} // namespace threadSafety
} // namespace clang

//===----------------------------------------------------------------------===//
// -Wconsumed
//===----------------------------------------------------------------------===//

namespace clang {
namespace consumed {
namespace {
class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase {

  Sema &S;
  DiagList Warnings;

public:
  ConsumedWarningsHandler(Sema &S) : S(S) {}

  void emitDiagnostics() override {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void warnLoopStateMismatch(SourceLocation Loc,
                             StringRef VariableName) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch)
                                         << VariableName);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnParamReturnTypestateMismatch(SourceLocation Loc,
                                        StringRef VariableName,
                                        StringRef ExpectedState,
                                        StringRef ObservedState) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_param_return_typestate_mismatch)
                 << VariableName << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
                                  StringRef ObservedState) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_param_typestate_mismatch)
                 << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnReturnTypestateForUnconsumableType(SourceLocation Loc,
                                              StringRef TypeName) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_return_typestate_for_unconsumable_type)
                 << TypeName);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
                                   StringRef ObservedState) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_return_typestate_mismatch)
                 << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State,
                                   SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_use_of_temp_in_invalid_state)
                 << MethodName << State);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnUseInInvalidState(StringRef MethodName, StringRef VariableName,
                             StringRef State, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_use_in_invalid_state)
                                         << MethodName << VariableName
                                         << State);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }
};
} // anonymous namespace
} // namespace consumed
} // namespace clang

//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//

sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
  enableConsumedAnalysis = 0;
}

/// InterProceduralData serves as storage for whatever data needs to be passed
/// between analyses of different functions.
///
/// At the moment, its primary goal is to make the information gathered during
/// the analysis of blocks available during the analysis of the enclosing
/// function. This is important because blocks are analyzed before their
/// enclosing function has even been fully parsed, so it is not viable to
/// access anything in the outer scope while analyzing the block. On the other
/// hand, re-building the CFG for blocks and re-analyzing them once we do have
/// all the information (i.e. during the analysis of the enclosing function)
/// would be a poor design.
class sema::AnalysisBasedWarnings::InterProceduralData {
public:
  // It is important to analyze blocks within functions because it's a very
  // common pattern to capture completion handler parameters by blocks.
  CalledOnceInterProceduralData CalledOnceData;
};

static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
  return (unsigned)!D.isIgnored(diag, SourceLocation());
}

sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
    : S(s), IPData(std::make_unique<InterProceduralData>()),
      NumFunctionsAnalyzed(0), NumFunctionsWithBadCFGs(0), NumCFGBlocks(0),
      MaxCFGBlocksPerFunction(0), NumUninitAnalysisFunctions(0),
      NumUninitAnalysisVariables(0), MaxUninitAnalysisVariablesPerFunction(0),
      NumUninitAnalysisBlockVisits(0),
      MaxUninitAnalysisBlockVisitsPerFunction(0) {

  using namespace diag;
  DiagnosticsEngine &D = S.getDiagnostics();

  DefaultPolicy.enableCheckUnreachable =
      isEnabled(D, warn_unreachable) || isEnabled(D, warn_unreachable_break) ||
      isEnabled(D, warn_unreachable_return) ||
      isEnabled(D, warn_unreachable_loop_increment);

  DefaultPolicy.enableThreadSafetyAnalysis = isEnabled(D, warn_double_lock);

  DefaultPolicy.enableConsumedAnalysis =
      isEnabled(D, warn_use_in_invalid_state);
}
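
// Note that DefaultPolicy leaves enableCheckFallThrough on unconditionally;
// the costlier analyses above are turned on only when one of their lead
// diagnostics is not ignored at the command-line (default) level.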

// We need this here for unique_ptr with forward declared class.
sema::AnalysisBasedWarnings::~AnalysisBasedWarnings() = default;

static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
  for (const auto &D : fscope->PossiblyUnreachableDiags)
    S.Diag(D.Loc, D.PD);
}

void clang::sema::AnalysisBasedWarnings::IssueWarnings(
    sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope,
    const Decl *D, QualType BlockType) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis if we are just going to ignore the warnings.
  if (Diags.getIgnoreAllWarnings() ||
      (Diags.getSuppressSystemWarnings() &&
       S.SourceMgr.isInSystemHeader(D->getLocation())))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (S.hasUncompilableErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  // Construct the analysis context with the specified CFG build options.
  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion of destructor edges that can result, and the compile-time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;
  AC.getCFGBuildOptions().AddTemporaryDtors = true;
  AC.getCFGBuildOptions().AddCXXNewAllocator = false;
  AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;

  // Force certain expressions to appear as CFGElements in the CFG. This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring. This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately. This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
      P.enableConsumedAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  } else {
    AC.getCFGBuildOptions()
        .setAlwaysAdd(Stmt::BinaryOperatorClass)
        .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
        .setAlwaysAdd(Stmt::BlockExprClass)
        .setAlwaysAdd(Stmt::CStyleCastExprClass)
        .setAlwaysAdd(Stmt::DeclRefExprClass)
        .setAlwaysAdd(Stmt::ImplicitCastExprClass)
        .setAlwaysAdd(Stmt::UnaryOperatorClass);
  }

  // Install the logical handler.
  llvm::Optional<LogicalErrorHandler> LEH;
  if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
    LEH.emplace(S);
    AC.getCFGBuildOptions().Observer = &*LEH;
  }

  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

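    // Each delayed diagnostic is emitted only if every statement it depends
    // on is reachable from the function entry; otherwise it is dropped.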
    // Register the expressions with the CFGBuilder.
    for (const auto &D : fscope->PossiblyUnreachableDiags) {
      for (const Stmt *S : D.Stmts)
        AC.registerForcedBlockExpression(S);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (const auto &D : fscope->PossiblyUnreachableDiags) {
        bool AllReachable = true;
        for (const Stmt *S : D.Stmts) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(S);
          CFGReverseBlockReachabilityAnalysis *cra =
              AC.getCFGReachablityAnalysis();
          // FIXME: We should be able to assert that block is non-null, but
          // the CFG analysis can skip potentially-evaluated expressions in
          // edge cases; see test/Sema/vla-2.c.
          if (block && cra) {
            // Can this block be reached from the entrance?
            if (!cra->isReachable(&AC.getCFG()->getEntry(), block)) {
              AllReachable = false;
              break;
            }
          }
          // If we cannot map to a basic block, assume the statement is
          // reachable.
        }

        if (AllReachable)
          S.Diag(D.Loc, D.PD);
      }
    }

    if (!analyzed)
      flushDiagnostics(S, fscope);
  }

  // Warning: check for functions that fall off the end without returning
  // a value.
  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
        (isa<BlockDecl>(D)
             ? CheckFallThroughDiagnostics::MakeForBlock()
             : (isa<CXXMethodDecl>(D) &&
                cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
                cast<CXXMethodDecl>(D)->getParent()->isLambda())
                   ? CheckFallThroughDiagnostics::MakeForLambda()
                   : (fscope->isCoroutine()
                          ? CheckFallThroughDiagnostics::MakeForCoroutine(D)
                          : CheckFallThroughDiagnostics::MakeForFunction(D)));
    CheckFallThroughForBody(S, D, Body, BlockType, CD, AC, fscope);
  }

  // Warning: check for unreachable code.
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the
    // control-flow and it is very difficult to prove that a snippet of code
    // in a template is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);
  }

  // Check for thread safety violations.
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getEndLoc();
    threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
    if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getBeginLoc()))
      Reporter.setIssueBetaWarnings(true);
    if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getBeginLoc()))
      Reporter.setVerbose(true);

    threadSafety::runThreadSafetyAnalysis(AC, Reporter,
                                          &S.ThreadSafetyDeclCache);
    Reporter.emitDiagnostics();
  }

  // Check for violations of consumed properties.
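  // The consumed analysis is driven by the consumable/callable_when/
  // return_typestate family of attributes.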
  if (P.enableConsumedAnalysis) {
    consumed::ConsumedWarningsHandler WarningHandler(S);
    consumed::ConsumedAnalyzer Analyzer(WarningHandler);
    Analyzer.run(AC);
  }

  if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_uninit_const_reference, D->getBeginLoc())) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  // Check for violations of "called once" parameter properties.
  if (S.getLangOpts().ObjC && !S.getLangOpts().CPlusPlus &&
      shouldAnalyzeCalledOnceParameters(Diags, D->getBeginLoc())) {
    if (AC.getCFG()) {
      CalledOnceCheckReporter Reporter(S, IPData->CalledOnceData);
      checkCalledOnceParameters(
          AC, Reporter,
          shouldAnalyzeCalledOnceConventions(Diags, D->getBeginLoc()));
    }
  }

  bool FallThroughDiagFull =
      !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getBeginLoc());
  bool FallThroughDiagPerFunction = !Diags.isIgnored(
      diag::warn_unannotated_fallthrough_per_function, D->getBeginLoc());
  if (FallThroughDiagFull || FallThroughDiagPerFunction ||
      fscope->HasFallthroughStmt) {
    DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
  }

  if (S.getLangOpts().ObjCWeak &&
      !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getBeginLoc()))
    diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());

  // Check for infinite self-recursion in functions.
  if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
                       D->getBeginLoc())) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      checkRecursiveFunction(S, FD, Body, AC);
    }
  }

  // Check for throw out of non-throwing function.
  if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
      if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
        checkThrowInNonThrowingFunc(S, FD, AC);

  // If none of the previous checks caused a CFG build, trigger one here
  // for the logical error handler.
  if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
    AC.getCFG();
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detailed information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}

/// Print the statistics gathered while Sema::CollectStats is enabled.
void clang::sema::AnalysisBasedWarnings::PrintStats() const {
  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";

  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
  unsigned AvgCFGBlocksPerFunction =
      !NumCFGsBuilt ? 0 : NumCFGBlocks / NumCFGsBuilt;
  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
               << "  " << NumCFGBlocks << " CFG blocks built.\n"
               << "  " << AvgCFGBlocksPerFunction
               << " average CFG blocks per function.\n"
               << "  " << MaxCFGBlocksPerFunction
               << " max CFG blocks per function.\n";

  unsigned AvgUninitVariablesPerFunction =
      !NumUninitAnalysisFunctions
          ? 0
          : NumUninitAnalysisVariables / NumUninitAnalysisFunctions;
  unsigned AvgUninitBlockVisitsPerFunction =
      !NumUninitAnalysisFunctions
          ? 0
          : NumUninitAnalysisBlockVisits / NumUninitAnalysisFunctions;
  llvm::errs() << NumUninitAnalysisFunctions
               << " functions analyzed for uninitialized variables\n"
               << "  " << NumUninitAnalysisVariables
               << " variables analyzed.\n"
               << "  " << AvgUninitVariablesPerFunction
               << " average variables per function.\n"
               << "  " << MaxUninitAnalysisVariablesPerFunction
               << " max variables per function.\n"
               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
               << "  " << AvgUninitBlockVisitsPerFunction
               << " average block visits per function.\n"
               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
               << " max block visits per function.\n";
}