//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <deque>
#include <iterator>

using namespace clang;

//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
class UnreachableCodeHandler : public reachable_code::Callback {
  Sema &S;
  SourceRange PreviousSilenceableCondVal;

public:
  UnreachableCodeHandler(Sema &s) : S(s) {}

  void HandleUnreachable(reachable_code::UnreachableKind UK,
                         SourceLocation L,
                         SourceRange SilenceableCondVal,
                         SourceRange R1,
                         SourceRange R2) override {
    // Avoid reporting multiple unreachable code diagnostics that are
    // triggered by the same conditional value.
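    // For example, a constant condition such as 'if (0) { a(); b(); }' can
    // make several statements unreachable at once; without this check, each
    // of them would be reported against the same condition range.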
    if (PreviousSilenceableCondVal.isValid() &&
        SilenceableCondVal.isValid() &&
        PreviousSilenceableCondVal == SilenceableCondVal)
      return;
    PreviousSilenceableCondVal = SilenceableCondVal;

    unsigned diag = diag::warn_unreachable;
    switch (UK) {
    case reachable_code::UK_Break:
      diag = diag::warn_unreachable_break;
      break;
    case reachable_code::UK_Return:
      diag = diag::warn_unreachable_return;
      break;
    case reachable_code::UK_Loop_Increment:
      diag = diag::warn_unreachable_loop_increment;
      break;
    case reachable_code::UK_Other:
      break;
    }

    S.Diag(L, diag) << R1 << R2;

    SourceLocation Open = SilenceableCondVal.getBegin();
    if (Open.isValid()) {
      SourceLocation Close = SilenceableCondVal.getEnd();
      Close = S.getLocForEndOfToken(Close);
      if (Close.isValid()) {
        S.Diag(Open, diag::note_unreachable_silence)
            << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (")
            << FixItHint::CreateInsertion(Close, ")");
      }
    }
  }
};
} // anonymous namespace

/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
  // As a heuristic prune all diagnostics not in the main file. Currently
  // the majority of warnings in headers are false positives. These
  // are largely caused by configuration state, e.g. preprocessor
  // defined code, etc.
  //
  // Note that this is also a performance optimization. Analyzing
  // headers many times can be expensive.
  if (!S.getSourceManager().isInMainFile(AC.getDecl()->getBeginLoc()))
    return;

  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC);
}

namespace {
/// Warn on logical operator errors in CFGBuilder
class LogicalErrorHandler : public CFGCallback {
  Sema &S;

public:
  LogicalErrorHandler(Sema &S) : S(S) {}

  static bool HasMacroID(const Expr *E) {
    if (E->getExprLoc().isMacroID())
      return true;

    // Recurse to children.
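    // A macro expansion anywhere in the subexpression tree suppresses the
    // warning, since macro-expanded logic frequently produces intentional
    // tautologies.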
    for (const Stmt *SubStmt : E->children())
      if (const Expr *SubExpr = dyn_cast_or_null<Expr>(SubStmt))
        if (HasMacroID(SubExpr))
          return true;

    return false;
  }

  void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_tautological_overlap_comparison)
        << DiagRange << isAlwaysTrue;
  }

  void compareBitwiseEquality(const BinaryOperator *B,
                              bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_always)
        << DiagRange << isAlwaysTrue;
  }

  void compareBitwiseOr(const BinaryOperator *B) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_or) << DiagRange;
  }

  static bool hasActiveDiagnostics(DiagnosticsEngine &Diags,
                                   SourceLocation Loc) {
    return !Diags.isIgnored(diag::warn_tautological_overlap_comparison, Loc) ||
           !Diags.isIgnored(diag::warn_comparison_bitwise_or, Loc);
  }
};
} // anonymous namespace

//===----------------------------------------------------------------------===//
// Check for infinite self-recursion in functions
//===----------------------------------------------------------------------===//

// Returns true if the function is called anywhere within the CFGBlock.
// For member functions, the additional condition of being called through
// the 'this' pointer is required.
static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
  // Process all the Stmt's in this block to find any calls to FD.
  for (const auto &B : Block) {
    if (B.getKind() != CFGElement::Statement)
      continue;

    const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
    if (!CE || !CE->getCalleeDecl() ||
        CE->getCalleeDecl()->getCanonicalDecl() != FD)
      continue;

    // Skip function calls which are qualified with a templated class.
    if (const DeclRefExpr *DRE =
            dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
      if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
        if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
            isa<TemplateSpecializationType>(NNS->getAsType())) {
          continue;
        }
      }
    }

    const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
    if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
        !MCE->getMethodDecl()->isVirtual())
      return true;
  }
  return false;
}

// Returns true if every path from the entry block passes through a call to FD.
static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {
  llvm::SmallPtrSet<CFGBlock *, 16> Visited;
  llvm::SmallVector<CFGBlock *, 16> WorkList;
  // Keep track of whether we found at least one recursive path.
  bool foundRecursion = false;

  const unsigned ExitID = cfg->getExit().getBlockID();

  // Seed the work list with the entry block.
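  // The traversal below is a depth-first search over block successors:
  // reaching the exit block without passing a recursive call disproves the
  // property, while paths that hit a recursive call are not explored further.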
  WorkList.push_back(&cfg->getEntry());

  while (!WorkList.empty()) {
    CFGBlock *Block = WorkList.pop_back_val();

    for (auto I = Block->succ_begin(), E = Block->succ_end(); I != E; ++I) {
      if (CFGBlock *SuccBlock = *I) {
        if (!Visited.insert(SuccBlock).second)
          continue;

        // Found a path to the exit node without a recursive call.
        if (ExitID == SuccBlock->getBlockID())
          return false;

        // If the successor block contains a recursive call, end analysis there.
        if (hasRecursiveCallInPath(FD, *SuccBlock)) {
          foundRecursion = true;
          continue;
        }

        WorkList.push_back(SuccBlock);
      }
    }
  }
  return foundRecursion;
}

static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
                                   const Stmt *Body, AnalysisDeclContext &AC) {
  FD = FD->getCanonicalDecl();

  // Only run on non-templated functions and non-templated members of
  // templated classes.
  if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate &&
      FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization)
    return;

  CFG *cfg = AC.getCFG();
  if (!cfg) return;

  // If the exit block is unreachable, skip processing the function.
  if (cfg->getExit().pred_empty())
    return;

  // Emit diagnostic if a recursive function call is detected for all paths.
  if (checkForRecursiveFunctionCall(FD, cfg))
    S.Diag(Body->getBeginLoc(), diag::warn_infinite_recursive_function);
}

//===----------------------------------------------------------------------===//
// Check for throw in a non-throwing function.
//===----------------------------------------------------------------------===//

/// Determine whether an exception thrown by E, unwinding from ThrowBlock,
/// can reach ExitBlock.
static bool throwEscapes(Sema &S, const CXXThrowExpr *E, CFGBlock &ThrowBlock,
                         CFG *Body) {
  SmallVector<CFGBlock *, 16> Stack;
  llvm::BitVector Queued(Body->getNumBlockIDs());

  Stack.push_back(&ThrowBlock);
  Queued[ThrowBlock.getBlockID()] = true;

  while (!Stack.empty()) {
    CFGBlock &UnwindBlock = *Stack.back();
    Stack.pop_back();

    for (auto &Succ : UnwindBlock.succs()) {
      if (!Succ.isReachable() || Queued[Succ->getBlockID()])
        continue;

      if (Succ->getBlockID() == Body->getExit().getBlockID())
        return true;

      if (auto *Catch =
              dyn_cast_or_null<CXXCatchStmt>(Succ->getLabel())) {
        QualType Caught = Catch->getCaughtType();
        if (Caught.isNull() || // catch (...) catches everything
            !E->getSubExpr() || // throw; is considered caught by any handler
            S.handlerCanCatch(Caught, E->getSubExpr()->getType()))
          // Exception doesn't escape via this path.
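          // Stop following successors of this unwind block: once a handler
          // can catch the exception, later handlers on the same unwind path
          // are never consulted.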
          break;
      } else {
        Stack.push_back(Succ);
        Queued[Succ->getBlockID()] = true;
      }
    }
  }

  return false;
}

static void visitReachableThrows(
    CFG *BodyCFG,
    llvm::function_ref<void(const CXXThrowExpr *, CFGBlock &)> Visit) {
  llvm::BitVector Reachable(BodyCFG->getNumBlockIDs());
  clang::reachable_code::ScanReachableFromBlock(&BodyCFG->getEntry(),
                                                Reachable);
  for (CFGBlock *B : *BodyCFG) {
    if (!Reachable[B->getBlockID()])
      continue;
    for (CFGElement &E : *B) {
      Optional<CFGStmt> S = E.getAs<CFGStmt>();
      if (!S)
        continue;
      if (auto *Throw = dyn_cast<CXXThrowExpr>(S->getStmt()))
        Visit(Throw, *B);
    }
  }
}

static void EmitDiagForCXXThrowInNonThrowingFunc(Sema &S, SourceLocation OpLoc,
                                                 const FunctionDecl *FD) {
  if (!S.getSourceManager().isInSystemHeader(OpLoc) &&
      FD->getTypeSourceInfo()) {
    S.Diag(OpLoc, diag::warn_throw_in_noexcept_func) << FD;
    if (S.getLangOpts().CPlusPlus11 &&
        (isa<CXXDestructorDecl>(FD) ||
         FD->getDeclName().getCXXOverloadedOperator() == OO_Delete ||
         FD->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete)) {
      if (const auto *Ty = FD->getTypeSourceInfo()->getType()->
                               getAs<FunctionProtoType>())
        S.Diag(FD->getLocation(), diag::note_throw_in_dtor)
            << !isa<CXXDestructorDecl>(FD) << !Ty->hasExceptionSpec()
            << FD->getExceptionSpecSourceRange();
    } else
      S.Diag(FD->getLocation(), diag::note_throw_in_function)
          << FD->getExceptionSpecSourceRange();
  }
}

static void checkThrowInNonThrowingFunc(Sema &S, const FunctionDecl *FD,
                                        AnalysisDeclContext &AC) {
  CFG *BodyCFG = AC.getCFG();
  if (!BodyCFG)
    return;
  if (BodyCFG->getExit().pred_empty())
    return;
  visitReachableThrows(BodyCFG, [&](const CXXThrowExpr *Throw, CFGBlock &Block) {
    if (throwEscapes(S, Throw, Block, BodyCFG))
      EmitDiagForCXXThrowInNonThrowingFunc(S, Throw->getThrowLoc(), FD);
  });
}

static bool isNoexcept(const FunctionDecl *FD) {
  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
  if (FPT->isNothrow() || FD->hasAttr<NoThrowAttr>())
    return true;
  return false;
}

//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,
  NeverFallThrough,
  MaybeFallThrough,
  AlwaysFallThrough,
  NeverFallThroughOrReturn
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (!cfg) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
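  // 'live' is indexed by block ID; 'count' is the number of blocks found
  // reachable from the entry block.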
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (const auto *B : *cfg) {
      if (!live[B->getBlockID()]) {
        if (B->pred_begin() == B->pred_end()) {
          const Stmt *Term = B->getTerminatorStmt();
          if (Term && isa<CXXTryStmt>(Term))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(B, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator I =
           cfg->getExit().filtered_pred_start_end(FO);
       I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG. This is
    // normal. We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (ri->getAs<CFGStmt>())
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      const Stmt *Term = B.getTerminatorStmt();
      if (Term && (isa<CXXTryStmt>(Term) || isa<ObjCAtTryStmt>(Term))) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = ri->castAs<CFGStmt>();
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S) || isa<CoreturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (!llvm::is_contained(B.succs(), &cfg->getExit())) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.
  // If people would like this warning to be more accurate, such functions
  // should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda, Coroutine } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn"
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
          diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForCoroutine(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn = 0;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_coroutine;
    D.diag_AlwaysFallThrough_HasNoReturn = 0;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_coroutine;
    D.funMode = Coroutine;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Block;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function,
                          FuncLoc)) &&
             (!HasNoReturn ||
              D.isIgnored(diag::warn_noreturn_function_has_return_expr,
                          FuncLoc)) &&
             (!ReturnsVoid ||
              D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc));
    }
    if (funMode == Coroutine) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function, FuncLoc) ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_coroutine,
                          FuncLoc)) &&
             (!HasNoReturn);
    }
    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn;
  }
};

} // anonymous namespace

/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    QualType BlockType,
                                    const CheckFallThroughDiagnostics &CD,
                                    AnalysisDeclContext &AC,
                                    sema::FunctionScopeInfo *FSI) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;
  bool IsCoroutine = FSI->isCoroutine();

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (const auto *CBody = dyn_cast<CoroutineBodyStmt>(Body))
      ReturnsVoid = CBody->getFallthroughHandler() != nullptr;
    else
      ReturnsVoid = FD->getReturnType()->isVoidType();
    HasNoReturn = FD->isNoReturn();
  }
  else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getReturnType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    if (const FunctionType *FT =
            BlockType->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getReturnType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;
  SourceLocation LBrace = Body->getBeginLoc(), RBrace = Body->getEndLoc();
  auto EmitDiag = [&](SourceLocation Loc, unsigned DiagID) {
    if (IsCoroutine)
      S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType();
    else
      S.Diag(Loc, DiagID);
  };

  // cpu_dispatch functions permit empty function bodies for ICC compatibility.
  if (D->getAsFunction() && D->getAsFunction()->isCPUDispatchMultiVersion())
    return;

  // Either in a function body compound statement, or a function-try-block.
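  // For instance, a body like 'if (x) return 1;' in a function returning int
  // yields MaybeFallThrough (some paths return, some fall off the end), while
  // an empty body yields AlwaysFallThrough.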
  switch (CheckFallThrough(AC)) {
  case UnknownFallThrough:
    break;

  case MaybeFallThrough:
    if (HasNoReturn)
      EmitDiag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      EmitDiag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid);
    break;
  case AlwaysFallThrough:
    if (HasNoReturn)
      EmitDiag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      EmitDiag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid);
    break;
  case NeverFallThroughOrReturn:
    if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD;
      } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD;
      } else {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn);
      }
    }
    break;
  case NeverFallThrough:
    break;
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
class ContainsReference : public ConstEvaluatedExprVisitor<ContainsReference> {
  bool FoundReference;
  const DeclRefExpr *Needle;

public:
  typedef ConstEvaluatedExprVisitor<ContainsReference> Inherited;

  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
    : Inherited(Context), FoundReference(false), Needle(Needle) {}

  void VisitExpr(const Expr *E) {
    // Stop evaluating if we already have a reference.
    if (FoundReference)
      return;

    Inherited::VisitExpr(E);
  }

  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (E == Needle)
      FoundReference = true;
    else
      Inherited::VisitDeclRefExpr(E);
  }

  bool doesContainReference() const { return FoundReference; }
};
} // anonymous namespace

static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  QualType VariableTy = VD->getType().getCanonicalType();
  if (VariableTy->isBlockPointerType() &&
      !VD->hasAttr<BlocksAttr>()) {
    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization)
        << VD->getDeclName()
        << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
    return true;
  }

  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return false;

  // Don't suggest a fixit inside macros.
  if (VD->getEndLoc().isMacroID())
    return false;

  SourceLocation Loc = S.getLocForEndOfToken(VD->getEndLoc());

  // Suggest possible initialization (if any).
  std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
  if (Init.empty())
    return false;

  S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
      << FixItHint::CreateInsertion(Loc, Init);
  return true;
}

/// Create a fixit to remove an if-like statement, on the assumption that its
/// condition is CondVal.
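/// For example, given 'if (b) f(); else g();' and CondVal == true, the fixits
/// roughly remove the 'if (b) ' prefix and the trailing 'else g();', leaving
/// just 'f();'.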
static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
                          const Stmt *Else, bool CondVal,
                          FixItHint &Fixit1, FixItHint &Fixit2) {
  if (CondVal) {
    // If condition is always true, remove all but the 'then'.
    Fixit1 = FixItHint::CreateRemoval(
        CharSourceRange::getCharRange(If->getBeginLoc(), Then->getBeginLoc()));
    if (Else) {
      SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getEndLoc());
      Fixit2 =
          FixItHint::CreateRemoval(SourceRange(ElseKwLoc, Else->getEndLoc()));
    }
  } else {
    // If condition is always false, remove all but the 'else'.
    if (Else)
      Fixit1 = FixItHint::CreateRemoval(CharSourceRange::getCharRange(
          If->getBeginLoc(), Else->getBeginLoc()));
    else
      Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
  }
}

/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  switch (Use.getKind()) {
  case UninitUse::Always:
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
    return;

  case UninitUse::AfterDecl:
  case UninitUse::AfterCall:
    S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
        << const_cast<DeclContext*>(VD->getLexicalDeclContext())
        << VD->getSourceRange();
    S.Diag(Use.getUser()->getBeginLoc(), diag::note_uninit_var_use)
        << IsCapturedByBlock << Use.getUser()->getSourceRange();
    return;

  case UninitUse::Maybe:
  case UninitUse::Sometimes:
    // Carry on to report sometimes-uninitialized branches, if possible,
    // or a 'may be used uninitialized' diagnostic otherwise.
    break;
  }

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    StringRef Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. FIXME: Can this happen?
      continue;

    // "condition is true / condition is false".
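    // DiagKind and Str feed the %select clauses of warn_sometimes_uninit_var,
    // producing text such as "variable 'n' is used uninitialized whenever
    // 'if' condition is true".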
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(
            SourceRange(BO->getBeginLoc(), BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::CXXForRangeStmtClass:
      if (I->Output == 1) {
        // The use occurs if a range-based for loop's body never executes.
        // That may be impossible, and there's no syntactic fix for this,
        // so treat it as a 'may be uninitialized' case.
        continue;
      }
      DiagKind = 1;
      Str = "for";
      Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange();
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock << DiagKind
        << Str << I->Output << Range;
    S.Diag(User->getBeginLoc(), diag::note_uninit_var_use)
        << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
          << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  if (!Diagnosed)
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
}

/// Diagnose uninitialized const reference usages.
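/// This covers cases such as passing an uninitialized variable to a
/// 'const int &' parameter, where the callee can observe the garbage value.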
static bool DiagnoseUninitializedConstRefUse(Sema &S, const VarDecl *VD,
                                             const UninitUse &Use) {
  S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_const_reference)
      << VD->getDeclName() << Use.getUser()->getSourceRange();
  return true;
}

/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      ContainsReference CR(S.Context, DRE);
      CR.Visit(Initializer);
      if (CR.doesContainReference()) {
        S.Diag(DRE->getBeginLoc(), diag::warn_uninit_self_reference_in_init)
            << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getBeginLoc(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
          << VD->getDeclName()
          << VD->getType().getQualifiers().hasObjCLifetime();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getBeginLoc(), diag::note_var_declared_here)
        << VD->getDeclName();

  return true;
}

namespace {
class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
public:
  FallthroughMapper(Sema &S)
    : FoundSwitchStatements(false),
      S(S) {
  }

  bool foundSwitchStatements() const { return FoundSwitchStatements; }

  void markFallthroughVisited(const AttributedStmt *Stmt) {
    bool Found = FallthroughStmts.erase(Stmt);
    assert(Found);
    (void)Found;
  }

  typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;

  const AttrStmts &getFallthroughStmts() const {
    return FallthroughStmts;
  }

  void fillReachableBlocks(CFG *Cfg) {
    assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
    std::deque<const CFGBlock *> BlockQueue;

    ReachableBlocks.insert(&Cfg->getEntry());
    BlockQueue.push_back(&Cfg->getEntry());
    // Mark all case blocks reachable to avoid problems with switching on
    // constants, covered enums, etc.
    // These blocks can contain fall-through annotations, and we don't want to
    // issue a warn_fallthrough_attr_unreachable for them.
    for (const auto *B : *Cfg) {
      const Stmt *L = B->getLabel();
      if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
        BlockQueue.push_back(B);
    }

    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      for (const CFGBlock *B : P->succs()) {
        if (B && ReachableBlocks.insert(B).second)
          BlockQueue.push_back(B);
      }
    }
  }

  bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt,
                                 bool IsTemplateInstantiation) {
    assert(!ReachableBlocks.empty() && "ReachableBlocks empty");

    int UnannotatedCnt = 0;
    AnnotatedCnt = 0;

    std::deque<const CFGBlock*> BlockQueue(B.pred_begin(), B.pred_end());
    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      if (!P) continue;

      const Stmt *Term = P->getTerminatorStmt();
      if (Term && isa<SwitchStmt>(Term))
        continue; // Switch statement, good.

      const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
      if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Previous case label has no statements, good.

      const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
      if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Case label is preceded with a normal label, good.

      if (!ReachableBlocks.count(P)) {
        for (const CFGElement &Elem : llvm::reverse(*P)) {
          if (Optional<CFGStmt> CS = Elem.getAs<CFGStmt>()) {
            if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
              // Don't issue a warning for an unreachable fallthrough
              // attribute in template instantiations as it may not be
              // unreachable in all instantiations of the template.
              if (!IsTemplateInstantiation)
                S.Diag(AS->getBeginLoc(),
                       diag::warn_unreachable_fallthrough_attr);
              markFallthroughVisited(AS);
              ++AnnotatedCnt;
              break;
            }
            // Don't care about other unreachable statements.
          }
        }
        // If there are no unreachable statements, this may be a special
        // case in CFG:
        // case X: {
        //    A a;  // A has a destructor.
        //    break;
        // }
        // // <<<< This place is represented by a 'hanging' CFG block.
        // case Y:
        continue;
      }

      const Stmt *LastStmt = getLastStmt(*P);
      if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
        markFallthroughVisited(AS);
        ++AnnotatedCnt;
        continue; // Fallthrough annotation, good.
      }

      if (!LastStmt) { // This block contains no executable statements.
        // Traverse its predecessors.
        std::copy(P->pred_begin(), P->pred_end(),
                  std::back_inserter(BlockQueue));
        continue;
      }

      ++UnannotatedCnt;
    }
    return !!UnannotatedCnt;
  }

  // RecursiveASTVisitor setup.
  bool shouldWalkTypesOfTypeLocs() const { return false; }

  bool VisitAttributedStmt(AttributedStmt *S) {
    if (asFallThroughAttr(S))
      FallthroughStmts.insert(S);
    return true;
  }

  bool VisitSwitchStmt(SwitchStmt *S) {
    FoundSwitchStatements = true;
    return true;
  }

  // We don't want to traverse local type declarations. We analyze their
  // methods separately.
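  // Returning true without calling the base implementation skips the entire
  // subtree of D while letting the rest of the traversal continue.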
  bool TraverseDecl(Decl *D) { return true; }

  // We analyze lambda bodies separately. Skip them here.
  bool TraverseLambdaExpr(LambdaExpr *LE) {
    // Traverse the captures, but not the body.
    for (const auto C : zip(LE->captures(), LE->capture_inits()))
      TraverseLambdaCapture(LE, &std::get<0>(C), std::get<1>(C));
    return true;
  }

private:

  static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
    if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
      if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
        return AS;
    }
    return nullptr;
  }

  static const Stmt *getLastStmt(const CFGBlock &B) {
    if (const Stmt *Term = B.getTerminatorStmt())
      return Term;
    for (const CFGElement &Elem : llvm::reverse(B))
      if (Optional<CFGStmt> CS = Elem.getAs<CFGStmt>())
        return CS->getStmt();
    // Workaround to detect a statement thrown out by CFGBuilder:
    //   case X: {}  case Y:
    //   case X: ;   case Y:
    if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
      if (!isa<SwitchCase>(SW->getSubStmt()))
        return SW->getSubStmt();

    return nullptr;
  }

  bool FoundSwitchStatements;
  AttrStmts FallthroughStmts;
  Sema &S;
  llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
};
} // anonymous namespace

static StringRef getFallthroughAttrSpelling(Preprocessor &PP,
                                            SourceLocation Loc) {
  TokenValue FallthroughTokens[] = {
    tok::l_square, tok::l_square,
    PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  TokenValue ClangFallthroughTokens[] = {
    tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
    tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  bool PreferClangAttr = !PP.getLangOpts().CPlusPlus17 && !PP.getLangOpts().C2x;

  StringRef MacroName;
  if (PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PP.getLastMacroWithSpelling(Loc, FallthroughTokens);
  if (MacroName.empty() && !PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty()) {
    if (!PreferClangAttr)
      MacroName = "[[fallthrough]]";
    else if (PP.getLangOpts().CPlusPlus)
      MacroName = "[[clang::fallthrough]]";
    else
      MacroName = "__attribute__((fallthrough))";
  }
  return MacroName;
}

static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerFunction) {
  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  if (PerFunction && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  FM.fillReachableBlocks(Cfg);

  for (const CFGBlock *B : llvm::reverse(*Cfg)) {
    const Stmt *Label = B->getLabel();

    if (!isa_and_nonnull<SwitchCase>(Label))
      continue;

    int AnnotatedCnt;

    bool IsTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(AC.getDecl()))
      IsTemplateInstantiation = Function->isTemplateInstantiation();
    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt,
                                      IsTemplateInstantiation))
      continue;

    S.Diag(Label->getBeginLoc(),
           PerFunction
               ? diag::warn_unannotated_fallthrough_per_function
               : diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getBeginLoc();
      if (L.isMacroID())
        continue;

      const Stmt *Term = B->getTerminatorStmt();
      // Skip empty cases.
      while (B->empty() && !Term && B->succ_size() == 1) {
        B = *B->succ_begin();
        Term = B->getTerminatorStmt();
      }
      if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
        Preprocessor &PP = S.getPreprocessor();
        StringRef AnnotationSpelling = getFallthroughAttrSpelling(PP, L);
        SmallString<64> TextToInsert(AnnotationSpelling);
        TextToInsert += "; ";
        S.Diag(L, diag::note_insert_fallthrough_fixit)
            << AnnotationSpelling
            << FixItHint::CreateInsertion(L, TextToInsert);
      }
      S.Diag(L, diag::note_insert_break_fixit)
          << FixItHint::CreateInsertion(L, "break; ");
    }
  }

  for (const auto *F : FM.getFallthroughStmts())
    S.Diag(F->getBeginLoc(), diag::err_fallthrough_attr_invalid_placement);
}

static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
                     const Stmt *S) {
  assert(S);

  do {
    switch (S->getStmtClass()) {
    case Stmt::ForStmtClass:
    case Stmt::WhileStmtClass:
    case Stmt::CXXForRangeStmtClass:
    case Stmt::ObjCForCollectionStmtClass:
      return true;
    case Stmt::DoStmtClass: {
      Expr::EvalResult Result;
      if (!cast<DoStmt>(S)->getCond()->EvaluateAsInt(Result, Ctx))
        return true;
      return Result.Val.getInt().getBoolValue();
    }
    default:
      break;
    }
  } while ((S = PM.getParent(S)));

  return false;
}

static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
  typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
      StmtUsesPair;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are often
    // changed in loops.
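    // At this point UI designates the first unsafe read; everything before
    // it in the use vector is a write.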
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  SourceManager &SM = S.getSourceManager();
  llvm::sort(UsesByStmt,
             [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
               return SM.isBeforeInTranslationUnit(LHS.first->getBeginLoc(),
                                                   RHS.first->getBeginLoc());
             });

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (const auto &P : UsesByStmt) {
    const Stmt *FirstRead = P.first;
    const WeakObjectProfileTy &Key = P.second->first;
    const WeakUseVector &Uses = P.second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
    // may not contain enough information to determine that these are different
    // properties. We can only be 100% sure of a repeated use in certain cases,
    // and we adjust the diagnostic kind accordingly so that the less certain
    // case can be turned off if it is too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    const NamedDecl *KeyProp = Key.getProperty();
    if (isa<VarDecl>(KeyProp))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(KeyProp))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(KeyProp))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(KeyProp))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Do not warn about IBOutlet weak property receivers being set to null
    // since they are typically only used from the main thread.
    if (const ObjCPropertyDecl *Prop = dyn_cast<ObjCPropertyDecl>(KeyProp))
      if (Prop->hasAttr<IBOutletAttr>())
        continue;

    // Show the first time the object was read.
    S.Diag(FirstRead->getBeginLoc(), DiagKind)
        << int(ObjectKind) << KeyProp << int(FunctionKind)
        << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (const auto &Use : Uses) {
      if (Use.getUseExpr() == FirstRead)
        continue;
      S.Diag(Use.getUseExpr()->getBeginLoc(),
             diag::note_arc_weak_also_accessed_here)
          << Use.getUseExpr()->getSourceRange();
    }
  }
}

namespace clang {
namespace {
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
} // anonymous namespace
} // namespace clang

namespace {
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
  // Prefer using MapVector to DenseMap, so that iteration order will be
  // the same as insertion order. This is needed to obtain a deterministic
  // order of diagnostics when calling flushDiagnostics().
  typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
  UsesMap uses;
  UsesMap constRefUses;

public:
  UninitValsDiagReporter(Sema &S) : S(S) {}
  ~UninitValsDiagReporter() override { flushDiagnostics(); }

  MappedType &getUses(UsesMap &um, const VarDecl *vd) {
    MappedType &V = um[vd];
    if (!V.getPointer())
      V.setPointer(new UsesVec());
    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd,
                                 const UninitUse &use) override {
    getUses(uses, vd).getPointer()->push_back(use);
  }

  void handleConstRefUseOfUninitVariable(const VarDecl *vd,
                                         const UninitUse &use) override {
    getUses(constRefUses, vd).getPointer()->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) override {
    getUses(uses, vd).setInt(true);
    getUses(constRefUses, vd).setInt(true);
  }

  void flushDiagnostics() {
    for (const auto &P : uses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init. We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations. While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
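        // The comparator orders by use kind first, so the most confident
        // report for a variable is diagnosed, and then we stop.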
        llvm::sort(*vec, [](const UninitUse &a, const UninitUse &b) {
          // Prefer a more confident report over a less confident one.
          if (a.getKind() != b.getKind())
            return a.getKind() > b.getKind();
          return a.getUser()->getBeginLoc() < b.getUser()->getBeginLoc();
        });

        for (const auto &U : *vec) {
          // If we have self-init, downgrade all uses to 'may be uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    uses.clear();

    // Flush all const reference uses diags.
    for (const auto &P : constRefUses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        for (const auto &U : *vec) {
          if (DiagnoseUninitializedConstRefUse(S, vd, U))
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    constRefUses.clear();
  }

private:
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
    return llvm::any_of(*vec, [](const UninitUse &U) {
      return U.getKind() == UninitUse::Always ||
             U.getKind() == UninitUse::AfterCall ||
             U.getKind() == UninitUse::AfterDecl;
    });
  }
};

/// Inter-procedural data for the called-once checker.
class CalledOnceInterProceduralData {
public:
  // Add the delayed warning for the given block.
  void addDelayedWarning(const BlockDecl *Block,
                         PartialDiagnosticAt &&Warning) {
    DelayedBlockWarnings[Block].emplace_back(std::move(Warning));
  }
  // Report all of the warnings we've gathered for the given block.
  void flushWarnings(const BlockDecl *Block, Sema &S) {
    for (const PartialDiagnosticAt &Delayed : DelayedBlockWarnings[Block])
      S.Diag(Delayed.first, Delayed.second);

    discardWarnings(Block);
  }
  // Discard all of the warnings we've gathered for the given block.
  void discardWarnings(const BlockDecl *Block) {
    DelayedBlockWarnings.erase(Block);
  }

private:
  using DelayedDiagnostics = SmallVector<PartialDiagnosticAt, 2>;
  llvm::DenseMap<const BlockDecl *, DelayedDiagnostics> DelayedBlockWarnings;
};

class CalledOnceCheckReporter : public CalledOnceCheckHandler {
public:
  CalledOnceCheckReporter(Sema &S, CalledOnceInterProceduralData &Data)
      : S(S), Data(Data) {}
  void handleDoubleCall(const ParmVarDecl *Parameter, const Expr *Call,
                        const Expr *PrevCall, bool IsCompletionHandler,
                        bool Poised) override {
    auto DiagToReport = IsCompletionHandler
diag::warn_completion_handler_called_twice 1673 : diag::warn_called_once_gets_called_twice; 1674 S.Diag(Call->getBeginLoc(), DiagToReport) << Parameter; 1675 S.Diag(PrevCall->getBeginLoc(), diag::note_called_once_gets_called_twice) 1676 << Poised; 1677 } 1678 1679 void handleNeverCalled(const ParmVarDecl *Parameter, 1680 bool IsCompletionHandler) override { 1681 auto DiagToReport = IsCompletionHandler 1682 ? diag::warn_completion_handler_never_called 1683 : diag::warn_called_once_never_called; 1684 S.Diag(Parameter->getBeginLoc(), DiagToReport) 1685 << Parameter << /* Captured */ false; 1686 } 1687 1688 void handleNeverCalled(const ParmVarDecl *Parameter, const Decl *Function, 1689 const Stmt *Where, NeverCalledReason Reason, 1690 bool IsCalledDirectly, 1691 bool IsCompletionHandler) override { 1692 auto DiagToReport = IsCompletionHandler 1693 ? diag::warn_completion_handler_never_called_when 1694 : diag::warn_called_once_never_called_when; 1695 PartialDiagnosticAt Warning(Where->getBeginLoc(), S.PDiag(DiagToReport) 1696 << Parameter 1697 << IsCalledDirectly 1698 << (unsigned)Reason); 1699 1700 if (const auto *Block = dyn_cast<BlockDecl>(Function)) { 1701 // We shouldn't report these warnings on blocks immediately 1702 Data.addDelayedWarning(Block, std::move(Warning)); 1703 } else { 1704 S.Diag(Warning.first, Warning.second); 1705 } 1706 } 1707 1708 void handleCapturedNeverCalled(const ParmVarDecl *Parameter, 1709 const Decl *Where, 1710 bool IsCompletionHandler) override { 1711 auto DiagToReport = IsCompletionHandler 1712 ? diag::warn_completion_handler_never_called 1713 : diag::warn_called_once_never_called; 1714 S.Diag(Where->getBeginLoc(), DiagToReport) 1715 << Parameter << /* Captured */ true; 1716 } 1717 1718 void 1719 handleBlockThatIsGuaranteedToBeCalledOnce(const BlockDecl *Block) override { 1720 Data.flushWarnings(Block, S); 1721 } 1722 1723 void handleBlockWithNoGuarantees(const BlockDecl *Block) override { 1724 Data.discardWarnings(Block); 1725 } 1726 1727 private: 1728 Sema &S; 1729 CalledOnceInterProceduralData &Data; 1730 }; 1731 1732 constexpr unsigned CalledOnceWarnings[] = { 1733 diag::warn_called_once_never_called, 1734 diag::warn_called_once_never_called_when, 1735 diag::warn_called_once_gets_called_twice}; 1736 1737 constexpr unsigned CompletionHandlerWarnings[]{ 1738 diag::warn_completion_handler_never_called, 1739 diag::warn_completion_handler_never_called_when, 1740 diag::warn_completion_handler_called_twice}; 1741 1742 bool shouldAnalyzeCalledOnceImpl(llvm::ArrayRef<unsigned> DiagIDs, 1743 const DiagnosticsEngine &Diags, 1744 SourceLocation At) { 1745 return llvm::any_of(DiagIDs, [&Diags, At](unsigned DiagID) { 1746 return !Diags.isIgnored(DiagID, At); 1747 }); 1748 } 1749 1750 bool shouldAnalyzeCalledOnceConventions(const DiagnosticsEngine &Diags, 1751 SourceLocation At) { 1752 return shouldAnalyzeCalledOnceImpl(CompletionHandlerWarnings, Diags, At); 1753 } 1754 1755 bool shouldAnalyzeCalledOnceParameters(const DiagnosticsEngine &Diags, 1756 SourceLocation At) { 1757 return shouldAnalyzeCalledOnceImpl(CalledOnceWarnings, Diags, At) || 1758 shouldAnalyzeCalledOnceConventions(Diags, At); 1759 } 1760 } // anonymous namespace 1761 1762 //===----------------------------------------------------------------------===// 1763 // -Wthread-safety 1764 //===----------------------------------------------------------------------===// 1765 namespace clang { 1766 namespace threadSafety { 1767 namespace { 1768 class ThreadSafetyReporter : public 
clang::threadSafety::ThreadSafetyHandler {
1769   Sema &S;
1770   DiagList Warnings;
1771   SourceLocation FunLocation, FunEndLocation;
1772 
1773   const FunctionDecl *CurrentFunction;
1774   bool Verbose;
1775 
1776   OptionalNotes getNotes() const {
1777     if (Verbose && CurrentFunction) {
1778       PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
1779                                 S.PDiag(diag::note_thread_warning_in_fun)
1780                                     << CurrentFunction);
1781       return OptionalNotes(1, FNote);
1782     }
1783     return OptionalNotes();
1784   }
1785 
1786   OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
1787     OptionalNotes ONS(1, Note);
1788     if (Verbose && CurrentFunction) {
1789       PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
1790                                 S.PDiag(diag::note_thread_warning_in_fun)
1791                                     << CurrentFunction);
1792       ONS.push_back(std::move(FNote));
1793     }
1794     return ONS;
1795   }
1796 
1797   OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
1798                          const PartialDiagnosticAt &Note2) const {
1799     OptionalNotes ONS;
1800     ONS.push_back(Note1);
1801     ONS.push_back(Note2);
1802     if (Verbose && CurrentFunction) {
1803       PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
1804                                 S.PDiag(diag::note_thread_warning_in_fun)
1805                                     << CurrentFunction);
1806       ONS.push_back(std::move(FNote));
1807     }
1808     return ONS;
1809   }
1810 
1811   OptionalNotes makeLockedHereNote(SourceLocation LocLocked, StringRef Kind) {
1812     return LocLocked.isValid()
1813                ? getNotes(PartialDiagnosticAt(
1814                      LocLocked, S.PDiag(diag::note_locked_here) << Kind))
1815                : getNotes();
1816   }
1817 
1818   OptionalNotes makeUnlockedHereNote(SourceLocation LocUnlocked,
1819                                      StringRef Kind) {
1820     return LocUnlocked.isValid()
1821                ? getNotes(PartialDiagnosticAt(
1822                      LocUnlocked, S.PDiag(diag::note_unlocked_here) << Kind))
1823                : getNotes();
1824   }
1825 
1826 public:
1827   ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
1828       : S(S), FunLocation(FL), FunEndLocation(FEL),
1829         CurrentFunction(nullptr), Verbose(false) {}
1830 
1831   void setVerbose(bool b) { Verbose = b; }
1832 
1833   /// Emit all buffered diagnostics in order of source location.
1834   /// Diagnostics are buffered in the order the analysis produces them, not
1835   /// in source order, so this function sorts them by source location and
1836   /// then emits them.
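  ///
  /// A sketch of the intended usage (illustrative only; FL/FEL are the
  /// function's begin and end locations, as in the caller later in this file):
  /// \code
  ///   threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
  ///   // ... the analysis runs and handlers buffer into 'Warnings' ...
  ///   Reporter.emitDiagnostics(); // sorted flush: each warning, then notes
  /// \endcode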
1837 void emitDiagnostics() { 1838 Warnings.sort(SortDiagBySourceLocation(S.getSourceManager())); 1839 for (const auto &Diag : Warnings) { 1840 S.Diag(Diag.first.first, Diag.first.second); 1841 for (const auto &Note : Diag.second) 1842 S.Diag(Note.first, Note.second); 1843 } 1844 } 1845 1846 void handleInvalidLockExp(SourceLocation Loc) override { 1847 PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock) 1848 << Loc); 1849 Warnings.emplace_back(std::move(Warning), getNotes()); 1850 } 1851 1852 void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc, 1853 SourceLocation LocPreviousUnlock) override { 1854 if (Loc.isInvalid()) 1855 Loc = FunLocation; 1856 PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock) 1857 << Kind << LockName); 1858 Warnings.emplace_back(std::move(Warning), 1859 makeUnlockedHereNote(LocPreviousUnlock, Kind)); 1860 } 1861 1862 void handleIncorrectUnlockKind(StringRef Kind, Name LockName, 1863 LockKind Expected, LockKind Received, 1864 SourceLocation LocLocked, 1865 SourceLocation LocUnlock) override { 1866 if (LocUnlock.isInvalid()) 1867 LocUnlock = FunLocation; 1868 PartialDiagnosticAt Warning( 1869 LocUnlock, S.PDiag(diag::warn_unlock_kind_mismatch) 1870 << Kind << LockName << Received << Expected); 1871 Warnings.emplace_back(std::move(Warning), 1872 makeLockedHereNote(LocLocked, Kind)); 1873 } 1874 1875 void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked, 1876 SourceLocation LocDoubleLock) override { 1877 if (LocDoubleLock.isInvalid()) 1878 LocDoubleLock = FunLocation; 1879 PartialDiagnosticAt Warning(LocDoubleLock, S.PDiag(diag::warn_double_lock) 1880 << Kind << LockName); 1881 Warnings.emplace_back(std::move(Warning), 1882 makeLockedHereNote(LocLocked, Kind)); 1883 } 1884 1885 void handleMutexHeldEndOfScope(StringRef Kind, Name LockName, 1886 SourceLocation LocLocked, 1887 SourceLocation LocEndOfScope, 1888 LockErrorKind LEK) override { 1889 unsigned DiagID = 0; 1890 switch (LEK) { 1891 case LEK_LockedSomePredecessors: 1892 DiagID = diag::warn_lock_some_predecessors; 1893 break; 1894 case LEK_LockedSomeLoopIterations: 1895 DiagID = diag::warn_expecting_lock_held_on_loop; 1896 break; 1897 case LEK_LockedAtEndOfFunction: 1898 DiagID = diag::warn_no_unlock; 1899 break; 1900 case LEK_NotLockedAtEndOfFunction: 1901 DiagID = diag::warn_expecting_locked; 1902 break; 1903 } 1904 if (LocEndOfScope.isInvalid()) 1905 LocEndOfScope = FunEndLocation; 1906 1907 PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind 1908 << LockName); 1909 Warnings.emplace_back(std::move(Warning), 1910 makeLockedHereNote(LocLocked, Kind)); 1911 } 1912 1913 void handleExclusiveAndShared(StringRef Kind, Name LockName, 1914 SourceLocation Loc1, 1915 SourceLocation Loc2) override { 1916 PartialDiagnosticAt Warning(Loc1, 1917 S.PDiag(diag::warn_lock_exclusive_and_shared) 1918 << Kind << LockName); 1919 PartialDiagnosticAt Note(Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) 1920 << Kind << LockName); 1921 Warnings.emplace_back(std::move(Warning), getNotes(Note)); 1922 } 1923 1924 void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK, 1925 AccessKind AK, SourceLocation Loc) override { 1926 assert((POK == POK_VarAccess || POK == POK_VarDereference) && 1927 "Only works for variables"); 1928 unsigned DiagID = POK == POK_VarAccess? 
1929 diag::warn_variable_requires_any_lock: 1930 diag::warn_var_deref_requires_any_lock; 1931 PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) 1932 << D << getLockKindFromAccessKind(AK)); 1933 Warnings.emplace_back(std::move(Warning), getNotes()); 1934 } 1935 1936 void handleMutexNotHeld(StringRef Kind, const NamedDecl *D, 1937 ProtectedOperationKind POK, Name LockName, 1938 LockKind LK, SourceLocation Loc, 1939 Name *PossibleMatch) override { 1940 unsigned DiagID = 0; 1941 if (PossibleMatch) { 1942 switch (POK) { 1943 case POK_VarAccess: 1944 DiagID = diag::warn_variable_requires_lock_precise; 1945 break; 1946 case POK_VarDereference: 1947 DiagID = diag::warn_var_deref_requires_lock_precise; 1948 break; 1949 case POK_FunctionCall: 1950 DiagID = diag::warn_fun_requires_lock_precise; 1951 break; 1952 case POK_PassByRef: 1953 DiagID = diag::warn_guarded_pass_by_reference; 1954 break; 1955 case POK_PtPassByRef: 1956 DiagID = diag::warn_pt_guarded_pass_by_reference; 1957 break; 1958 } 1959 PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind 1960 << D 1961 << LockName << LK); 1962 PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match) 1963 << *PossibleMatch); 1964 if (Verbose && POK == POK_VarAccess) { 1965 PartialDiagnosticAt VNote(D->getLocation(), 1966 S.PDiag(diag::note_guarded_by_declared_here) 1967 << D->getDeclName()); 1968 Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote)); 1969 } else 1970 Warnings.emplace_back(std::move(Warning), getNotes(Note)); 1971 } else { 1972 switch (POK) { 1973 case POK_VarAccess: 1974 DiagID = diag::warn_variable_requires_lock; 1975 break; 1976 case POK_VarDereference: 1977 DiagID = diag::warn_var_deref_requires_lock; 1978 break; 1979 case POK_FunctionCall: 1980 DiagID = diag::warn_fun_requires_lock; 1981 break; 1982 case POK_PassByRef: 1983 DiagID = diag::warn_guarded_pass_by_reference; 1984 break; 1985 case POK_PtPassByRef: 1986 DiagID = diag::warn_pt_guarded_pass_by_reference; 1987 break; 1988 } 1989 PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind 1990 << D 1991 << LockName << LK); 1992 if (Verbose && POK == POK_VarAccess) { 1993 PartialDiagnosticAt Note(D->getLocation(), 1994 S.PDiag(diag::note_guarded_by_declared_here)); 1995 Warnings.emplace_back(std::move(Warning), getNotes(Note)); 1996 } else 1997 Warnings.emplace_back(std::move(Warning), getNotes()); 1998 } 1999 } 2000 2001 void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, 2002 SourceLocation Loc) override { 2003 PartialDiagnosticAt Warning(Loc, 2004 S.PDiag(diag::warn_acquire_requires_negative_cap) 2005 << Kind << LockName << Neg); 2006 Warnings.emplace_back(std::move(Warning), getNotes()); 2007 } 2008 2009 void handleNegativeNotHeld(const NamedDecl *D, Name LockName, 2010 SourceLocation Loc) override { 2011 PartialDiagnosticAt Warning( 2012 Loc, S.PDiag(diag::warn_fun_requires_negative_cap) << D << LockName); 2013 Warnings.emplace_back(std::move(Warning), getNotes()); 2014 } 2015 2016 void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName, 2017 SourceLocation Loc) override { 2018 PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex) 2019 << Kind << FunName << LockName); 2020 Warnings.emplace_back(std::move(Warning), getNotes()); 2021 } 2022 2023 void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name, 2024 SourceLocation Loc) override { 2025 PartialDiagnosticAt Warning(Loc, 2026 S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name); 2027 
Warnings.emplace_back(std::move(Warning), getNotes()); 2028 } 2029 2030 void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override { 2031 PartialDiagnosticAt Warning(Loc, 2032 S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name); 2033 Warnings.emplace_back(std::move(Warning), getNotes()); 2034 } 2035 2036 void enterFunction(const FunctionDecl* FD) override { 2037 CurrentFunction = FD; 2038 } 2039 2040 void leaveFunction(const FunctionDecl* FD) override { 2041 CurrentFunction = nullptr; 2042 } 2043 }; 2044 } // anonymous namespace 2045 } // namespace threadSafety 2046 } // namespace clang 2047 2048 //===----------------------------------------------------------------------===// 2049 // -Wconsumed 2050 //===----------------------------------------------------------------------===// 2051 2052 namespace clang { 2053 namespace consumed { 2054 namespace { 2055 class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase { 2056 2057 Sema &S; 2058 DiagList Warnings; 2059 2060 public: 2061 2062 ConsumedWarningsHandler(Sema &S) : S(S) {} 2063 2064 void emitDiagnostics() override { 2065 Warnings.sort(SortDiagBySourceLocation(S.getSourceManager())); 2066 for (const auto &Diag : Warnings) { 2067 S.Diag(Diag.first.first, Diag.first.second); 2068 for (const auto &Note : Diag.second) 2069 S.Diag(Note.first, Note.second); 2070 } 2071 } 2072 2073 void warnLoopStateMismatch(SourceLocation Loc, 2074 StringRef VariableName) override { 2075 PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch) << 2076 VariableName); 2077 2078 Warnings.emplace_back(std::move(Warning), OptionalNotes()); 2079 } 2080 2081 void warnParamReturnTypestateMismatch(SourceLocation Loc, 2082 StringRef VariableName, 2083 StringRef ExpectedState, 2084 StringRef ObservedState) override { 2085 2086 PartialDiagnosticAt Warning(Loc, S.PDiag( 2087 diag::warn_param_return_typestate_mismatch) << VariableName << 2088 ExpectedState << ObservedState); 2089 2090 Warnings.emplace_back(std::move(Warning), OptionalNotes()); 2091 } 2092 2093 void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState, 2094 StringRef ObservedState) override { 2095 2096 PartialDiagnosticAt Warning(Loc, S.PDiag( 2097 diag::warn_param_typestate_mismatch) << ExpectedState << ObservedState); 2098 2099 Warnings.emplace_back(std::move(Warning), OptionalNotes()); 2100 } 2101 2102 void warnReturnTypestateForUnconsumableType(SourceLocation Loc, 2103 StringRef TypeName) override { 2104 PartialDiagnosticAt Warning(Loc, S.PDiag( 2105 diag::warn_return_typestate_for_unconsumable_type) << TypeName); 2106 2107 Warnings.emplace_back(std::move(Warning), OptionalNotes()); 2108 } 2109 2110 void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState, 2111 StringRef ObservedState) override { 2112 2113 PartialDiagnosticAt Warning(Loc, S.PDiag( 2114 diag::warn_return_typestate_mismatch) << ExpectedState << ObservedState); 2115 2116 Warnings.emplace_back(std::move(Warning), OptionalNotes()); 2117 } 2118 2119 void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State, 2120 SourceLocation Loc) override { 2121 2122 PartialDiagnosticAt Warning(Loc, S.PDiag( 2123 diag::warn_use_of_temp_in_invalid_state) << MethodName << State); 2124 2125 Warnings.emplace_back(std::move(Warning), OptionalNotes()); 2126 } 2127 2128 void warnUseInInvalidState(StringRef MethodName, StringRef VariableName, 2129 StringRef State, SourceLocation Loc) override { 2130 2131 PartialDiagnosticAt Warning(Loc, 
S.PDiag(diag::warn_use_in_invalid_state) <<
2132                                 MethodName << VariableName << State);
2133 
2134     Warnings.emplace_back(std::move(Warning), OptionalNotes());
2135   }
2136 };
2137 } // anonymous namespace
2138 } // namespace consumed
2139 } // namespace clang
2140 
2141 //===----------------------------------------------------------------------===//
2142 // AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
2143 // warnings on a function, method, or block.
2144 //===----------------------------------------------------------------------===//
2145 
2146 sema::AnalysisBasedWarnings::Policy::Policy() {
2147   enableCheckFallThrough = 1;
2148   enableCheckUnreachable = 0;
2149   enableThreadSafetyAnalysis = 0;
2150   enableConsumedAnalysis = 0;
2151 }
2152 
2153 /// InterProceduralData aims to be a store of whatever data should be passed
2154 /// between analyses of different functions.
2155 ///
2156 /// At the moment, its primary goal is to make the information gathered during
2157 /// the analysis of the blocks available during the analysis of the enclosing
2158 /// function. This is important because blocks are analyzed before the
2159 /// enclosing function is even parsed fully, so it is not viable to access
2160 /// anything in the outer scope while analyzing the block. On the other hand,
2161 /// re-building the CFG for blocks and re-analyzing them when we do have all
2162 /// the information (i.e. during the analysis of the enclosing function) would
2163 /// be ill-designed.
2164 class sema::AnalysisBasedWarnings::InterProceduralData {
2165 public:
2166   // It is important to analyze blocks within functions because capturing
2167   // completion-handler parameters in blocks is a very common pattern.
2168   CalledOnceInterProceduralData CalledOnceData;
2169 };
2170 
2171 static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
2172   return (unsigned)!D.isIgnored(diag, SourceLocation());
2173 }
2174 
2175 sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
2176     : S(s), IPData(std::make_unique<InterProceduralData>()),
2177       NumFunctionsAnalyzed(0), NumFunctionsWithBadCFGs(0), NumCFGBlocks(0),
2178       MaxCFGBlocksPerFunction(0), NumUninitAnalysisFunctions(0),
2179       NumUninitAnalysisVariables(0), MaxUninitAnalysisVariablesPerFunction(0),
2180       NumUninitAnalysisBlockVisits(0),
2181       MaxUninitAnalysisBlockVisitsPerFunction(0) {
2182 
2183   using namespace diag;
2184   DiagnosticsEngine &D = S.getDiagnostics();
2185 
2186   DefaultPolicy.enableCheckUnreachable =
2187       isEnabled(D, warn_unreachable) || isEnabled(D, warn_unreachable_break) ||
2188       isEnabled(D, warn_unreachable_return) ||
2189       isEnabled(D, warn_unreachable_loop_increment);
2190 
2191   DefaultPolicy.enableThreadSafetyAnalysis = isEnabled(D, warn_double_lock);
2192 
2193   DefaultPolicy.enableConsumedAnalysis =
2194       isEnabled(D, warn_use_in_invalid_state);
2195 }
2196 
2197 // We need this here for std::unique_ptr with a forward-declared class.
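// A minimal sketch of the idiom (illustrative, hypothetical names): a
// std::unique_ptr<T> member only requires T to be complete where the owning
// class's destructor is defined, not where it is declared.
//
//   struct Impl;                       // forward declaration only
//   struct Owner {
//     std::unique_ptr<Impl> P;
//     ~Owner();                        // declared, but not defined inline
//   };
//   struct Impl {};                    // Impl is now complete...
//   Owner::~Owner() = default;         // ...so ~unique_ptr<Impl> can delete P.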
2198 sema::AnalysisBasedWarnings::~AnalysisBasedWarnings() = default;
2199 
2200 static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
2201   for (const auto &D : fscope->PossiblyUnreachableDiags)
2202     S.Diag(D.Loc, D.PD);
2203 }
2204 
2205 void clang::sema::AnalysisBasedWarnings::IssueWarnings(
2206     sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope,
2207     const Decl *D, QualType BlockType) {
2208 
2209   // We avoid doing analysis-based warnings when there are errors for
2210   // two reasons:
2211   // (1) The CFGs often can't be constructed (if the body is invalid), so
2212   //     don't bother trying.
2213   // (2) The code already has problems; running the analysis just takes more
2214   //     time.
2215   DiagnosticsEngine &Diags = S.getDiagnostics();
2216 
2217   // Do not run any analyses if the warnings are going to be ignored anyway.
2218   if (Diags.getIgnoreAllWarnings() ||
2219       (Diags.getSuppressSystemWarnings() &&
2220        S.SourceMgr.isInSystemHeader(D->getLocation())))
2221     return;
2222 
2223   // For code in dependent contexts, we'll do this at instantiation time.
2224   if (cast<DeclContext>(D)->isDependentContext())
2225     return;
2226 
2227   if (S.hasUncompilableErrorOccurred()) {
2228     // Flush out any possibly unreachable diagnostics.
2229     flushDiagnostics(S, fscope);
2230     return;
2231   }
2232 
2233   const Stmt *Body = D->getBody();
2234   assert(Body);
2235 
2236   // Construct the analysis context with the specified CFG build options.
2237   AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D);
2238 
2239   // Don't generate EH edges for CallExprs, to avoid both the n^2 explosion
2240   // of destructor edges that can result and the associated compile-time hit.
2241   AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
2242   AC.getCFGBuildOptions().AddEHEdges = false;
2243   AC.getCFGBuildOptions().AddInitializers = true;
2244   AC.getCFGBuildOptions().AddImplicitDtors = true;
2245   AC.getCFGBuildOptions().AddTemporaryDtors = true;
2246   AC.getCFGBuildOptions().AddCXXNewAllocator = false;
2247   AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;
2248 
2249   // Force certain expressions to appear as CFGElements in the CFG. This
2250   // is used to speed up various analyses.
2251   // FIXME: This isn't the right factoring. This is here for initial
2252   // prototyping, but we need a way for analyses to say what expressions they
2253   // expect to always be CFGElements and then fill in the BuildOptions
2254   // appropriately. This is essentially a layering violation.
2255   if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
2256       P.enableConsumedAnalysis) {
2257     // Unreachable code analysis and thread safety require a linearized CFG.
2258     AC.getCFGBuildOptions().setAllAlwaysAdd();
2259   }
2260   else {
2261     AC.getCFGBuildOptions()
2262         .setAlwaysAdd(Stmt::BinaryOperatorClass)
2263         .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
2264         .setAlwaysAdd(Stmt::BlockExprClass)
2265         .setAlwaysAdd(Stmt::CStyleCastExprClass)
2266         .setAlwaysAdd(Stmt::DeclRefExprClass)
2267         .setAlwaysAdd(Stmt::ImplicitCastExprClass)
2268         .setAlwaysAdd(Stmt::UnaryOperatorClass);
2269   }
2270 
2271   // Install the logical handler.
2272   llvm::Optional<LogicalErrorHandler> LEH;
2273   if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
2274     LEH.emplace(S);
2275     AC.getCFGBuildOptions().Observer = &*LEH;
2276   }
2277 
2278   // Emit delayed diagnostics.
2279   if (!fscope->PossiblyUnreachableDiags.empty()) {
2280     bool analyzed = false;
2281 
2282     // Register the expressions with the CFGBuilder.
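    // (A statement registered this way is forced to appear as a distinct
    // CFGElement, so getBlockForRegisteredExpression() below can map it back
    // to a basic block for the reachability query.)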
2283     for (const auto &D : fscope->PossiblyUnreachableDiags) {
2284       for (const Stmt *S : D.Stmts)
2285         AC.registerForcedBlockExpression(S);
2286     }
2287 
2288     if (AC.getCFG()) {
2289       analyzed = true;
2290       for (const auto &D : fscope->PossiblyUnreachableDiags) {
2291         bool AllReachable = true;
2292         for (const Stmt *S : D.Stmts) {
2293           const CFGBlock *block = AC.getBlockForRegisteredExpression(S);
2294           CFGReverseBlockReachabilityAnalysis *cra =
2295               AC.getCFGReachablityAnalysis();
2296           // FIXME: We should be able to assert that block is non-null, but
2297           // the CFG analysis can skip potentially-evaluated expressions in
2298           // edge cases; see test/Sema/vla-2.c.
2299           if (block && cra) {
2300             // Can this block be reached from the entry block?
2301             if (!cra->isReachable(&AC.getCFG()->getEntry(), block)) {
2302               AllReachable = false;
2303               break;
2304             }
2305           }
2306           // If we cannot map to a basic block, assume the statement is
2307           // reachable.
2308         }
2309 
2310         if (AllReachable)
2311           S.Diag(D.Loc, D.PD);
2312       }
2313     }
2314 
2315     if (!analyzed)
2316       flushDiagnostics(S, fscope);
2317   }
2318 
2319   // Warning: check for a missing 'return'.
2320   if (P.enableCheckFallThrough) {
2321     const CheckFallThroughDiagnostics &CD =
2322         (isa<BlockDecl>(D)
2323              ? CheckFallThroughDiagnostics::MakeForBlock()
2324              : (isa<CXXMethodDecl>(D) &&
2325                 cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
2326                 cast<CXXMethodDecl>(D)->getParent()->isLambda())
2327                    ? CheckFallThroughDiagnostics::MakeForLambda()
2328                    : (fscope->isCoroutine()
2329                           ? CheckFallThroughDiagnostics::MakeForCoroutine(D)
2330                           : CheckFallThroughDiagnostics::MakeForFunction(D)));
2331     CheckFallThroughForBody(S, D, Body, BlockType, CD, AC, fscope);
2332   }
2333 
2334   // Warning: check for unreachable code.
2335   if (P.enableCheckUnreachable) {
2336     // Only check for unreachable code on non-template instantiations.
2337     // Different template instantiations can effectively change the control
2338     // flow, and it is very difficult to prove that a snippet of code in a
2339     // template is unreachable for all instantiations.
2340     bool isTemplateInstantiation = false;
2341     if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
2342       isTemplateInstantiation = Function->isTemplateInstantiation();
2343     if (!isTemplateInstantiation)
2344       CheckUnreachable(S, AC);
2345   }
2346 
2347   // Check for thread safety violations.
2348   if (P.enableThreadSafetyAnalysis) {
2349     SourceLocation FL = AC.getDecl()->getLocation();
2350     SourceLocation FEL = AC.getDecl()->getEndLoc();
2351     threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
2352     if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getBeginLoc()))
2353       Reporter.setIssueBetaWarnings(true);
2354     if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getBeginLoc()))
2355       Reporter.setVerbose(true);
2356 
2357     threadSafety::runThreadSafetyAnalysis(AC, Reporter,
2358                                           &S.ThreadSafetyDeclCache);
2359     Reporter.emitDiagnostics();
2360   }
2361 
2362   // Check for violations of consumed properties.
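  // For reference, a hypothetical type this analysis would check, using the
  // attribute spellings from the Consumed-analysis documentation
  // (illustrative only):
  //
  //   class __attribute__((consumable(unconsumed))) File {
  //     void read()  __attribute__((callable_when("unconsumed")));
  //     void close() __attribute__((set_typestate(consumed)));
  //   };
  //
  // Calling read() after close() is what warnUseInInvalidState reports.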
2363   if (P.enableConsumedAnalysis) {
2364     consumed::ConsumedWarningsHandler WarningHandler(S);
2365     consumed::ConsumedAnalyzer Analyzer(WarningHandler);
2366     Analyzer.run(AC);
2367   }
2368 
2369   if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
2370       !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
2371       !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc()) ||
2372       !Diags.isIgnored(diag::warn_uninit_const_reference, D->getBeginLoc())) {
2373     if (CFG *cfg = AC.getCFG()) {
2374       UninitValsDiagReporter reporter(S);
2375       UninitVariablesAnalysisStats stats;
2376       std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
2377       runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
2378                                         reporter, stats);
2379 
2380       if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
2381         ++NumUninitAnalysisFunctions;
2382         NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
2383         NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
2384         MaxUninitAnalysisVariablesPerFunction =
2385             std::max(MaxUninitAnalysisVariablesPerFunction,
2386                      stats.NumVariablesAnalyzed);
2387         MaxUninitAnalysisBlockVisitsPerFunction =
2388             std::max(MaxUninitAnalysisBlockVisitsPerFunction,
2389                      stats.NumBlockVisits);
2390       }
2391     }
2392   }
2393 
2394   // Check for violations of "called once" parameter properties.
2395   if (S.getLangOpts().ObjC && !S.getLangOpts().CPlusPlus &&
2396       shouldAnalyzeCalledOnceParameters(Diags, D->getBeginLoc())) {
2397     if (AC.getCFG()) {
2398       CalledOnceCheckReporter Reporter(S, IPData->CalledOnceData);
2399       checkCalledOnceParameters(
2400           AC, Reporter,
2401           shouldAnalyzeCalledOnceConventions(Diags, D->getBeginLoc()));
2402     }
2403   }
2404 
2405   bool FallThroughDiagFull =
2406       !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getBeginLoc());
2407   bool FallThroughDiagPerFunction = !Diags.isIgnored(
2408       diag::warn_unannotated_fallthrough_per_function, D->getBeginLoc());
2409   if (FallThroughDiagFull || FallThroughDiagPerFunction ||
2410       fscope->HasFallthroughStmt) {
2411     DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
2412   }
2413 
2414   if (S.getLangOpts().ObjCWeak &&
2415       !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getBeginLoc()))
2416     diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
2417 
2418 
2419   // Check for infinite self-recursion in functions.
2420   if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
2421                        D->getBeginLoc())) {
2422     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
2423       checkRecursiveFunction(S, FD, Body, AC);
2424     }
2425   }
2426 
2427   // Check for a throw out of a non-throwing function.
2428   if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
2429     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
2430       if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
2431         checkThrowInNonThrowingFunc(S, FD, AC);
2432 
2433   // If none of the previous checks caused a CFG build, trigger one here
2434   // for the logical error handler.
2435   if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
2436     AC.getCFG();
2437   }
2438 
2439   // Collect statistics about the CFG if it was built.
2440   if (S.CollectStats && AC.isCFGBuilt()) {
2441     ++NumFunctionsAnalyzed;
2442     if (CFG *cfg = AC.getCFG()) {
2443       // If we successfully built a CFG for this context, record more
2444       // detailed information about it.
2445       NumCFGBlocks += cfg->getNumBlockIDs();
2446       MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
2447                                          cfg->getNumBlockIDs());
2448     } else {
2449       ++NumFunctionsWithBadCFGs;
2450     }
2451   }
2452 }
2453 
2454 void clang::sema::AnalysisBasedWarnings::PrintStats() const {
2455   llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
2456 
2457   unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
2458   unsigned AvgCFGBlocksPerFunction =
2459       !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
2460   llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
2461                << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
2462                << "  " << NumCFGBlocks << " CFG blocks built.\n"
2463                << "  " << AvgCFGBlocksPerFunction
2464                << " average CFG blocks per function.\n"
2465                << "  " << MaxCFGBlocksPerFunction
2466                << " max CFG blocks per function.\n";
2467 
2468   unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
2469       : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
2470   unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
2471       : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
2472   llvm::errs() << NumUninitAnalysisFunctions
2473                << " functions analyzed for uninitialized variables\n"
2474                << "  " << NumUninitAnalysisVariables << " variables analyzed.\n"
2475                << "  " << AvgUninitVariablesPerFunction
2476                << " average variables per function.\n"
2477                << "  " << MaxUninitAnalysisVariablesPerFunction
2478                << " max variables per function.\n"
2479                << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
2480                << "  " << AvgUninitBlockVisitsPerFunction
2481                << " average block visits per function.\n"
2482                << "  " << MaxUninitAnalysisBlockVisitsPerFunction
2483                << " max block visits per function.\n";
2484 }
2485