//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <deque>
#include <iterator>

using namespace clang;

//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
class UnreachableCodeHandler : public reachable_code::Callback {
  Sema &S;
  SourceRange PreviousSilenceableCondVal;

public:
  UnreachableCodeHandler(Sema &s) : S(s) {}

  void HandleUnreachable(reachable_code::UnreachableKind UK,
                         SourceLocation L,
                         SourceRange SilenceableCondVal,
                         SourceRange R1,
                         SourceRange R2) override {
    // Avoid reporting multiple unreachable code diagnostics that are
    // triggered by the same conditional value.
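    // For example, in 'if (0) { a(); b(); }' both calls are unreachable
    // because of the same constant condition, so only one diagnostic
    // (keyed on the condition's source range) should be emitted.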
    if (PreviousSilenceableCondVal.isValid() &&
        SilenceableCondVal.isValid() &&
        PreviousSilenceableCondVal == SilenceableCondVal)
      return;
    PreviousSilenceableCondVal = SilenceableCondVal;

    unsigned diag = diag::warn_unreachable;
    switch (UK) {
    case reachable_code::UK_Break:
      diag = diag::warn_unreachable_break;
      break;
    case reachable_code::UK_Return:
      diag = diag::warn_unreachable_return;
      break;
    case reachable_code::UK_Loop_Increment:
      diag = diag::warn_unreachable_loop_increment;
      break;
    case reachable_code::UK_Other:
      break;
    }

    S.Diag(L, diag) << R1 << R2;

    SourceLocation Open = SilenceableCondVal.getBegin();
    if (Open.isValid()) {
      SourceLocation Close = SilenceableCondVal.getEnd();
      Close = S.getLocForEndOfToken(Close);
      if (Close.isValid()) {
        S.Diag(Open, diag::note_unreachable_silence)
            << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (")
            << FixItHint::CreateInsertion(Close, ")");
      }
    }
  }
};
} // anonymous namespace

/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
  // As a heuristic prune all diagnostics not in the main file. Currently
  // the majority of warnings in headers are false positives. These
  // are largely caused by configuration state, e.g. preprocessor
  // defined code, etc.
  //
  // Note that this is also a performance optimization. Analyzing
  // headers many times can be expensive.
  if (!S.getSourceManager().isInMainFile(AC.getDecl()->getBeginLoc()))
    return;

  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC);
}

namespace {
/// Warn on logical operator errors in CFGBuilder.
class LogicalErrorHandler : public CFGCallback {
  Sema &S;

public:
  LogicalErrorHandler(Sema &S) : CFGCallback(), S(S) {}

  static bool HasMacroID(const Expr *E) {
    if (E->getExprLoc().isMacroID())
      return true;

    // Recurse to children.
    for (const Stmt *SubStmt : E->children())
      if (const Expr *SubExpr = dyn_cast_or_null<Expr>(SubStmt))
        if (HasMacroID(SubExpr))
          return true;

    return false;
  }

  void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_tautological_overlap_comparison)
        << DiagRange << isAlwaysTrue;
  }

  void compareBitwiseEquality(const BinaryOperator *B,
                              bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_always)
        << DiagRange << isAlwaysTrue;
  }
};
} // anonymous namespace

//===----------------------------------------------------------------------===//
// Check for infinite self-recursion in functions
//===----------------------------------------------------------------------===//

// Returns true if the function is called anywhere within the CFGBlock.
// For member functions, the additional condition of being called through
// the 'this' pointer is required.
static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
  // Process all the Stmt's in this block to find any calls to FD.
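  // For example, 'int f() { return f(); }' recurses on every path. A virtual
  // member call on an object other than 'this' is not counted, since it may
  // dispatch to an override rather than recurse.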
  for (const auto &B : Block) {
    if (B.getKind() != CFGElement::Statement)
      continue;

    const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
    if (!CE || !CE->getCalleeDecl() ||
        CE->getCalleeDecl()->getCanonicalDecl() != FD)
      continue;

    // Skip function calls which are qualified with a templated class.
    if (const DeclRefExpr *DRE =
            dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
      if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
        if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
            isa<TemplateSpecializationType>(NNS->getAsType())) {
          continue;
        }
      }
    }

    const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
    if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
        !MCE->getMethodDecl()->isVirtual())
      return true;
  }
  return false;
}

// Returns true if every path from the entry block passes through a call to FD.
static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {
  llvm::SmallPtrSet<CFGBlock *, 16> Visited;
  llvm::SmallVector<CFGBlock *, 16> WorkList;
  // Keep track of whether we found at least one recursive path.
  bool foundRecursion = false;

  const unsigned ExitID = cfg->getExit().getBlockID();

  // Seed the work list with the entry block.
  WorkList.push_back(&cfg->getEntry());

  while (!WorkList.empty()) {
    CFGBlock *Block = WorkList.pop_back_val();

    for (auto I = Block->succ_begin(), E = Block->succ_end(); I != E; ++I) {
      if (CFGBlock *SuccBlock = *I) {
        if (!Visited.insert(SuccBlock).second)
          continue;

        // Found a path to the exit node without a recursive call.
        if (ExitID == SuccBlock->getBlockID())
          return false;

        // If the successor block contains a recursive call, end analysis there.
        if (hasRecursiveCallInPath(FD, *SuccBlock)) {
          foundRecursion = true;
          continue;
        }

        WorkList.push_back(SuccBlock);
      }
    }
  }
  return foundRecursion;
}

static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
                                   const Stmt *Body, AnalysisDeclContext &AC) {
  FD = FD->getCanonicalDecl();

  // Only run on non-templated functions and non-templated members of
  // templated classes.
  if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate &&
      FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization)
    return;

  CFG *cfg = AC.getCFG();
  if (!cfg) return;

  // If the exit block is unreachable, skip processing the function.
  if (cfg->getExit().pred_empty())
    return;

  // Emit diagnostic if a recursive function call is detected for all paths.
  if (checkForRecursiveFunctionCall(FD, cfg))
    S.Diag(Body->getBeginLoc(), diag::warn_infinite_recursive_function);
}

//===----------------------------------------------------------------------===//
// Check for throw in a non-throwing function.
//===----------------------------------------------------------------------===//

/// Determine whether an exception thrown by E, unwinding from ThrowBlock,
/// can reach ExitBlock.
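///
/// For example, a 'throw 1;' nested in 'try { ... } catch (int) { ... }'
/// does not escape, while the same throw inside 'catch (const char *)'
/// would unwind past the handler and reach the function's exit.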
static bool throwEscapes(Sema &S, const CXXThrowExpr *E, CFGBlock &ThrowBlock,
                         CFG *Body) {
  SmallVector<CFGBlock *, 16> Stack;
  llvm::BitVector Queued(Body->getNumBlockIDs());

  Stack.push_back(&ThrowBlock);
  Queued[ThrowBlock.getBlockID()] = true;

  while (!Stack.empty()) {
    CFGBlock &UnwindBlock = *Stack.back();
    Stack.pop_back();

    for (auto &Succ : UnwindBlock.succs()) {
      if (!Succ.isReachable() || Queued[Succ->getBlockID()])
        continue;

      if (Succ->getBlockID() == Body->getExit().getBlockID())
        return true;

      if (auto *Catch =
              dyn_cast_or_null<CXXCatchStmt>(Succ->getLabel())) {
        QualType Caught = Catch->getCaughtType();
        if (Caught.isNull() || // catch (...) catches everything
            !E->getSubExpr() || // throw; is considered caught by any handler
            S.handlerCanCatch(Caught, E->getSubExpr()->getType()))
          // Exception doesn't escape via this path.
          break;
      } else {
        Stack.push_back(Succ);
        Queued[Succ->getBlockID()] = true;
      }
    }
  }

  return false;
}

static void visitReachableThrows(
    CFG *BodyCFG,
    llvm::function_ref<void(const CXXThrowExpr *, CFGBlock &)> Visit) {
  llvm::BitVector Reachable(BodyCFG->getNumBlockIDs());
  clang::reachable_code::ScanReachableFromBlock(&BodyCFG->getEntry(), Reachable);
  for (CFGBlock *B : *BodyCFG) {
    if (!Reachable[B->getBlockID()])
      continue;
    for (CFGElement &E : *B) {
      Optional<CFGStmt> S = E.getAs<CFGStmt>();
      if (!S)
        continue;
      if (auto *Throw = dyn_cast<CXXThrowExpr>(S->getStmt()))
        Visit(Throw, *B);
    }
  }
}

static void EmitDiagForCXXThrowInNonThrowingFunc(Sema &S, SourceLocation OpLoc,
                                                 const FunctionDecl *FD) {
  if (!S.getSourceManager().isInSystemHeader(OpLoc) &&
      FD->getTypeSourceInfo()) {
    S.Diag(OpLoc, diag::warn_throw_in_noexcept_func) << FD;
    if (S.getLangOpts().CPlusPlus11 &&
        (isa<CXXDestructorDecl>(FD) ||
         FD->getDeclName().getCXXOverloadedOperator() == OO_Delete ||
         FD->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete)) {
      if (const auto *Ty = FD->getTypeSourceInfo()->getType()->
                               getAs<FunctionProtoType>())
        S.Diag(FD->getLocation(), diag::note_throw_in_dtor)
            << !isa<CXXDestructorDecl>(FD) << !Ty->hasExceptionSpec()
            << FD->getExceptionSpecSourceRange();
    } else
      S.Diag(FD->getLocation(), diag::note_throw_in_function)
          << FD->getExceptionSpecSourceRange();
  }
}

static void checkThrowInNonThrowingFunc(Sema &S, const FunctionDecl *FD,
                                        AnalysisDeclContext &AC) {
  CFG *BodyCFG = AC.getCFG();
  if (!BodyCFG)
    return;
  if (BodyCFG->getExit().pred_empty())
    return;
  visitReachableThrows(BodyCFG, [&](const CXXThrowExpr *Throw, CFGBlock &Block) {
    if (throwEscapes(S, Throw, Block, BodyCFG))
      EmitDiagForCXXThrowInNonThrowingFunc(S, Throw->getThrowLoc(), FD);
  });
}

static bool isNoexcept(const FunctionDecl *FD) {
  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
  if (FPT->isNothrow() || FD->hasAttr<NoThrowAttr>())
    return true;
  return false;
}

//===----------------------------------------------------------------------===//
// Check for missing return value.
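// For example, -Wreturn-type fires on 'int f(bool b) { if (b) return 1; }'
// because control can fall off the end when 'b' is false.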
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,
  NeverFallThrough,
  MaybeFallThrough,
  AlwaysFallThrough,
  NeverFallThroughOrReturn
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (!cfg) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (const auto *B : *cfg) {
      if (!live[B->getBlockID()]) {
        if (B->pred_begin() == B->pred_end()) {
          const Stmt *Term = B->getTerminatorStmt();
          if (Term && isa<CXXTryStmt>(Term))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(B, live);
          continue;
        }
      }
    }

  // Now that we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator I =
           cfg->getExit().filtered_pred_start_end(FO);
       I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG. This is
    // normal. We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (ri->getAs<CFGStmt>())
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      const Stmt *Term = B.getTerminatorStmt();
      if (Term && isa<CXXTryStmt>(Term)) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
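      // (e.g. a body ending in 'label: ;' reaches the exit through a plain
      // edge).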
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = ri->castAs<CFGStmt>();
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S) || isa<CoreturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return. If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda, Coroutine } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn".
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
          diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForCoroutine(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn = 0;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_coroutine;
    D.diag_AlwaysFallThrough_HasNoReturn = 0;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_coroutine;
    D.funMode = Coroutine;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Block;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function,
                          FuncLoc)) &&
             (!HasNoReturn ||
              D.isIgnored(diag::warn_noreturn_function_has_return_expr,
                          FuncLoc)) &&
             (!ReturnsVoid ||
              D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc));
    }
    if (funMode == Coroutine) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function, FuncLoc) ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_coroutine,
                          FuncLoc)) &&
             (!HasNoReturn);
    }
    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn;
  }
};

} // anonymous namespace

/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
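///
/// For example, a block of type 'int (^)(void)' whose body can reach the
/// closing brace without returning a value is diagnosed here.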
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    QualType BlockType,
                                    const CheckFallThroughDiagnostics &CD,
                                    AnalysisDeclContext &AC,
                                    sema::FunctionScopeInfo *FSI) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;
  bool IsCoroutine = FSI->isCoroutine();

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (const auto *CBody = dyn_cast<CoroutineBodyStmt>(Body))
      ReturnsVoid = CBody->getFallthroughHandler() != nullptr;
    else
      ReturnsVoid = FD->getReturnType()->isVoidType();
    HasNoReturn = FD->isNoReturn();
  }
  else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getReturnType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    if (const FunctionType *FT =
            BlockType->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getReturnType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;
  SourceLocation LBrace = Body->getBeginLoc(), RBrace = Body->getEndLoc();
  auto EmitDiag = [&](SourceLocation Loc, unsigned DiagID) {
    if (IsCoroutine)
      S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType();
    else
      S.Diag(Loc, DiagID);
  };

  // cpu_dispatch functions permit empty function bodies for ICC compatibility.
  if (D->getAsFunction() && D->getAsFunction()->isCPUDispatchMultiVersion())
    return;

  // Either in a function body compound statement, or a function-try-block.
  switch (CheckFallThrough(AC)) {
  case UnknownFallThrough:
    break;

  case MaybeFallThrough:
    if (HasNoReturn)
      EmitDiag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      EmitDiag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid);
    break;
  case AlwaysFallThrough:
    if (HasNoReturn)
      EmitDiag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      EmitDiag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid);
    break;
  case NeverFallThroughOrReturn:
    if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD;
      } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD;
      } else {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn);
      }
    }
    break;
  case NeverFallThrough:
    break;
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
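///
/// For example, given the needle 'x' and the initializer of 'int x = x + 1;',
/// the visitor finds the self-reference on the right-hand side.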
class ContainsReference : public ConstEvaluatedExprVisitor<ContainsReference> {
  bool FoundReference;
  const DeclRefExpr *Needle;

public:
  typedef ConstEvaluatedExprVisitor<ContainsReference> Inherited;

  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
      : Inherited(Context), FoundReference(false), Needle(Needle) {}

  void VisitExpr(const Expr *E) {
    // Stop evaluating if we already have a reference.
    if (FoundReference)
      return;

    Inherited::VisitExpr(E);
  }

  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (E == Needle)
      FoundReference = true;
    else
      Inherited::VisitDeclRefExpr(E);
  }

  bool doesContainReference() const { return FoundReference; }
};
} // anonymous namespace

static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  QualType VariableTy = VD->getType().getCanonicalType();
  if (VariableTy->isBlockPointerType() &&
      !VD->hasAttr<BlocksAttr>()) {
    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization)
        << VD->getDeclName()
        << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
    return true;
  }

  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return false;

  // Don't suggest a fixit inside macros.
  if (VD->getEndLoc().isMacroID())
    return false;

  SourceLocation Loc = S.getLocForEndOfToken(VD->getEndLoc());

  // Suggest possible initialization (if any).
  std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
  if (Init.empty())
    return false;

  S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
      << FixItHint::CreateInsertion(Loc, Init);
  return true;
}

/// Create a fixit to remove an if-like statement, on the assumption that its
/// condition is CondVal.
static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
                          const Stmt *Else, bool CondVal,
                          FixItHint &Fixit1, FixItHint &Fixit2) {
  if (CondVal) {
    // If condition is always true, remove all but the 'then'.
    Fixit1 = FixItHint::CreateRemoval(
        CharSourceRange::getCharRange(If->getBeginLoc(), Then->getBeginLoc()));
    if (Else) {
      SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getEndLoc());
      Fixit2 =
          FixItHint::CreateRemoval(SourceRange(ElseKwLoc, Else->getEndLoc()));
    }
  } else {
    // If condition is always false, remove all but the 'else'.
    if (Else)
      Fixit1 = FixItHint::CreateRemoval(CharSourceRange::getCharRange(
          If->getBeginLoc(), Else->getBeginLoc()));
    else
      Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
  }
}

/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  switch (Use.getKind()) {
  case UninitUse::Always:
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
    return;

  case UninitUse::AfterDecl:
  case UninitUse::AfterCall:
    S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
        << const_cast<DeclContext*>(VD->getLexicalDeclContext())
        << VD->getSourceRange();
    S.Diag(Use.getUser()->getBeginLoc(), diag::note_uninit_var_use)
        << IsCapturedByBlock << Use.getUser()->getSourceRange();
    return;

  case UninitUse::Maybe:
  case UninitUse::Sometimes:
    // Carry on to report sometimes-uninitialized branches, if possible,
    // or a 'may be used uninitialized' diagnostic otherwise.
    break;
  }

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    StringRef Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. FIXME: Can this happen?
      continue;

    // "condition is true / condition is false".
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(
            SourceRange(BO->getBeginLoc(), BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::CXXForRangeStmtClass:
      if (I->Output == 1) {
        // The use occurs if a range-based for loop's body never executes.
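        // (e.g. when the range it iterates over is always empty.)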
        // That may be impossible, and there's no syntactic fix for this,
        // so treat it as a 'may be uninitialized' case.
        continue;
      }
      DiagKind = 1;
      Str = "for";
      Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange();
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock << DiagKind
        << Str << I->Output << Range;
    S.Diag(User->getBeginLoc(), diag::note_uninit_var_use)
        << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
          << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  if (!Diagnosed)
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
}

/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
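    // (e.g. 'int x = x; return x;' still warns on the use in the return.)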
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      ContainsReference CR(S.Context, DRE);
      CR.Visit(Initializer);
      if (CR.doesContainReference()) {
        S.Diag(DRE->getBeginLoc(), diag::warn_uninit_self_reference_in_init)
            << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getBeginLoc(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
          << VD->getDeclName()
          << VD->getType().getQualifiers().hasObjCLifetime();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getBeginLoc(), diag::note_var_declared_here)
        << VD->getDeclName();

  return true;
}

namespace {
class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
public:
  FallthroughMapper(Sema &S)
      : FoundSwitchStatements(false),
        S(S) {
  }

  bool foundSwitchStatements() const { return FoundSwitchStatements; }

  void markFallthroughVisited(const AttributedStmt *Stmt) {
    bool Found = FallthroughStmts.erase(Stmt);
    assert(Found);
    (void)Found;
  }

  typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;

  const AttrStmts &getFallthroughStmts() const {
    return FallthroughStmts;
  }

  void fillReachableBlocks(CFG *Cfg) {
    assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
    std::deque<const CFGBlock *> BlockQueue;

    ReachableBlocks.insert(&Cfg->getEntry());
    BlockQueue.push_back(&Cfg->getEntry());
    // Mark all case blocks reachable to avoid problems with switching on
    // constants, covered enums, etc.
    // These blocks can contain fall-through annotations, and we don't want to
    // issue a warn_fallthrough_attr_unreachable for them.
    for (const auto *B : *Cfg) {
      const Stmt *L = B->getLabel();
      if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
        BlockQueue.push_back(B);
    }

    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      for (CFGBlock::const_succ_iterator I = P->succ_begin(),
                                         E = P->succ_end();
           I != E; ++I) {
        if (*I && ReachableBlocks.insert(*I).second)
          BlockQueue.push_back(*I);
      }
    }
  }

  bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt,
                                 bool IsTemplateInstantiation) {
    assert(!ReachableBlocks.empty() && "ReachableBlocks empty");

    int UnannotatedCnt = 0;
    AnnotatedCnt = 0;

    std::deque<const CFGBlock*> BlockQueue(B.pred_begin(), B.pred_end());
    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      if (!P) continue;

      const Stmt *Term = P->getTerminatorStmt();
      if (Term && isa<SwitchStmt>(Term))
        continue; // Switch statement, good.

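      // e.g. in 'case A: case B: ...' the edge from the empty 'case A:'
      // label into 'case B:' is not a fall-through that needs annotation.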
      const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
      if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Previous case label has no statements, good.

      const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
      if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Case label is preceded with a normal label, good.

      if (!ReachableBlocks.count(P)) {
        for (CFGBlock::const_reverse_iterator ElemIt = P->rbegin(),
                                              ElemEnd = P->rend();
             ElemIt != ElemEnd; ++ElemIt) {
          if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) {
            if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
              // Don't issue a warning for an unreachable fallthrough
              // attribute in template instantiations as it may not be
              // unreachable in all instantiations of the template.
              if (!IsTemplateInstantiation)
                S.Diag(AS->getBeginLoc(),
                       diag::warn_fallthrough_attr_unreachable);
              markFallthroughVisited(AS);
              ++AnnotatedCnt;
              break;
            }
            // Don't care about other unreachable statements.
          }
        }
        // If there are no unreachable statements, this may be a special
        // case in CFG:
        // case X: {
        //   A a;  // A has a destructor.
        //   break;
        // }
        // // <<<< This place is represented by a 'hanging' CFG block.
        // case Y:
        continue;
      }

      const Stmt *LastStmt = getLastStmt(*P);
      if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
        markFallthroughVisited(AS);
        ++AnnotatedCnt;
        continue; // Fallthrough annotation, good.
      }

      if (!LastStmt) { // This block contains no executable statements.
        // Traverse its predecessors.
        std::copy(P->pred_begin(), P->pred_end(),
                  std::back_inserter(BlockQueue));
        continue;
      }

      ++UnannotatedCnt;
    }
    return !!UnannotatedCnt;
  }

  // RecursiveASTVisitor setup.
  bool shouldWalkTypesOfTypeLocs() const { return false; }

  bool VisitAttributedStmt(AttributedStmt *S) {
    if (asFallThroughAttr(S))
      FallthroughStmts.insert(S);
    return true;
  }

  bool VisitSwitchStmt(SwitchStmt *S) {
    FoundSwitchStatements = true;
    return true;
  }

  // We don't want to traverse local type declarations. We analyze their
  // methods separately.
  bool TraverseDecl(Decl *D) { return true; }

  // We analyze lambda bodies separately. Skip them here.
  bool TraverseLambdaExpr(LambdaExpr *LE) {
    // Traverse the captures, but not the body.
    for (const auto &C : zip(LE->captures(), LE->capture_inits()))
      TraverseLambdaCapture(LE, &std::get<0>(C), std::get<1>(C));
    return true;
  }

private:

  static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
    if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
      if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
        return AS;
    }
    return nullptr;
  }

  static const Stmt *getLastStmt(const CFGBlock &B) {
    if (const Stmt *Term = B.getTerminatorStmt())
      return Term;
    for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
                                          ElemEnd = B.rend();
         ElemIt != ElemEnd; ++ElemIt) {
      if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>())
        return CS->getStmt();
    }
    // Workaround to detect a statement thrown out by CFGBuilder:
    //   case X: {}  case Y:
    //   case X: ;   case Y:
    if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
      if (!isa<SwitchCase>(SW->getSubStmt()))
        return SW->getSubStmt();

    return nullptr;
  }

  bool FoundSwitchStatements;
  AttrStmts FallthroughStmts;
  Sema &S;
  llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
};
} // anonymous namespace

static StringRef getFallthroughAttrSpelling(Preprocessor &PP,
                                            SourceLocation Loc) {
  TokenValue FallthroughTokens[] = {
    tok::l_square, tok::l_square,
    PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  TokenValue ClangFallthroughTokens[] = {
    tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
    tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  bool PreferClangAttr = !PP.getLangOpts().CPlusPlus17;

  StringRef MacroName;
  if (PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PP.getLastMacroWithSpelling(Loc, FallthroughTokens);
  if (MacroName.empty() && !PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PreferClangAttr ? "[[clang::fallthrough]]" : "[[fallthrough]]";
  return MacroName;
}

static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerFunction) {
  // Only perform this analysis when using [[]] attributes. There is no good
  // workflow for this warning otherwise: without an attribute there is no
  // way to silence it. One could use pragmas, but as a general solution that
  // is gross and not in the spirit of this warning.
  //
  // NOTE: This is an intermediate solution. There are on-going discussions on
  // how to properly support this warning outside of C++11 with an annotation.
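  //
  // For example, under -Wimplicit-fallthrough:
  //   case 0:
  //     x = 1;
  //     [[fallthrough]];  // annotated: no warning at 'case 1'
  //   case 1:
  //     f(x);             // unannotated: warning at 'case 2'
  //   case 2:
  //     break;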
  if (!AC.getASTContext().getLangOpts().DoubleSquareBracketAttributes)
    return;

  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  if (PerFunction && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  FM.fillReachableBlocks(Cfg);

  for (const CFGBlock *B : llvm::reverse(*Cfg)) {
    const Stmt *Label = B->getLabel();

    if (!Label || !isa<SwitchCase>(Label))
      continue;

    int AnnotatedCnt;

    bool IsTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(AC.getDecl()))
      IsTemplateInstantiation = Function->isTemplateInstantiation();
    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt,
                                      IsTemplateInstantiation))
      continue;

    S.Diag(Label->getBeginLoc(),
           PerFunction ? diag::warn_unannotated_fallthrough_per_function
                       : diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getBeginLoc();
      if (L.isMacroID())
        continue;
      if (S.getLangOpts().CPlusPlus11) {
        const Stmt *Term = B->getTerminatorStmt();
        // Skip empty cases.
        while (B->empty() && !Term && B->succ_size() == 1) {
          B = *B->succ_begin();
          Term = B->getTerminatorStmt();
        }
        if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
          Preprocessor &PP = S.getPreprocessor();
          StringRef AnnotationSpelling = getFallthroughAttrSpelling(PP, L);
          SmallString<64> TextToInsert(AnnotationSpelling);
          TextToInsert += "; ";
          S.Diag(L, diag::note_insert_fallthrough_fixit) <<
              AnnotationSpelling <<
              FixItHint::CreateInsertion(L, TextToInsert);
        }
      }
      S.Diag(L, diag::note_insert_break_fixit) <<
          FixItHint::CreateInsertion(L, "break; ");
    }
  }

  for (const auto *F : FM.getFallthroughStmts())
    S.Diag(F->getBeginLoc(), diag::err_fallthrough_attr_invalid_placement);
}

static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
                     const Stmt *S) {
  assert(S);

  do {
    switch (S->getStmtClass()) {
    case Stmt::ForStmtClass:
    case Stmt::WhileStmtClass:
    case Stmt::CXXForRangeStmtClass:
    case Stmt::ObjCForCollectionStmtClass:
      return true;
    case Stmt::DoStmtClass: {
      Expr::EvalResult Result;
      if (!cast<DoStmt>(S)->getCond()->EvaluateAsInt(Result, Ctx))
        return true;
      return Result.Val.getInt().getBoolValue();
    }
    default:
      break;
    }
  } while ((S = PM.getParent(S)));

  return false;
}

static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
  typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
      StmtUsesPair;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
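  // For example, under ARC the two reads in
  //   [self.weakProp doA];
  //   [self.weakProp doB];
  // may see different values if the object is deallocated in between; this
  // is what -Warc-repeated-use-of-weak flags.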
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are often
    // changed in loops.
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  SourceManager &SM = S.getSourceManager();
  llvm::sort(UsesByStmt,
             [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
               return SM.isBeforeInTranslationUnit(LHS.first->getBeginLoc(),
                                                   RHS.first->getBeginLoc());
             });

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (const auto &P : UsesByStmt) {
    const Stmt *FirstRead = P.first;
    const WeakObjectProfileTy &Key = P.second->first;
    const WeakUseVector &Uses = P.second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
    // may not contain enough information to determine that these are different
    // properties. We can only be 100% sure of a repeated use in certain cases,
    // and we adjust the diagnostic kind accordingly so that the less certain
    // case can be turned off if it is too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    const NamedDecl *KeyProp = Key.getProperty();
    if (isa<VarDecl>(KeyProp))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(KeyProp))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(KeyProp))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(KeyProp))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Do not warn about IBOutlet weak property receivers being set to null
    // since they are typically only used from the main thread.
    if (const ObjCPropertyDecl *Prop = dyn_cast<ObjCPropertyDecl>(KeyProp))
      if (Prop->hasAttr<IBOutletAttr>())
        continue;

    // Show the first time the object was read.
    S.Diag(FirstRead->getBeginLoc(), DiagKind)
        << int(ObjectKind) << KeyProp << int(FunctionKind)
        << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (const auto &Use : Uses) {
      if (Use.getUseExpr() == FirstRead)
        continue;
      S.Diag(Use.getUseExpr()->getBeginLoc(),
             diag::note_arc_weak_also_accessed_here)
          << Use.getUseExpr()->getSourceRange();
    }
  }
}

namespace {
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
  // Prefer using MapVector to DenseMap, so that iteration order will be
  // the same as insertion order. This is needed to obtain a deterministic
  // order of diagnostics when calling flushDiagnostics().
  typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
  UsesMap uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S) {}
  ~UninitValsDiagReporter() override { flushDiagnostics(); }

  MappedType &getUses(const VarDecl *vd) {
    MappedType &V = uses[vd];
    if (!V.getPointer())
      V.setPointer(new UsesVec());
    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd,
                                 const UninitUse &use) override {
    getUses(vd).getPointer()->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) override {
    getUses(vd).setInt(true);
  }

  void flushDiagnostics() {
    for (const auto &P : uses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init. We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations. While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        llvm::sort(vec->begin(), vec->end(),
                   [](const UninitUse &a, const UninitUse &b) {
          // Prefer a more confident report over a less confident one.
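          // (Higher UninitUse::Kind values correspond to more confident
          // reports, so they sort first.)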
          if (a.getKind() != b.getKind())
            return a.getKind() > b.getKind();
          return a.getUser()->getBeginLoc() < b.getUser()->getBeginLoc();
        });

        for (const auto &U : *vec) {
          // If we have self-init, downgrade all uses to 'may be uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    uses.clear();
  }

private:
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
    return std::any_of(vec->begin(), vec->end(), [](const UninitUse &U) {
      return U.getKind() == UninitUse::Always ||
             U.getKind() == UninitUse::AfterCall ||
             U.getKind() == UninitUse::AfterDecl;
    });
  }
};
} // anonymous namespace

namespace clang {
namespace {
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
} // anonymous namespace
} // namespace clang

//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace threadSafety {
namespace {
class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  const FunctionDecl *CurrentFunction;
  bool Verbose;

  OptionalNotes getNotes() const {
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      return OptionalNotes(1, FNote);
    }
    return OptionalNotes();
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
    OptionalNotes ONS(1, Note);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
                         const PartialDiagnosticAt &Note2) const {
    OptionalNotes ONS;
    ONS.push_back(Note1);
    ONS.push_back(Note2);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes makeLockedHereNote(SourceLocation LocLocked, StringRef Kind) {
    return LocLocked.isValid()

namespace clang {
namespace {
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
} // anonymous namespace
} // namespace clang

//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace threadSafety {
namespace {
class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  const FunctionDecl *CurrentFunction;
  bool Verbose;

  OptionalNotes getNotes() const {
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      return OptionalNotes(1, FNote);
    }
    return OptionalNotes();
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
    OptionalNotes ONS(1, Note);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
                         const PartialDiagnosticAt &Note2) const {
    OptionalNotes ONS;
    ONS.push_back(Note1);
    ONS.push_back(Note2);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes makeLockedHereNote(SourceLocation LocLocked, StringRef Kind) {
    return LocLocked.isValid()
               ? getNotes(PartialDiagnosticAt(
                     LocLocked, S.PDiag(diag::note_locked_here) << Kind))
               : getNotes();
  }

public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
      : S(S), FunLocation(FL), FunEndLocation(FEL),
        CurrentFunction(nullptr), Verbose(false) {}

  void setVerbose(bool b) { Verbose = b; }

  /// Emit all buffered diagnostics in order of source location.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
                                         << Loc);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleUnmatchedUnlock(StringRef Kind, Name LockName,
                             SourceLocation Loc) override {
    if (Loc.isInvalid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock)
                                         << Kind << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
                                 LockKind Expected, LockKind Received,
                                 SourceLocation LocLocked,
                                 SourceLocation LocUnlock) override {
    if (LocUnlock.isInvalid())
      LocUnlock = FunLocation;
    PartialDiagnosticAt Warning(
        LocUnlock, S.PDiag(diag::warn_unlock_kind_mismatch)
                       << Kind << LockName << Received << Expected);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleDoubleLock(StringRef Kind, Name LockName,
                        SourceLocation LocLocked,
                        SourceLocation LocDoubleLock) override {
    if (LocDoubleLock.isInvalid())
      LocDoubleLock = FunLocation;
    PartialDiagnosticAt Warning(LocDoubleLock, S.PDiag(diag::warn_double_lock)
                                                   << Kind << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }
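
  // Illustrative only: with -Wthread-safety enabled, the unlock/lock handlers
  // above correspond to patterns such as (assuming the usual Mutex
  // annotations)
  //
  //   mu.Unlock();   // handleUnmatchedUnlock: releasing a capability not held
  //   mu.Lock();
  //   mu.Lock();     // handleDoubleLock: acquiring a capability already held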

  void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
                                 SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK) override {
    unsigned DiagID = 0;
    switch (LEK) {
    case LEK_LockedSomePredecessors:
      DiagID = diag::warn_lock_some_predecessors;
      break;
    case LEK_LockedSomeLoopIterations:
      DiagID = diag::warn_expecting_lock_held_on_loop;
      break;
    case LEK_LockedAtEndOfFunction:
      DiagID = diag::warn_no_unlock;
      break;
    case LEK_NotLockedAtEndOfFunction:
      DiagID = diag::warn_expecting_locked;
      break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
                                                               << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleExclusiveAndShared(StringRef Kind, Name LockName,
                                SourceLocation Loc1,
                                SourceLocation Loc2) override {
    PartialDiagnosticAt Warning(Loc1,
                                S.PDiag(diag::warn_lock_exclusive_and_shared)
                                    << Kind << LockName);
    PartialDiagnosticAt Note(Loc2,
                             S.PDiag(diag::note_lock_exclusive_and_shared)
                                 << Kind << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes(Note));
  }

  void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
                         ProtectedOperationKind POK, AccessKind AK,
                         SourceLocation Loc) override {
    assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
           "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess
                          ? diag::warn_variable_requires_any_lock
                          : diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
                                         << D << getLockKindFromAccessKind(AK));
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
                          ProtectedOperationKind POK, Name LockName,
                          LockKind LK, SourceLocation Loc,
                          Name *PossibleMatch) override {
    unsigned DiagID = 0;
    if (PossibleMatch) {
      switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock_precise;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock_precise;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock_precise;
        break;
      case POK_PassByRef:
        DiagID = diag::warn_guarded_pass_by_reference;
        break;
      case POK_PtPassByRef:
        DiagID = diag::warn_pt_guarded_pass_by_reference;
        break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << D
                                                       << LockName << LK);
      PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
                                        << *PossibleMatch);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt VNote(D->getLocation(),
                                  S.PDiag(diag::note_guarded_by_declared_here)
                                      << D->getNameAsString());
        Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
    } else {
      switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock;
        break;
      case POK_PassByRef:
        DiagID = diag::warn_guarded_pass_by_reference;
        break;
      case POK_PtPassByRef:
        DiagID = diag::warn_pt_guarded_pass_by_reference;
        break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << D
                                                       << LockName << LK);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt Note(D->getLocation(),
                                 S.PDiag(diag::note_guarded_by_declared_here));
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes());
    }
  }
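
  // Illustrative only: handleMutexNotHeld corresponds to touching a guarded
  // variable without holding the required capability, e.g. (assuming the
  // usual GUARDED_BY macro from the thread-safety annotation headers)
  //
  //   Mutex mu;
  //   int data GUARDED_BY(mu);
  //   void f() { data = 1; }   // warn_variable_requires_lock: 'mu' not held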

  void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_acquire_requires_negative_cap)
                 << Kind << LockName << Neg);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
                                         << Kind << FunName << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name,
                                SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_acquired_before)
                                         << Kind << L1Name << L2Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }
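
  // Illustrative only: handleLockAcquiredBefore is expected to fire when
  // locks are taken against a declared ordering, e.g. using the usual
  // ACQUIRED_AFTER annotation macro:
  //
  //   Mutex mu1;
  //   Mutex mu2 ACQUIRED_AFTER(mu1);
  //   void f() {
  //     mu2.Lock();
  //     mu1.Lock();   // warn_acquired_before: 'mu1' must be acquired
  //                   // before 'mu2'
  //   }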

  void enterFunction(const FunctionDecl *FD) override {
    CurrentFunction = FD;
  }

  void leaveFunction(const FunctionDecl *FD) override {
    CurrentFunction = nullptr;
  }
};
} // anonymous namespace
} // namespace threadSafety
} // namespace clang

//===----------------------------------------------------------------------===//
// -Wconsumed
//===----------------------------------------------------------------------===//

namespace clang {
namespace consumed {
namespace {
class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase {
  Sema &S;
  DiagList Warnings;

public:
  ConsumedWarningsHandler(Sema &S) : S(S) {}

  void emitDiagnostics() override {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void warnLoopStateMismatch(SourceLocation Loc,
                             StringRef VariableName) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch)
                                         << VariableName);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnParamReturnTypestateMismatch(SourceLocation Loc,
                                        StringRef VariableName,
                                        StringRef ExpectedState,
                                        StringRef ObservedState) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_param_return_typestate_mismatch)
                 << VariableName << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
                                  StringRef ObservedState) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_param_typestate_mismatch)
                 << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnReturnTypestateForUnconsumableType(SourceLocation Loc,
                                              StringRef TypeName) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_return_typestate_for_unconsumable_type)
                 << TypeName);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
                                   StringRef ObservedState) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_return_typestate_mismatch)
                 << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State,
                                   SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_use_of_temp_in_invalid_state)
                 << MethodName << State);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnUseInInvalidState(StringRef MethodName, StringRef VariableName,
                             StringRef State, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_use_in_invalid_state)
                                         << MethodName << VariableName
                                         << State);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }
};
} // anonymous namespace
} // namespace consumed
} // namespace clang
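
// Illustrative only: -Wconsumed diagnoses typestate misuse on types marked
// with the consumed-analysis attributes, along the lines of
//
//   class [[clang::consumable(unconsumed)]] File {
//   public:
//     File() [[clang::return_typestate(unconsumed)]];
//     void read()  [[clang::callable_when("unconsumed")]];
//     void close() [[clang::set_typestate(consumed)]];
//   };
//   void f() {
//     File file;
//     file.close();
//     file.read();   // warnUseInInvalidState: 'read' in the 'consumed' state
//   }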

//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//

clang::sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
  enableConsumedAnalysis = 0;
}

static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
  return (unsigned)!D.isIgnored(diag, SourceLocation());
}

clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
    : S(s),
      NumFunctionsAnalyzed(0),
      NumFunctionsWithBadCFGs(0),
      NumCFGBlocks(0),
      MaxCFGBlocksPerFunction(0),
      NumUninitAnalysisFunctions(0),
      NumUninitAnalysisVariables(0),
      MaxUninitAnalysisVariablesPerFunction(0),
      NumUninitAnalysisBlockVisits(0),
      MaxUninitAnalysisBlockVisitsPerFunction(0) {

  using namespace diag;
  DiagnosticsEngine &D = S.getDiagnostics();

  DefaultPolicy.enableCheckUnreachable =
      isEnabled(D, warn_unreachable) ||
      isEnabled(D, warn_unreachable_break) ||
      isEnabled(D, warn_unreachable_return) ||
      isEnabled(D, warn_unreachable_loop_increment);

  DefaultPolicy.enableThreadSafetyAnalysis =
      isEnabled(D, warn_double_lock);

  DefaultPolicy.enableConsumedAnalysis =
      isEnabled(D, warn_use_in_invalid_state);
}

static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
  for (const auto &D : fscope->PossiblyUnreachableDiags)
    S.Diag(D.Loc, D.PD);
}

void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, QualType BlockType) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis if we are going to just ignore them.
  if (Diags.getIgnoreAllWarnings() ||
      (Diags.getSuppressSystemWarnings() &&
       S.SourceMgr.isInSystemHeader(D->getLocation())))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (Diags.hasUncompilableErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  // Construct the analysis context with the specified CFG build options.
  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D);

  // Don't generate EH edges for CallExprs; we'd like to avoid the n^2
  // explosion of edges that destructors can cause, and the resulting
  // compile-time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;
  AC.getCFGBuildOptions().AddTemporaryDtors = true;
  AC.getCFGBuildOptions().AddCXXNewAllocator = false;
  AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;

  // Force that certain expressions appear as CFGElements in the CFG. This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring. This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately. This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
      P.enableConsumedAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  } else {
    AC.getCFGBuildOptions()
        .setAlwaysAdd(Stmt::BinaryOperatorClass)
        .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
        .setAlwaysAdd(Stmt::BlockExprClass)
        .setAlwaysAdd(Stmt::CStyleCastExprClass)
        .setAlwaysAdd(Stmt::DeclRefExprClass)
        .setAlwaysAdd(Stmt::ImplicitCastExprClass)
        .setAlwaysAdd(Stmt::UnaryOperatorClass)
        .setAlwaysAdd(Stmt::AttributedStmtClass);
  }

  // Install the logical handler for -Wtautological-overlap-compare.
  llvm::Optional<LogicalErrorHandler> LEH;
  if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
                       D->getBeginLoc())) {
    LEH.emplace(S);
    AC.getCFGBuildOptions().Observer = &*LEH;
  }
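
  // Illustrative only: the handler above reports comparisons whose value
  // ranges cannot overlap, e.g.
  //
  //   if (x > 5 && x < 3) {}   // warn_tautological_overlap_comparison:
  //                            // condition is always false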

  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (const auto &D : fscope->PossiblyUnreachableDiags) {
      for (const Stmt *S : D.Stmts)
        AC.registerForcedBlockExpression(S);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (const auto &D : fscope->PossiblyUnreachableDiags) {
        bool AllReachable = true;
        for (const Stmt *S : D.Stmts) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(S);
          CFGReverseBlockReachabilityAnalysis *cra =
              AC.getCFGReachablityAnalysis();
          // FIXME: We should be able to assert that block is non-null, but
          // the CFG analysis can skip potentially-evaluated expressions in
          // edge cases; see test/Sema/vla-2.c.
          if (block && cra) {
            // Can this block be reached from the entrance?
            if (!cra->isReachable(&AC.getCFG()->getEntry(), block)) {
              AllReachable = false;
              break;
            }
          }
          // If we cannot map to a basic block, assume the statement is
          // reachable.
        }

        if (AllReachable)
          S.Diag(D.Loc, D.PD);
      }
    }

    if (!analyzed)
      flushDiagnostics(S, fscope);
  }

  // Warning: check missing 'return'.
  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
        (isa<BlockDecl>(D)
             ? CheckFallThroughDiagnostics::MakeForBlock()
             : (isa<CXXMethodDecl>(D) &&
                cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
                cast<CXXMethodDecl>(D)->getParent()->isLambda())
                   ? CheckFallThroughDiagnostics::MakeForLambda()
                   : (fscope->isCoroutine()
                          ? CheckFallThroughDiagnostics::MakeForCoroutine(D)
                          : CheckFallThroughDiagnostics::MakeForFunction(D)));
    CheckFallThroughForBody(S, D, Body, BlockType, CD, AC, fscope);
  }
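
  // Illustrative only: the fall-through check above is what diagnoses a
  // non-void function that can run off the end of its body, e.g.
  //
  //   int f(bool b) { if (b) return 1; }   // falls off the end when !b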

  // Warning: check for unreachable code.
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the control
    // flow, and it is very difficult to prove that a snippet of code in a
    // template is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);
  }

  // Check for thread safety violations.
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getEndLoc();
    threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
    if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getBeginLoc()))
      Reporter.setIssueBetaWarnings(true);
    if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getBeginLoc()))
      Reporter.setVerbose(true);

    threadSafety::runThreadSafetyAnalysis(AC, Reporter,
                                          &S.ThreadSafetyDeclCache);
    Reporter.emitDiagnostics();
  }

  // Check for violations of consumed properties.
  if (P.enableConsumedAnalysis) {
    consumed::ConsumedWarningsHandler WarningHandler(S);
    consumed::ConsumedAnalyzer Analyzer(WarningHandler);
    Analyzer.run(AC);
  }

  if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc())) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  bool FallThroughDiagFull =
      !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getBeginLoc());
  bool FallThroughDiagPerFunction = !Diags.isIgnored(
      diag::warn_unannotated_fallthrough_per_function, D->getBeginLoc());
  if (FallThroughDiagFull || FallThroughDiagPerFunction ||
      fscope->HasFallthroughStmt) {
    DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
  }

  if (S.getLangOpts().ObjCWeak &&
      !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getBeginLoc()))
    diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());

  // Check for infinite self-recursion in functions.
  if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
                       D->getBeginLoc())) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      checkRecursiveFunction(S, FD, Body, AC);
    }
  }

  // Check for throw out of non-throwing function.
  if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
      if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
        checkThrowInNonThrowingFunc(S, FD, AC);

  // If none of the previous checks caused a CFG build, trigger one here
  // for -Wtautological-overlap-compare.
  if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
                       D->getBeginLoc())) {
    AC.getCFG();
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detailed information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}

void clang::sema::AnalysisBasedWarnings::PrintStats() const {
  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";

  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
  unsigned AvgCFGBlocksPerFunction =
      !NumCFGsBuilt ? 0 : NumCFGBlocks / NumCFGsBuilt;
  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
               << "  " << NumCFGBlocks << " CFG blocks built.\n"
               << "  " << AvgCFGBlocksPerFunction
               << " average CFG blocks per function.\n"
               << "  " << MaxCFGBlocksPerFunction
               << " max CFG blocks per function.\n";

  unsigned AvgUninitVariablesPerFunction =
      !NumUninitAnalysisFunctions
          ? 0
          : NumUninitAnalysisVariables / NumUninitAnalysisFunctions;
  unsigned AvgUninitBlockVisitsPerFunction =
      !NumUninitAnalysisFunctions
          ? 0
          : NumUninitAnalysisBlockVisits / NumUninitAnalysisFunctions;
  llvm::errs() << NumUninitAnalysisFunctions
               << " functions analyzed for uninitialized variables\n"
               << "  " << NumUninitAnalysisVariables << " variables analyzed.\n"
               << "  " << AvgUninitVariablesPerFunction
               << " average variables per function.\n"
               << "  " << MaxUninitAnalysisVariablesPerFunction
               << " max variables per function.\n"
               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
               << "  " << AvgUninitBlockVisitsPerFunction
               << " average block visits per function.\n"
               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
               << " max block visits per function.\n";
}