//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <deque>
#include <iterator>
#include <vector>

using namespace clang;

//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
class UnreachableCodeHandler : public reachable_code::Callback {
  Sema &S;

public:
  UnreachableCodeHandler(Sema &s) : S(s) {}

  void HandleUnreachable(reachable_code::UnreachableKind UK,
                         SourceLocation L,
                         SourceRange SilenceableCondVal,
                         SourceRange R1,
                         SourceRange R2) override {
    unsigned diag = diag::warn_unreachable;
    switch (UK) {
    case reachable_code::UK_Break:
      diag = diag::warn_unreachable_break;
      break;
    case reachable_code::UK_Return:
      diag = diag::warn_unreachable_return;
      break;
    case reachable_code::UK_Loop_Increment:
      diag = diag::warn_unreachable_loop_increment;
      break;
    case reachable_code::UK_Other:
      break;
    }

    S.Diag(L, diag) << R1 << R2;

    SourceLocation Open = SilenceableCondVal.getBegin();
    if (Open.isValid()) {
      SourceLocation Close = SilenceableCondVal.getEnd();
      Close = S.getLocForEndOfToken(Close);
      if (Close.isValid()) {
        S.Diag(Open, diag::note_unreachable_silence)
            << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (")
            << FixItHint::CreateInsertion(Close, ")");
      }
    }
  }
};
} // anonymous namespace
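// Illustrative example (not part of the handler itself): given
//
//   if (0) {          // warn_unreachable on the first dead statement
//     doSomething();
//   }
//
// the note above offers a fixit that rewrites the condition as
// "if (/* DISABLES CODE */ (0))", documenting that the dead code is
// intentional and silencing the warning.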
/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
  // As a heuristic, prune all diagnostics not in the main file. Currently
  // the majority of warnings in headers are false positives. These
  // are largely caused by configuration state, e.g. preprocessor
  // defined code, etc.
  //
  // Note that this is also a performance optimization. Analyzing
  // headers many times can be expensive.
  if (!S.getSourceManager().isInMainFile(AC.getDecl()->getLocStart()))
    return;

  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC);
}

namespace {
/// \brief Warn on logical operator errors in CFGBuilder
class LogicalErrorHandler : public CFGCallback {
  Sema &S;

public:
  LogicalErrorHandler(Sema &S) : CFGCallback(), S(S) {}

  static bool HasMacroID(const Expr *E) {
    if (E->getExprLoc().isMacroID())
      return true;

    // Recurse to children.
    for (const Stmt *SubStmt : E->children())
      if (const Expr *SubExpr = dyn_cast_or_null<Expr>(SubStmt))
        if (HasMacroID(SubExpr))
          return true;

    return false;
  }

  void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_tautological_overlap_comparison)
        << DiagRange << isAlwaysTrue;
  }

  void compareBitwiseEquality(const BinaryOperator *B,
                              bool isAlwaysTrue) override {
    if (HasMacroID(B))
      return;

    SourceRange DiagRange = B->getSourceRange();
    S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_always)
        << DiagRange << isAlwaysTrue;
  }
};
} // anonymous namespace
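// Illustrative examples (assumed minimal triggers, not exhaustive): the CFG
// builder invokes the callbacks above for comparisons it can fold, e.g.
//
//   if (x > 5 && x < 3) {}  // overlapping ranges: always false
//   if ((x & 8) == 4)   {}  // the masked value can never equal 4
//
// Both diagnostics are suppressed when any subexpression comes from a macro
// expansion (see HasMacroID), since macros frequently produce intentionally
// constant conditions.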
//===----------------------------------------------------------------------===//
// Check for infinite self-recursion in functions
//===----------------------------------------------------------------------===//

// Returns true if the function is called anywhere within the CFGBlock.
// For member functions, the additional condition of being called through
// the 'this' pointer is required.
static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
  // Process all the Stmt's in this block to find any calls to FD.
  for (const auto &B : Block) {
    if (B.getKind() != CFGElement::Statement)
      continue;

    const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
    if (!CE || !CE->getCalleeDecl() ||
        CE->getCalleeDecl()->getCanonicalDecl() != FD)
      continue;

    // Skip function calls which are qualified with a templated class.
    if (const DeclRefExpr *DRE =
            dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
      if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
        if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
            isa<TemplateSpecializationType>(NNS->getAsType())) {
          continue;
        }
      }
    }

    const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
    if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
        !MCE->getMethodDecl()->isVirtual())
      return true;
  }
  return false;
}

// All blocks are in one of three states. States are ordered so that blocks
// can only move to higher states.
enum RecursiveState {
  FoundNoPath,
  FoundPath,
  FoundPathWithNoRecursiveCall
};

// Returns true if there exists a path to the exit block and every path
// to the exit block passes through a call to FD.
static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {

  const unsigned ExitID = cfg->getExit().getBlockID();

  // Mark all nodes as FoundNoPath, then set the status of the entry block.
  SmallVector<RecursiveState, 16> States(cfg->getNumBlockIDs(), FoundNoPath);
  States[cfg->getEntry().getBlockID()] = FoundPathWithNoRecursiveCall;

  // Make the processing stack and seed it with the entry block.
  SmallVector<CFGBlock *, 16> Stack;
  Stack.push_back(&cfg->getEntry());

  while (!Stack.empty()) {
    CFGBlock *CurBlock = Stack.back();
    Stack.pop_back();

    unsigned ID = CurBlock->getBlockID();
    RecursiveState CurState = States[ID];

    if (CurState == FoundPathWithNoRecursiveCall) {
      // Found a path to the exit node without a recursive call.
      if (ExitID == ID)
        return false;

      // Only change state if the block has a recursive call.
      if (hasRecursiveCallInPath(FD, *CurBlock))
        CurState = FoundPath;
    }

    // Loop over successor blocks and add them to the Stack if their state
    // changes.
    for (auto I = CurBlock->succ_begin(), E = CurBlock->succ_end(); I != E; ++I)
      if (*I) {
        unsigned next_ID = (*I)->getBlockID();
        if (States[next_ID] < CurState) {
          States[next_ID] = CurState;
          Stack.push_back(*I);
        }
      }
  }

  // Return true if the exit node is reachable, and only reachable through
  // a recursive call.
  return States[ExitID] == FoundPath;
}

static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
                                   const Stmt *Body, AnalysisDeclContext &AC) {
  FD = FD->getCanonicalDecl();

  // Only run on non-templated functions and non-templated members of
  // templated classes.
  if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate &&
      FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization)
    return;

  CFG *cfg = AC.getCFG();
  if (!cfg) return;

  // If the exit block is unreachable, skip processing the function.
  if (cfg->getExit().pred_empty())
    return;

  // Emit diagnostic if a recursive function call is detected for all paths.
  if (checkForRecursiveFunctionCall(FD, cfg))
    S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function);
}
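// Illustrative example (assumed simplest trigger): every path through
//
//   int f(int x) { return f(x - 1); }  // warn_infinite_recursive_function
//
// reaches the exit block only via a recursive call, so the walk above leaves
// the exit block in state FoundPath. A conditional base case such as
// "if (x <= 0) return 0;" creates a FoundPathWithNoRecursiveCall path to the
// exit and suppresses the warning.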
//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,
  NeverFallThrough,
  MaybeFallThrough,
  AlwaysFallThrough,
  NeverFallThroughOrReturn
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (!cfg) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (const auto *B : *cfg) {
      if (!live[B->getBlockID()]) {
        if (B->pred_begin() == B->pred_end()) {
          if (B->getTerminator() && isa<CXXTryStmt>(B->getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(B, live);
          continue;
        }
      }
    }

  // Now that we know what is live, check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
           I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG. This is
    // normal. We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (ri->getAs<CFGStmt>())
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = ri->castAs<CFGStmt>();
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return. If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
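// Illustrative mapping of results to source patterns (assumed typical
// triggers):
//
//   int f(bool b) { if (b) return 1; }  // MaybeFallThrough:
//                                       //   warn_maybe_falloff_nonvoid_function
//   int g() {}                          // AlwaysFallThrough:
//                                       //   warn_falloff_nonvoid_function
//
// A body whose every live path ends in a return yields NeverFallThrough, and
// one that neither falls off the end nor returns yields
// NeverFallThroughOrReturn.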
namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn".
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
          diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Block;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
        diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
        diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
        diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.isIgnored(diag::warn_maybe_falloff_nonvoid_function,
                          FuncLoc)) &&
             (!HasNoReturn ||
              D.isIgnored(diag::warn_noreturn_function_has_return_expr,
                          FuncLoc)) &&
             (!ReturnsVoid ||
              D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc));
    }

    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn;
  }
};

} // anonymous namespace

/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics &CD,
                                    AnalysisDeclContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getReturnType()->isVoidType();
    HasNoReturn = FD->isNoReturn();
  } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getReturnType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  } else if (isa<BlockDecl>(D)) {
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
            BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getReturnType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;

  SourceLocation LBrace = Body->getLocStart(), RBrace = Body->getLocEnd();
  // Either in a function body compound statement, or a function-try-block.
  switch (CheckFallThrough(AC)) {
  case UnknownFallThrough:
    break;

  case MaybeFallThrough:
    if (HasNoReturn)
      S.Diag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      S.Diag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid);
    break;
  case AlwaysFallThrough:
    if (HasNoReturn)
      S.Diag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn);
    else if (!ReturnsVoid)
      S.Diag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid);
    break;
  case NeverFallThroughOrReturn:
    if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD;
      } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD;
      } else {
        S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn);
      }
    }
    break;
  case NeverFallThrough:
    break;
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
class ContainsReference : public ConstEvaluatedExprVisitor<ContainsReference> {
  bool FoundReference;
  const DeclRefExpr *Needle;

public:
  typedef ConstEvaluatedExprVisitor<ContainsReference> Inherited;

  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
      : Inherited(Context), FoundReference(false), Needle(Needle) {}

  void VisitExpr(const Expr *E) {
    // Stop evaluating if we already have a reference.
    if (FoundReference)
      return;

    Inherited::VisitExpr(E);
  }

  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (E == Needle)
      FoundReference = true;
    else
      Inherited::VisitDeclRefExpr(E);
  }

  bool doesContainReference() const { return FoundReference; }
};
} // anonymous namespace
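// Illustrative example (assumed classic trigger): ContainsReference is what
// lets DiagnoseUninitializedUse below recognize
//
//   int x = x + 1;  // warn_uninit_self_reference_in_init
//
// where the variable being initialized appears in an evaluated part of its
// own initializer (the bare "int x = x;" idiom is deliberately exempted; see
// the comment in DiagnoseUninitializedUse).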
static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  QualType VariableTy = VD->getType().getCanonicalType();
  if (VariableTy->isBlockPointerType() && !VD->hasAttr<BlocksAttr>()) {
    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization)
        << VD->getDeclName()
        << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
    return true;
  }

  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return false;

  // Don't suggest a fixit inside macros.
  if (VD->getLocEnd().isMacroID())
    return false;

  SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());

  // Suggest possible initialization (if any).
  std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
  if (Init.empty())
    return false;

  S.Diag(Loc, diag::note_var_fixit_add_initialization)
      << VD->getDeclName() << FixItHint::CreateInsertion(Loc, Init);
  return true;
}

/// Create a fixit to remove an if-like statement, on the assumption that its
/// condition is CondVal.
static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
                          const Stmt *Else, bool CondVal,
                          FixItHint &Fixit1, FixItHint &Fixit2) {
  if (CondVal) {
    // If condition is always true, remove all but the 'then'.
    Fixit1 = FixItHint::CreateRemoval(
        CharSourceRange::getCharRange(If->getLocStart(),
                                      Then->getLocStart()));
    if (Else) {
      SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getLocEnd());
      Fixit2 = FixItHint::CreateRemoval(
          SourceRange(ElseKwLoc, Else->getLocEnd()));
    }
  } else {
    // If condition is always false, remove all but the 'else'.
    if (Else)
      Fixit1 = FixItHint::CreateRemoval(
          CharSourceRange::getCharRange(If->getLocStart(),
                                        Else->getLocStart()));
    else
      Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
  }
}
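// Illustrative example (assumed typical shape of the suggestion): given
//
//   int x;
//   if (b) x = 1;
//   use(x);         // 'x' may be uninitialized when 'b' is false
//
// the notes built from these fixits offer to remove the dead condition: if
// 'b' were known true, drop "if (b) " and keep the then-branch; if known
// false, drop the whole 'if' (or keep only the else-branch when one exists).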
/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  switch (Use.getKind()) {
  case UninitUse::Always:
    S.Diag(Use.getUser()->getLocStart(), diag::warn_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
    return;

  case UninitUse::AfterDecl:
  case UninitUse::AfterCall:
    S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
        << const_cast<DeclContext*>(VD->getLexicalDeclContext())
        << VD->getSourceRange();
    S.Diag(Use.getUser()->getLocStart(), diag::note_uninit_var_use)
        << IsCapturedByBlock << Use.getUser()->getSourceRange();
    return;

  case UninitUse::Maybe:
  case UninitUse::Sometimes:
    // Carry on to report sometimes-uninitialized branches, if possible,
    // or a 'may be used uninitialized' diagnostic otherwise.
    break;
  }

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    StringRef Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. FIXME: Can this happen?
      continue;

    // "condition is true / condition is false".
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
                                                      BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::CXXForRangeStmtClass:
      if (I->Output == 1) {
        // The use occurs if a range-based for loop's body never executes.
        // That may be impossible, and there's no syntactic fix for this,
        // so treat it as a 'may be uninitialized' case.
        continue;
      }
      DiagKind = 1;
      Str = "for";
      Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange();
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock << DiagKind
        << Str << I->Output << Range;
    S.Diag(User->getLocStart(), diag::note_uninit_var_use)
        << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
          << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  if (!Diagnosed)
    S.Diag(Use.getUser()->getLocStart(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
}
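// Illustrative example (assumed representative diagnostics): for
//
//   int x;
//   if (b)
//     x = 1;
//   return x;
//
// the Sometimes path reports "variable 'x' is used uninitialized whenever
// 'if' condition is false" (warn_sometimes_uninit_var), notes the use at
// 'return x', and attaches the condition-removal fixits built above. An
// unconditionally uninitialized use is reported via warn_uninit_var instead.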
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was
/// diagnosed as a warning. If a particular use is one we omit warnings for,
/// returns false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      ContainsReference CR(S.Context, DRE);
      CR.Visit(Initializer);
      if (CR.doesContainReference()) {
        S.Diag(DRE->getLocStart(),
               diag::warn_uninit_self_reference_in_init)
            << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getLocStart(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
          << VD->getDeclName();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getLocStart(), diag::note_var_declared_here)
        << VD->getDeclName();

  return true;
}

namespace {
class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
public:
  FallthroughMapper(Sema &S)
      : FoundSwitchStatements(false),
        S(S) {
  }

  bool foundSwitchStatements() const { return FoundSwitchStatements; }

  void markFallthroughVisited(const AttributedStmt *Stmt) {
    bool Found = FallthroughStmts.erase(Stmt);
    assert(Found);
    (void)Found;
  }

  typedef llvm::SmallPtrSet<const AttributedStmt *, 8> AttrStmts;

  const AttrStmts &getFallthroughStmts() const {
    return FallthroughStmts;
  }

  void fillReachableBlocks(CFG *Cfg) {
    assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
    std::deque<const CFGBlock *> BlockQueue;

    ReachableBlocks.insert(&Cfg->getEntry());
    BlockQueue.push_back(&Cfg->getEntry());
    // Mark all case blocks reachable to avoid problems with switching on
    // constants, covered enums, etc.
    // These blocks can contain fall-through annotations, and we don't want to
    // issue a warn_fallthrough_attr_unreachable for them.
    for (const auto *B : *Cfg) {
      const Stmt *L = B->getLabel();
      if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
        BlockQueue.push_back(B);
    }

    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      for (CFGBlock::const_succ_iterator I = P->succ_begin(),
                                         E = P->succ_end();
           I != E; ++I) {
        if (*I && ReachableBlocks.insert(*I).second)
          BlockQueue.push_back(*I);
      }
    }
  }

  bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
    assert(!ReachableBlocks.empty() && "ReachableBlocks empty");

    int UnannotatedCnt = 0;
    AnnotatedCnt = 0;

    std::deque<const CFGBlock *> BlockQueue(B.pred_begin(), B.pred_end());
    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      if (!P) continue;

      const Stmt *Term = P->getTerminator();
      if (Term && isa<SwitchStmt>(Term))
        continue; // Switch statement, good.

      const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
      if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Previous case label has no statements, good.

      const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
      if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Case label is preceded by a normal label, good.

      if (!ReachableBlocks.count(P)) {
        for (CFGBlock::const_reverse_iterator ElemIt = P->rbegin(),
                                              ElemEnd = P->rend();
             ElemIt != ElemEnd; ++ElemIt) {
          if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) {
            if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
              S.Diag(AS->getLocStart(),
                     diag::warn_fallthrough_attr_unreachable);
              markFallthroughVisited(AS);
              ++AnnotatedCnt;
              break;
            }
            // Don't care about other unreachable statements.
          }
        }
        // If there are no unreachable statements, this may be a special
        // case in CFG:
        //   case X: {
        //     A a;  // A has a destructor.
        //     break;
        //   }
        //   // <<<< This place is represented by a 'hanging' CFG block.
        //   case Y:
        continue;
      }

      const Stmt *LastStmt = getLastStmt(*P);
      if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
        markFallthroughVisited(AS);
        ++AnnotatedCnt;
        continue; // Fallthrough annotation, good.
      }

      if (!LastStmt) { // This block contains no executable statements.
        // Traverse its predecessors.
        std::copy(P->pred_begin(), P->pred_end(),
                  std::back_inserter(BlockQueue));
        continue;
      }

      ++UnannotatedCnt;
    }
    return !!UnannotatedCnt;
  }

  // RecursiveASTVisitor setup.
  bool shouldWalkTypesOfTypeLocs() const { return false; }

  bool VisitAttributedStmt(AttributedStmt *S) {
    if (asFallThroughAttr(S))
      FallthroughStmts.insert(S);
    return true;
  }

  bool VisitSwitchStmt(SwitchStmt *S) {
    FoundSwitchStatements = true;
    return true;
  }

  // We don't want to traverse local type declarations. We analyze their
  // methods separately.
  bool TraverseDecl(Decl *D) { return true; }

  // We analyze lambda bodies separately. Skip them here.
  bool TraverseLambdaBody(LambdaExpr *LE) { return true; }

private:

  static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
    if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
      if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
        return AS;
    }
    return nullptr;
  }

  static const Stmt *getLastStmt(const CFGBlock &B) {
    if (const Stmt *Term = B.getTerminator())
      return Term;
    for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
                                          ElemEnd = B.rend();
         ElemIt != ElemEnd; ++ElemIt) {
      if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>())
        return CS->getStmt();
    }
    // Workaround to detect a statement thrown out by CFGBuilder:
    //   case X: {}  case Y:
    //   case X: ;   case Y:
    if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
      if (!isa<SwitchCase>(SW->getSubStmt()))
        return SW->getSubStmt();

    return nullptr;
  }

  bool FoundSwitchStatements;
  AttrStmts FallthroughStmts;
  Sema &S;
  llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
};
} // anonymous namespace

static StringRef getFallthroughAttrSpelling(Preprocessor &PP,
                                            SourceLocation Loc) {
  TokenValue FallthroughTokens[] = {
    tok::l_square, tok::l_square,
    PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  TokenValue ClangFallthroughTokens[] = {
    tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
    tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  bool PreferClangAttr = !PP.getLangOpts().CPlusPlus1z;

  StringRef MacroName;
  if (PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PP.getLastMacroWithSpelling(Loc, FallthroughTokens);
  if (MacroName.empty() && !PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PreferClangAttr ? "[[clang::fallthrough]]" : "[[fallthrough]]";
  return MacroName;
}
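// Illustrative example (assumed common user-defined macro): if the
// translation unit contains
//
//   #define FALLTHROUGH [[clang::fallthrough]]
//
// getLastMacroWithSpelling finds it, and the fixit emitted below inserts
// "FALLTHROUGH;" rather than the raw attribute; otherwise the attribute
// spelling itself is suggested ([[fallthrough]] in C++1z mode,
// [[clang::fallthrough]] before that).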
static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerFunction) {
  // Only perform this analysis when using C++11. There is no good workflow
  // for this warning when not using C++11: with no attribute available, there
  // is no good way to silence it short of pragmas, which as a general
  // solution is gross and not in the spirit of this warning.
  //
  // NOTE: This is an intermediate solution. There are on-going discussions on
  // how to properly support this warning outside of C++11 with an annotation.
  if (!AC.getASTContext().getLangOpts().CPlusPlus11)
    return;

  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  if (PerFunction && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  FM.fillReachableBlocks(Cfg);

  for (const CFGBlock *B : llvm::reverse(*Cfg)) {
    const Stmt *Label = B->getLabel();

    if (!Label || !isa<SwitchCase>(Label))
      continue;

    int AnnotatedCnt;

    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt))
      continue;

    S.Diag(Label->getLocStart(),
           PerFunction ? diag::warn_unannotated_fallthrough_per_function
                       : diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getLocStart();
      if (L.isMacroID())
        continue;
      if (S.getLangOpts().CPlusPlus11) {
        const Stmt *Term = B->getTerminator();
        // Skip empty cases.
        while (B->empty() && !Term && B->succ_size() == 1) {
          B = *B->succ_begin();
          Term = B->getTerminator();
        }
        if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
          Preprocessor &PP = S.getPreprocessor();
          StringRef AnnotationSpelling = getFallthroughAttrSpelling(PP, L);
          SmallString<64> TextToInsert(AnnotationSpelling);
          TextToInsert += "; ";
          S.Diag(L, diag::note_insert_fallthrough_fixit)
              << AnnotationSpelling
              << FixItHint::CreateInsertion(L, TextToInsert);
        }
      }
      S.Diag(L, diag::note_insert_break_fixit)
          << FixItHint::CreateInsertion(L, "break; ");
    }
  }

  for (const auto *F : FM.getFallthroughStmts())
    S.Diag(F->getLocStart(), diag::err_fallthrough_attr_invalid_placement);
}
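// Illustrative example (assumed minimal trigger): in C++11 mode,
//
//   switch (n) {
//   case 0:
//     ++n;        // warn_unannotated_fallthrough reported at the next label
//   case 1:
//     break;
//   }
//
// the notes then offer two fixits before 'case 1': insert the fallthrough
// annotation (or a macro spelling of it), or insert "break; ".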
static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
                     const Stmt *S) {
  assert(S);

  do {
    switch (S->getStmtClass()) {
    case Stmt::ForStmtClass:
    case Stmt::WhileStmtClass:
    case Stmt::CXXForRangeStmtClass:
    case Stmt::ObjCForCollectionStmtClass:
      return true;
    case Stmt::DoStmtClass: {
      const Expr *Cond = cast<DoStmt>(S)->getCond();
      llvm::APSInt Val;
      if (!Cond->EvaluateAsInt(Val, Ctx))
        return true;
      return Val.getBoolValue();
    }
    default:
      break;
    }
  } while ((S = PM.getParent(S)));

  return false;
}

static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
  typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
      StmtUsesPair;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are
    // often changed in loops.
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  SourceManager &SM = S.getSourceManager();
  std::sort(UsesByStmt.begin(), UsesByStmt.end(),
            [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
    return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
                                        RHS.first->getLocStart());
  });

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (const auto &P : UsesByStmt) {
    const Stmt *FirstRead = P.first;
    const WeakObjectProfileTy &Key = P.second->first;
    const WeakUseVector &Uses = P.second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c',
    // WeakObjectProfileTy may not contain enough information to determine
    // that these are different properties. We can only be 100% sure of a
    // repeated use in certain cases, and we adjust the diagnostic kind
    // accordingly so that the less certain case can be turned off if it is
    // too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and
    // warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    const NamedDecl *KeyProp = Key.getProperty();
    if (isa<VarDecl>(KeyProp))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(KeyProp))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(KeyProp))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(KeyProp))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Do not warn about IBOutlet weak property receivers being set to null
    // since they are typically only used from the main thread.
    if (const ObjCPropertyDecl *Prop = dyn_cast<ObjCPropertyDecl>(KeyProp))
      if (Prop->hasAttr<IBOutletAttr>())
        continue;

    // Show the first time the object was read.
    S.Diag(FirstRead->getLocStart(), DiagKind)
        << int(ObjectKind) << KeyProp << int(FunctionKind)
        << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (const auto &Use : Uses) {
      if (Use.getUseExpr() == FirstRead)
        continue;
      S.Diag(Use.getUseExpr()->getLocStart(),
             diag::note_arc_weak_also_accessed_here)
          << Use.getUseExpr()->getSourceRange();
    }
  }
}
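// Illustrative example (assumed canonical -Warc-repeated-use-of-weak
// trigger): under ARC,
//
//   if (self.weakDelegate)
//     [self.weakDelegate didFinish];  // note_arc_weak_also_accessed_here
//
// reads the weak property twice; the object may be deallocated between the
// reads. Copying it into a strong local variable first avoids the warning.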
namespace {
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
  // Prefer using MapVector to DenseMap, so that iteration order will be
  // the same as insertion order. This is needed to obtain a deterministic
  // order of diagnostics when calling flushDiagnostics().
  typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
  UsesMap uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S) {}
  ~UninitValsDiagReporter() override { flushDiagnostics(); }

  MappedType &getUses(const VarDecl *vd) {
    MappedType &V = uses[vd];
    if (!V.getPointer())
      V.setPointer(new UsesVec());
    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd,
                                 const UninitUse &use) override {
    getUses(vd).getPointer()->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) override {
    getUses(vd).setInt(true);
  }

  void flushDiagnostics() {
    for (const auto &P : uses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init. We want
      // to report the diagnostic at the self-init since that is the root
      // cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations. While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        std::sort(vec->begin(), vec->end(),
                  [](const UninitUse &a, const UninitUse &b) {
          // Prefer a more confident report over a less confident one.
          if (a.getKind() != b.getKind())
            return a.getKind() > b.getKind();
          return a.getUser()->getLocStart() < b.getUser()->getLocStart();
        });

        for (const auto &U : *vec) {
          // If we have self-init, downgrade all uses to 'may be
          // uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    uses.clear();
  }

private:
  static bool hasAlwaysUninitializedUse(const UsesVec *vec) {
    return std::any_of(vec->begin(), vec->end(), [](const UninitUse &U) {
      return U.getKind() == UninitUse::Always ||
             U.getKind() == UninitUse::AfterCall ||
             U.getKind() == UninitUse::AfterDecl;
    });
  }
};
} // anonymous namespace

namespace clang {
namespace {
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
} // anonymous namespace
} // namespace clang
//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace threadSafety {
namespace {
class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  const FunctionDecl *CurrentFunction;
  bool Verbose;

  OptionalNotes getNotes() const {
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction->getNameAsString());
      return OptionalNotes(1, FNote);
    }
    return OptionalNotes();
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
    OptionalNotes ONS(1, Note);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction->getNameAsString());
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
                         const PartialDiagnosticAt &Note2) const {
    OptionalNotes ONS;
    ONS.push_back(Note1);
    ONS.push_back(Note2);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction->getNameAsString());
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  // Helper functions
  void warnLockMismatch(unsigned DiagID, StringRef Kind, Name LockName,
                        SourceLocation Loc) {
    // Gracefully handle rare cases when the analysis can't get a more
    // precise source location.
    if (!Loc.isValid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
      : S(S), FunLocation(FL), FunEndLocation(FEL),
        CurrentFunction(nullptr), Verbose(false) {}

  void setVerbose(bool b) { Verbose = b; }

  /// \brief Emit all buffered diagnostics in order of source location.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
                                         << Loc);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleUnmatchedUnlock(StringRef Kind, Name LockName,
                             SourceLocation Loc) override {
    warnLockMismatch(diag::warn_unlock_but_no_lock, Kind, LockName, Loc);
  }

  void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
                                 LockKind Expected, LockKind Received,
                                 SourceLocation Loc) override {
    if (Loc.isInvalid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_kind_mismatch)
                                         << Kind << LockName << Received
                                         << Expected);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleDoubleLock(StringRef Kind, Name LockName,
                        SourceLocation Loc) override {
    warnLockMismatch(diag::warn_double_lock, Kind, LockName, Loc);
  }

  void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
                                 SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK) override {
    unsigned DiagID = 0;
    switch (LEK) {
    case LEK_LockedSomePredecessors:
      DiagID = diag::warn_lock_some_predecessors;
      break;
    case LEK_LockedSomeLoopIterations:
      DiagID = diag::warn_expecting_lock_held_on_loop;
      break;
    case LEK_LockedAtEndOfFunction:
      DiagID = diag::warn_no_unlock;
      break;
    case LEK_NotLockedAtEndOfFunction:
      DiagID = diag::warn_expecting_locked;
      break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
                                                               << LockName);
    if (LocLocked.isValid()) {
      PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here)
                                              << Kind);
      Warnings.emplace_back(std::move(Warning), getNotes(Note));
      return;
    }
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleExclusiveAndShared(StringRef Kind, Name LockName,
                                SourceLocation Loc1,
                                SourceLocation Loc2) override {
    PartialDiagnosticAt Warning(Loc1,
                                S.PDiag(diag::warn_lock_exclusive_and_shared)
                                    << Kind << LockName);
    PartialDiagnosticAt Note(Loc2,
                             S.PDiag(diag::note_lock_exclusive_and_shared)
                                 << Kind << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes(Note));
  }
  void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
                         ProtectedOperationKind POK, AccessKind AK,
                         SourceLocation Loc) override {
    assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
           "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess
                          ? diag::warn_variable_requires_any_lock
                          : diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
        << D->getNameAsString() << getLockKindFromAccessKind(AK));
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
                          ProtectedOperationKind POK, Name LockName,
                          LockKind LK, SourceLocation Loc,
                          Name *PossibleMatch) override {
    unsigned DiagID = 0;
    if (PossibleMatch) {
      switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock_precise;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock_precise;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock_precise;
        break;
      case POK_PassByRef:
        DiagID = diag::warn_guarded_pass_by_reference;
        break;
      case POK_PtPassByRef:
        DiagID = diag::warn_pt_guarded_pass_by_reference;
        break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
                                                       << D->getNameAsString()
                                                       << LockName << LK);
      PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
                                        << *PossibleMatch);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt VNote(D->getLocation(),
                                  S.PDiag(diag::note_guarded_by_declared_here)
                                      << D->getNameAsString());
        Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
    } else {
      switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock;
        break;
      case POK_PassByRef:
        DiagID = diag::warn_guarded_pass_by_reference;
        break;
      case POK_PtPassByRef:
        DiagID = diag::warn_pt_guarded_pass_by_reference;
        break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
                                                       << D->getNameAsString()
                                                       << LockName << LK);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt Note(D->getLocation(),
                                 S.PDiag(diag::note_guarded_by_declared_here)
                                     << D->getNameAsString());
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes());
    }
  }

  void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_acquire_requires_negative_cap)
            << Kind << LockName << Neg);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
                                         << Kind << FunName << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }
  void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name,
                                SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void enterFunction(const FunctionDecl* FD) override {
    CurrentFunction = FD;
  }

  void leaveFunction(const FunctionDecl* FD) override {
    CurrentFunction = nullptr;
  }
};
} // anonymous namespace
} // namespace threadSafety
} // namespace clang

//===----------------------------------------------------------------------===//
// -Wconsumed
//===----------------------------------------------------------------------===//

namespace clang {
namespace consumed {
namespace {
class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase {

  Sema &S;
  DiagList Warnings;

public:

  ConsumedWarningsHandler(Sema &S) : S(S) {}

  void emitDiagnostics() override {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void warnLoopStateMismatch(SourceLocation Loc,
                             StringRef VariableName) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch)
                                         << VariableName);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnParamReturnTypestateMismatch(SourceLocation Loc,
                                        StringRef VariableName,
                                        StringRef ExpectedState,
                                        StringRef ObservedState) override {

    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_param_return_typestate_mismatch)
            << VariableName << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
                                  StringRef ObservedState) override {

    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_param_typestate_mismatch)
            << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnReturnTypestateForUnconsumableType(SourceLocation Loc,
                                              StringRef TypeName) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_return_typestate_for_unconsumable_type)
            << TypeName);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState,
                                   StringRef ObservedState) override {

    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_return_typestate_mismatch)
            << ExpectedState << ObservedState);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }

  void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State,
                                   SourceLocation Loc) override {

    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_use_of_temp_in_invalid_state)
            << MethodName << State);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }
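  // Illustrative sketch (hypothetical, assuming the consumable/callable_when
  // attribute spellings used by the Consumed analysis) of code that would
  // reach the handler below:
  //
  //   class CONSUMABLE(unconsumed) File {
  //     void read() CALLABLE_WHEN("unconsumed");
  //     void close() SET_TYPESTATE(consumed);
  //   };
  //   void f(File f) { f.close(); f.read(); }  // 'f' is consumed at the call
  //                                            // -> warn_use_in_invalid_state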
  void warnUseInInvalidState(StringRef MethodName, StringRef VariableName,
                             StringRef State, SourceLocation Loc) override {

    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_use_in_invalid_state)
                                         << MethodName << VariableName
                                         << State);

    Warnings.emplace_back(std::move(Warning), OptionalNotes());
  }
};
} // anonymous namespace
} // namespace consumed
} // namespace clang

//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//

clang::sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
  enableConsumedAnalysis = 0;
}

static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
  return (unsigned)!D.isIgnored(diag, SourceLocation());
}

clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
  : S(s),
    NumFunctionsAnalyzed(0),
    NumFunctionsWithBadCFGs(0),
    NumCFGBlocks(0),
    MaxCFGBlocksPerFunction(0),
    NumUninitAnalysisFunctions(0),
    NumUninitAnalysisVariables(0),
    MaxUninitAnalysisVariablesPerFunction(0),
    NumUninitAnalysisBlockVisits(0),
    MaxUninitAnalysisBlockVisitsPerFunction(0) {

  using namespace diag;
  DiagnosticsEngine &D = S.getDiagnostics();

  DefaultPolicy.enableCheckUnreachable =
      isEnabled(D, warn_unreachable) ||
      isEnabled(D, warn_unreachable_break) ||
      isEnabled(D, warn_unreachable_return) ||
      isEnabled(D, warn_unreachable_loop_increment);

  DefaultPolicy.enableThreadSafetyAnalysis =
      isEnabled(D, warn_double_lock);

  DefaultPolicy.enableConsumedAnalysis =
      isEnabled(D, warn_use_in_invalid_state);
}

static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
  for (const auto &D : fscope->PossiblyUnreachableDiags)
    S.Diag(D.Loc, D.PD);
}

void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, const BlockExpr *blkExpr) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis for declarations in system headers if we are
  // going to just ignore them.
  if (Diags.getSuppressSystemWarnings() &&
      S.SourceMgr.isInSystemHeader(D->getLocation()))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (Diags.hasUncompilableErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  // Construct the analysis context with the specified CFG build options.
  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D);
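  // A sketch of the n^2 concern addressed by the EH-edge setting below
  // (illustrative reasoning, not from this file): if every CallExpr could
  // throw, each of n calls needs an exceptional edge into the cleanup code
  // for each enclosing local with a destructor, so a function such as
  //
  //   void f() { S a, b, c; g(); h(); i(); }
  //
  // grows edges proportional to (#calls x #destructors), i.e. O(n^2).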
  // Don't generate EH edges for CallExprs: the exceptional edges to
  // destructors that would result can cause an n^2 blowup in CFG size,
  // and with it a substantial compile-time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;
  AC.getCFGBuildOptions().AddTemporaryDtors = true;
  AC.getCFGBuildOptions().AddCXXNewAllocator = false;
  AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;

  // Force that certain expressions appear as CFGElements in the CFG. This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring. This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately. This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
      P.enableConsumedAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  } else {
    AC.getCFGBuildOptions()
        .setAlwaysAdd(Stmt::BinaryOperatorClass)
        .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
        .setAlwaysAdd(Stmt::BlockExprClass)
        .setAlwaysAdd(Stmt::CStyleCastExprClass)
        .setAlwaysAdd(Stmt::DeclRefExprClass)
        .setAlwaysAdd(Stmt::ImplicitCastExprClass)
        .setAlwaysAdd(Stmt::UnaryOperatorClass)
        .setAlwaysAdd(Stmt::AttributedStmtClass);
  }

  // Install the logical handler for -Wtautological-overlap-compare.
  std::unique_ptr<LogicalErrorHandler> LEH;
  if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
                       D->getLocStart())) {
    LEH.reset(new LogicalErrorHandler(S));
    AC.getCFGBuildOptions().Observer = LEH.get();
  }

  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (const auto &D : fscope->PossiblyUnreachableDiags) {
      if (D.stmt)
        AC.registerForcedBlockExpression(D.stmt);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (const auto &D : fscope->PossiblyUnreachableDiags) {
        bool processed = false;
        if (D.stmt) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(D.stmt);
          CFGReverseBlockReachabilityAnalysis *cra =
              AC.getCFGReachablityAnalysis();
          // FIXME: We should be able to assert that block is non-null, but
          // the CFG analysis can skip potentially-evaluated expressions in
          // edge cases; see test/Sema/vla-2.c.
          if (block && cra) {
            // Can this block be reached from the entrance?
            if (cra->isReachable(&AC.getCFG()->getEntry(), block))
              S.Diag(D.Loc, D.PD);
            processed = true;
          }
        }
        if (!processed) {
          // Emit the warning anyway if we cannot map to a basic block.
          S.Diag(D.Loc, D.PD);
        }
      }
    }

    if (!analyzed)
      flushDiagnostics(S, fscope);
  }
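  // Illustrative (hypothetical) example of what the fall-through check
  // below diagnoses:
  //
  //   int f(bool b) {
  //     if (b)
  //       return 1;
  //   }   // control can reach the end of a value-returning function
  //       // -> -Wreturn-type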
  // Warning: check missing 'return'
  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
        (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
         : (isa<CXXMethodDecl>(D) &&
            cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
            cast<CXXMethodDecl>(D)->getParent()->isLambda())
               ? CheckFallThroughDiagnostics::MakeForLambda()
               : CheckFallThroughDiagnostics::MakeForFunction(D));
    CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
  }

  // Warning: check for unreachable code
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the control
    // flow, and it is very difficult to prove that a snippet of code in a
    // template is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);
  }

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getLocEnd();
    threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
    if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getLocStart()))
      Reporter.setIssueBetaWarnings(true);
    if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getLocStart()))
      Reporter.setVerbose(true);

    threadSafety::runThreadSafetyAnalysis(AC, Reporter,
                                          &S.ThreadSafetyDeclCache);
    Reporter.emitDiagnostics();
  }

  // Check for violations of consumed properties.
  if (P.enableConsumedAnalysis) {
    consumed::ConsumedWarningsHandler WarningHandler(S);
    consumed::ConsumedAnalyzer Analyzer(WarningHandler);
    Analyzer.run(AC);
  }

  if (!Diags.isIgnored(diag::warn_uninit_var, D->getLocStart()) ||
      !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getLocStart()) ||
      !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getLocStart())) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  bool FallThroughDiagFull =
      !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getLocStart());
  bool FallThroughDiagPerFunction = !Diags.isIgnored(
      diag::warn_unannotated_fallthrough_per_function, D->getLocStart());
  if (FallThroughDiagFull || FallThroughDiagPerFunction ||
      fscope->HasFallthroughStmt) {
    DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
  }

  if (S.getLangOpts().ObjCWeak &&
      !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getLocStart()))
    diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());

  // Check for infinite self-recursion in functions
  if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
                       D->getLocStart())) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      checkRecursiveFunction(S, FD, Body, AC);
    }
  }
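  // Illustrative (hypothetical) trigger for -Wtautological-overlap-compare,
  // which the CFG build below enables:
  //
  //   if (x > 5 && x < 3) { ... }  // the two ranges cannot overlap, so the
  //                                // condition is always false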
  // If none of the previous checks caused a CFG build, trigger one here
  // for -Wtautological-overlap-compare.
  if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
                       D->getLocStart())) {
    AC.getCFG();
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detailed information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}

void clang::sema::AnalysisBasedWarnings::PrintStats() const {
  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";

  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
  unsigned AvgCFGBlocksPerFunction =
      !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
               << "  " << NumCFGBlocks << " CFG blocks built.\n"
               << "  " << AvgCFGBlocksPerFunction
               << " average CFG blocks per function.\n"
               << "  " << MaxCFGBlocksPerFunction
               << " max CFG blocks per function.\n";

  unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
  unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
  llvm::errs() << NumUninitAnalysisFunctions
               << " functions analyzed for uninitialized variables\n"
               << "  " << NumUninitAnalysisVariables
               << " variables analyzed.\n"
               << "  " << AvgUninitVariablesPerFunction
               << " average variables per function.\n"
               << "  " << MaxUninitAnalysisVariablesPerFunction
               << " max variables per function.\n"
               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
               << "  " << AvgUninitBlockVisitsPerFunction
               << " average block visits per function.\n"
               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
               << " max block visits per function.\n";
}