//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/LanguageExtensions.html#thread-safety-annotation-checking
// for more information.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <utility>
#include <vector>

using namespace clang;
using namespace thread_safety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() {}

namespace {

/// SExpr implements a simple expression language that is used to store,
/// compare, and pretty-print C++ expressions. Unlike a clang Expr, a SExpr
/// does not capture surface syntax, and it does not distinguish between
/// C++ concepts, like pointers and references, that have no real semantic
/// differences. This simplicity allows SExprs to be meaningfully compared,
/// e.g.
///   (x)          =  x
///   (*this).foo  =  this->foo
///   *&a          =  a
///
/// Thread-safety analysis works by comparing lock expressions. Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time. Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x", have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
class SExpr {
private:
  enum ExprOp {
    EOP_Nop,       ///< No-op
    EOP_Wildcard,  ///< Matches anything.
    EOP_Universal, ///< Universal lock.
    EOP_This,      ///< This keyword.
    EOP_NVar,      ///< Named variable.
    EOP_LVar,      ///< Local variable.
    EOP_Dot,       ///< Field access
    EOP_Call,      ///< Function call
    EOP_MCall,     ///< Method call
    EOP_Index,     ///< Array index
    EOP_Unary,     ///< Unary operation
    EOP_Binary,    ///< Binary operation
    EOP_Unknown    ///< Catchall for everything else
  };


  class SExprNode {
  private:
    unsigned char  Op;     ///< Opcode of the root node
    unsigned char  Flags;  ///< Additional opcode-specific data
    unsigned short Sz;     ///< Number of child nodes
    const void*    Data;   ///< Additional opcode-specific data

  public:
    SExprNode(ExprOp O, unsigned F, const void* D)
      : Op(static_cast<unsigned char>(O)),
        Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
    { }

    unsigned size() const        { return Sz; }
    void     setSize(unsigned S) { Sz = S; }

    ExprOp kind() const { return static_cast<ExprOp>(Op); }

    const NamedDecl* getNamedDecl() const {
      assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
      return reinterpret_cast<const NamedDecl*>(Data);
    }

    const NamedDecl* getFunctionDecl() const {
      assert(Op == EOP_Call || Op == EOP_MCall);
      return reinterpret_cast<const NamedDecl*>(Data);
    }

    bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
    void setArrow(bool A) { Flags = A ? 1 : 0; }

    unsigned arity() const {
      switch (Op) {
        case EOP_Nop:       return 0;
        case EOP_Wildcard:  return 0;
        case EOP_Universal: return 0;
        case EOP_NVar:      return 0;
        case EOP_LVar:      return 0;
        case EOP_This:      return 0;
        case EOP_Dot:       return 1;
        case EOP_Call:      return Flags+1;  // First arg is function.
        case EOP_MCall:     return Flags+1;  // First arg is implicit obj.
        case EOP_Index:     return 2;
        case EOP_Unary:     return 1;
        case EOP_Binary:    return 2;
        case EOP_Unknown:   return Flags;
      }
      return 0;
    }

    bool operator==(const SExprNode& Other) const {
      // Ignore flags and size -- they don't matter.
      return (Op == Other.Op &&
              Data == Other.Data);
    }

    bool operator!=(const SExprNode& Other) const {
      return !(*this == Other);
    }

    bool matches(const SExprNode& Other) const {
      return (*this == Other) ||
             (Op == EOP_Wildcard) ||
             (Other.Op == EOP_Wildcard);
    }
  };


  /// \brief Encapsulates the lexical context of a function call. The lexical
  /// context includes the arguments to the call, including the implicit object
  /// argument. When an attribute containing a mutex expression is attached to
  /// a method, the expression may refer to formal parameters of the method.
  /// Actual arguments must be substituted for formal parameters to derive
  /// the appropriate mutex expression in the lexical context where the
  /// function is called. PrevCtx holds the context in which the arguments
  /// themselves should be evaluated; multiple calling contexts can be chained
  /// together by the lock_returned attribute.
  struct CallingContext {
    const NamedDecl* AttrDecl;  // The decl to which the attribute is attached.
    const Expr* SelfArg;        // Implicit object argument -- e.g. 'this'
    bool SelfArrow;             // is Self referred to with -> or .?
    unsigned NumArgs;           // Number of funArgs
    const Expr* const* FunArgs; // Function arguments
    CallingContext* PrevCtx;    // The previous context; or 0 if none.

    CallingContext(const NamedDecl *D = 0, const Expr *S = 0,
                   unsigned N = 0, const Expr* const *A = 0,
                   CallingContext *P = 0)
      : AttrDecl(D), SelfArg(S), SelfArrow(false),
        NumArgs(N), FunArgs(A), PrevCtx(P)
    { }
  };

  typedef SmallVector<SExprNode, 4> NodeVector;

private:
  // A SExpr is a list of SExprNodes in prefix order. The Size field allows
  // the list to be traversed as a tree.
  NodeVector NodeVec;

private:
  unsigned makeNop() {
    NodeVec.push_back(SExprNode(EOP_Nop, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeWildcard() {
    NodeVec.push_back(SExprNode(EOP_Wildcard, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeUniversal() {
    NodeVec.push_back(SExprNode(EOP_Universal, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeNamedVar(const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_NVar, 0, D));
    return NodeVec.size()-1;
  }

  unsigned makeLocalVar(const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_LVar, 0, D));
    return NodeVec.size()-1;
  }

  unsigned makeThis() {
    NodeVec.push_back(SExprNode(EOP_This, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeDot(const NamedDecl *D, bool Arrow) {
    NodeVec.push_back(SExprNode(EOP_Dot, Arrow ? 1 : 0, D));
    return NodeVec.size()-1;
  }

  unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_Call, NumArgs, D));
    return NodeVec.size()-1;
  }

  // Grab the very first declaration of virtual method D
  const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
    while (true) {
      D = D->getCanonicalDecl();
      CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
                                     E = D->end_overridden_methods();
      if (I == E)
        return D;  // Method does not override anything
      D = *I;      // FIXME: this does not work with multiple inheritance.
    }
    return 0;
  }

  unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) {
    NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, getFirstVirtualDecl(D)));
    return NodeVec.size()-1;
  }

  unsigned makeIndex() {
    NodeVec.push_back(SExprNode(EOP_Index, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeUnary() {
    NodeVec.push_back(SExprNode(EOP_Unary, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeBinary() {
    NodeVec.push_back(SExprNode(EOP_Binary, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeUnknown(unsigned Arity) {
    NodeVec.push_back(SExprNode(EOP_Unknown, Arity, 0));
    return NodeVec.size()-1;
  }

  /// Build an SExpr from the given C++ expression.
  /// Recursive function that terminates on DeclRefExpr.
  /// Note: this function merely creates a SExpr; it does not check to
  /// ensure that the original expression is a valid mutex expression.
  ///
  /// NDeref returns the number of Dereference and AddressOf operations
  /// preceding the Expr; this is used to decide whether to pretty-print
  /// SExprs with . or ->.
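  ///
  /// For illustration only (a sketch of the encoding, not compiler output):
  /// the expression "x->foo->bar.mu" is stored in prefix order roughly as
  ///   [ Dot(mu), Dot(bar), Dot(foo), NVar(x) ]
  /// where each node's size field spans its entire subtree.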
  unsigned buildSExpr(const Expr *Exp, CallingContext* CallCtx,
                      int* NDeref = 0) {
    if (!Exp)
      return 0;

    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      const NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
      if (PV) {
        const FunctionDecl *FD =
          cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
        unsigned i = PV->getFunctionScopeIndex();

        if (CallCtx && CallCtx->FunArgs &&
            FD == CallCtx->AttrDecl->getCanonicalDecl()) {
          // Substitute call arguments for references to function parameters
          assert(i < CallCtx->NumArgs);
          return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
        }
        // Map the param back to the param of the original function declaration.
        makeNamedVar(FD->getParamDecl(i));
        return 1;
      }
      // Not a function parameter -- just store the reference.
      makeNamedVar(ND);
      return 1;
    } else if (isa<CXXThisExpr>(Exp)) {
      // Substitute parent for 'this'
      if (CallCtx && CallCtx->SelfArg) {
        if (!CallCtx->SelfArrow && NDeref)
          // 'this' is a pointer, but self is not, so need to take address.
          --(*NDeref);
        return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
      }
      else {
        makeThis();
        return 1;
      }
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      const NamedDecl *ND = ME->getMemberDecl();
      int ImplicitDeref = ME->isArrow() ? 1 : 0;
      unsigned Root = makeDot(ND, false);
      unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
      NodeVec[Root].setArrow(ImplicitDeref > 0);
      NodeVec[Root].setSize(Sz + 1);
      return Sz + 1;
    } else if (const CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
      // When calling a function with a lock_returned attribute, replace
      // the function call with the expression in lock_returned.
      const CXXMethodDecl* MD =
        cast<CXXMethodDecl>(CMCE->getMethodDecl()->getMostRecentDecl());
      if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) {
        CallingContext LRCallCtx(CMCE->getMethodDecl());
        LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
        LRCallCtx.SelfArrow =
          dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow();
        LRCallCtx.NumArgs = CMCE->getNumArgs();
        LRCallCtx.FunArgs = CMCE->getArgs();
        LRCallCtx.PrevCtx = CallCtx;
        return buildSExpr(At->getArg(), &LRCallCtx);
      }
      // Hack to treat smart pointers and iterators as pointers;
      // ignore any method named get().
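      // For illustration (hypothetical client code): given
      //   std::unique_ptr<Foo> p;
      // where Foo has a member mutex mu, the expression "p.get()->mu" is
      // built as if it were "p->mu", so both forms name the same lock.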
      if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
          CMCE->getNumArgs() == 0) {
        if (NDeref && dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow())
          ++(*NDeref);
        return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
      }
      unsigned NumCallArgs = CMCE->getNumArgs();
      unsigned Root = makeMCall(NumCallArgs, CMCE->getMethodDecl());
      unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
      const Expr* const* CallArgs = CMCE->getArgs();
      for (unsigned i = 0; i < NumCallArgs; ++i) {
        Sz += buildSExpr(CallArgs[i], CallCtx);
      }
      NodeVec[Root].setSize(Sz + 1);
      return Sz + 1;
    } else if (const CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
      const FunctionDecl* FD =
        cast<FunctionDecl>(CE->getDirectCallee()->getMostRecentDecl());
      if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
        CallingContext LRCallCtx(CE->getDirectCallee());
        LRCallCtx.NumArgs = CE->getNumArgs();
        LRCallCtx.FunArgs = CE->getArgs();
        LRCallCtx.PrevCtx = CallCtx;
        return buildSExpr(At->getArg(), &LRCallCtx);
      }
      // Treat smart pointers and iterators as pointers;
      // ignore the * and -> operators.
      if (const CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
        OverloadedOperatorKind k = OE->getOperator();
        if (k == OO_Star) {
          if (NDeref) ++(*NDeref);
          return buildSExpr(OE->getArg(0), CallCtx, NDeref);
        }
        else if (k == OO_Arrow) {
          return buildSExpr(OE->getArg(0), CallCtx, NDeref);
        }
      }
      unsigned NumCallArgs = CE->getNumArgs();
      unsigned Root = makeCall(NumCallArgs, 0);
      unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
      const Expr* const* CallArgs = CE->getArgs();
      for (unsigned i = 0; i < NumCallArgs; ++i) {
        Sz += buildSExpr(CallArgs[i], CallCtx);
      }
      NodeVec[Root].setSize(Sz+1);
      return Sz+1;
    } else if (const BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
      unsigned Root = makeBinary();
      unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
      Sz += buildSExpr(BOE->getRHS(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
      // Ignore & and * operators -- they're no-ops.
      // However, we try to figure out whether the expression is a pointer,
      // so we can use . and -> appropriately in error messages.
      if (UOE->getOpcode() == UO_Deref) {
        if (NDeref) ++(*NDeref);
        return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
      }
      if (UOE->getOpcode() == UO_AddrOf) {
        if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
          if (DRE->getDecl()->isCXXInstanceMember()) {
            // This is a pointer-to-member expression, e.g. &MyClass::mu_.
            // We interpret this syntax specially, as a wildcard.
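            // For illustration (hypothetical annotation): a mutex expression
            // written as "&MyClass::mu_" matches mu_ on *any* instance of
            // MyClass, so "a.mu_" and "b->mu_" both satisfy it.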
            unsigned Root = makeDot(DRE->getDecl(), false);
            makeWildcard();
            NodeVec[Root].setSize(2);
            return 2;
          }
        }
        if (NDeref) --(*NDeref);
        return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
      }
      unsigned Root = makeUnary();
      unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (const ArraySubscriptExpr *ASE =
               dyn_cast<ArraySubscriptExpr>(Exp)) {
      unsigned Root = makeIndex();
      unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
      Sz += buildSExpr(ASE->getIdx(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (const AbstractConditionalOperator *CE =
               dyn_cast<AbstractConditionalOperator>(Exp)) {
      unsigned Root = makeUnknown(3);
      unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
      Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
      Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
      unsigned Root = makeUnknown(3);
      unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
      Sz += buildSExpr(CE->getLHS(), CallCtx);
      Sz += buildSExpr(CE->getRHS(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
      return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
    } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
      return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
    } else if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
      return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
    } else if (const CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
      return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
    } else if (isa<CharacterLiteral>(Exp) ||
               isa<CXXNullPtrLiteralExpr>(Exp) ||
               isa<GNUNullExpr>(Exp) ||
               isa<CXXBoolLiteralExpr>(Exp) ||
               isa<FloatingLiteral>(Exp) ||
               isa<ImaginaryLiteral>(Exp) ||
               isa<IntegerLiteral>(Exp) ||
               isa<StringLiteral>(Exp) ||
               isa<ObjCStringLiteral>(Exp)) {
      makeNop();
      return 1;  // FIXME: Ignore literals for now
    } else {
      makeNop();
      return 1;  // Ignore.  FIXME: mark as invalid expression?
    }
  }

  /// \brief Construct a SExpr from an expression.
  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D The declaration to which the lock/unlock attribute is attached.
  void buildSExprFromExpr(const Expr *MutexExp, const Expr *DeclExp,
                          const NamedDecl *D, VarDecl *SelfDecl = 0) {
    CallingContext CallCtx(D);

    if (MutexExp) {
      if (const StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) {
        if (SLit->getString() == StringRef("*"))
          // The "*" expr is a universal lock, which essentially turns off
          // checks until it is removed from the lockset.
          makeUniversal();
        else
          // Ignore other string literals for now.
          makeNop();
        return;
      }
    }

    // If we are processing a raw attribute expression, with no substitutions.
    if (DeclExp == 0) {
      buildSExpr(MutexExp, 0);
      return;
    }

    // Examine DeclExp to find SelfArg and FunArgs, which are used to
    // substitute for formal parameters when we call buildSExpr later.
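    // For illustration (hypothetical client code, using the annotation macros
    // from the Clang docs): given
    //   class Cache {
    //     Mutex mu;
    //     void update(int v) EXCLUSIVE_LOCKS_REQUIRED(mu);
    //   };
    // the call "c->update(5)" makes SelfArg = "c", so the attribute argument
    // "mu" is rebuilt as the SExpr for "c->mu".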
    if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
      CallCtx.SelfArg = ME->getBase();
      CallCtx.SelfArrow = ME->isArrow();
    } else if (const CXXMemberCallExpr *CE =
               dyn_cast<CXXMemberCallExpr>(DeclExp)) {
      CallCtx.SelfArg = CE->getImplicitObjectArgument();
      CallCtx.SelfArrow = dyn_cast<MemberExpr>(CE->getCallee())->isArrow();
      CallCtx.NumArgs = CE->getNumArgs();
      CallCtx.FunArgs = CE->getArgs();
    } else if (const CallExpr *CE =
               dyn_cast<CallExpr>(DeclExp)) {
      CallCtx.NumArgs = CE->getNumArgs();
      CallCtx.FunArgs = CE->getArgs();
    } else if (const CXXConstructExpr *CE =
               dyn_cast<CXXConstructExpr>(DeclExp)) {
      CallCtx.SelfArg = 0;  // Will be set below
      CallCtx.NumArgs = CE->getNumArgs();
      CallCtx.FunArgs = CE->getArgs();
    } else if (D && isa<CXXDestructorDecl>(D)) {
      // There's no such thing as a "destructor call" in the AST.
      CallCtx.SelfArg = DeclExp;
    }

    // Hack to handle constructors, where self cannot be recovered from
    // the expression.
    if (SelfDecl && !CallCtx.SelfArg) {
      DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
                          SelfDecl->getLocation());
      CallCtx.SelfArg = &SelfDRE;

      // If the attribute has no arguments, then assume the argument is "this".
      if (MutexExp == 0)
        buildSExpr(CallCtx.SelfArg, 0);
      else  // For most attributes.
        buildSExpr(MutexExp, &CallCtx);
      return;
    }

    // If the attribute has no arguments, then assume the argument is "this".
    if (MutexExp == 0)
      buildSExpr(CallCtx.SelfArg, 0);
    else  // For most attributes.
      buildSExpr(MutexExp, &CallCtx);
  }

  /// \brief Get index of next sibling of node i.
  unsigned getNextSibling(unsigned i) const {
    return i + NodeVec[i].size();
  }

public:
  explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }

  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D The declaration to which the lock/unlock attribute is attached.
  /// Caller must check isValid() after construction.
  SExpr(const Expr* MutexExp, const Expr *DeclExp, const NamedDecl* D,
        VarDecl *SelfDecl=0) {
    buildSExprFromExpr(MutexExp, DeclExp, D, SelfDecl);
  }

  /// Return true if this is a valid decl sequence.
  /// Caller must call this by hand after construction to handle errors.
  bool isValid() const {
    return !NodeVec.empty();
  }

  bool shouldIgnore() const {
    // Nop is a mutex that we have decided to deliberately ignore.
    assert(NodeVec.size() > 0 && "Invalid Mutex");
    return NodeVec[0].kind() == EOP_Nop;
  }

  bool isUniversal() const {
    assert(NodeVec.size() > 0 && "Invalid Mutex");
    return NodeVec[0].kind() == EOP_Universal;
  }

  /// Issue a warning about an invalid lock expression
  static void warnInvalidLock(ThreadSafetyHandler &Handler,
                              const Expr *MutexExp,
                              const Expr *DeclExp, const NamedDecl* D) {
    SourceLocation Loc;
    if (DeclExp)
      Loc = DeclExp->getExprLoc();

    // FIXME: add a note about the attribute location in MutexExp or D
    if (Loc.isValid())
      Handler.handleInvalidLockExp(Loc);
  }

  bool operator==(const SExpr &other) const {
    return NodeVec == other.NodeVec;
  }

  bool operator!=(const SExpr &other) const {
    return !(*this == other);
  }

  bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
    if (NodeVec[i].matches(Other.NodeVec[j])) {
      unsigned ni = NodeVec[i].arity();
      unsigned nj = Other.NodeVec[j].arity();
      unsigned n = (ni < nj) ? ni : nj;
      bool Result = true;
      unsigned ci = i+1;  // first child of i
      unsigned cj = j+1;  // first child of j
      for (unsigned k = 0; k < n;
           ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
        Result = Result && matches(Other, ci, cj);
      }
      return Result;
    }
    return false;
  }

  // A partial match between a.mu and b.mu returns true if a and b have the
  // same type (and thus mu refers to the same mutex declaration), regardless
  // of whether a and b are different objects or not.
  bool partiallyMatches(const SExpr &Other) const {
    if (NodeVec[0].kind() == EOP_Dot)
      return NodeVec[0].matches(Other.NodeVec[0]);
    return false;
  }

  /// \brief Pretty print a lock expression for use in error messages.
  std::string toString(unsigned i = 0) const {
    assert(isValid());
    if (i >= NodeVec.size())
      return "";

    const SExprNode* N = &NodeVec[i];
    switch (N->kind()) {
      case EOP_Nop:
        return "_";
      case EOP_Wildcard:
        return "(?)";
      case EOP_Universal:
        return "*";
      case EOP_This:
        return "this";
      case EOP_NVar:
      case EOP_LVar: {
        return N->getNamedDecl()->getNameAsString();
      }
      case EOP_Dot: {
        if (NodeVec[i+1].kind() == EOP_Wildcard) {
          std::string S = "&";
          S += N->getNamedDecl()->getQualifiedNameAsString();
          return S;
        }
        std::string FieldName = N->getNamedDecl()->getNameAsString();
        if (NodeVec[i+1].kind() == EOP_This)
          return FieldName;

        std::string S = toString(i+1);
        if (N->isArrow())
          return S + "->" + FieldName;
        else
          return S + "." + FieldName;
      }
      case EOP_Call: {
        std::string S = toString(i+1) + "(";
        unsigned NumArgs = N->arity()-1;
        unsigned ci = getNextSibling(i+1);
        for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
          S += toString(ci);
          if (k+1 < NumArgs) S += ",";
        }
        S += ")";
        return S;
      }
      case EOP_MCall: {
        std::string S = "";
        if (NodeVec[i+1].kind() != EOP_This)
          S = toString(i+1) + ".";
        if (const NamedDecl *D = N->getFunctionDecl())
          S += D->getNameAsString() + "(";
        else
          S += "#(";
        unsigned NumArgs = N->arity()-1;
        unsigned ci = getNextSibling(i+1);
        for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
          S += toString(ci);
          if (k+1 < NumArgs) S += ",";
        }
        S += ")";
        return S;
      }
      case EOP_Index: {
        std::string S1 = toString(i+1);
        std::string S2 = toString(i+1 + NodeVec[i+1].size());
        return S1 + "[" + S2 + "]";
      }
      case EOP_Unary: {
        std::string S = toString(i+1);
        return "#" + S;
      }
      case EOP_Binary: {
        std::string S1 = toString(i+1);
        std::string S2 = toString(i+1 + NodeVec[i+1].size());
        return "(" + S1 + "#" + S2 + ")";
      }
      case EOP_Unknown: {
        unsigned NumChildren = N->arity();
        if (NumChildren == 0)
          return "(...)";
        std::string S = "(";
        unsigned ci = i+1;
        for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
          S += toString(ci);
          if (j+1 < NumChildren) S += "#";
        }
        S += ")";
        return S;
      }
    }
    return "";
  }
};



/// \brief A short list of SExprs
class MutexIDList : public SmallVector<SExpr, 3> {
public:
  /// \brief Return true if the list contains the specified SExpr
  /// Performs a linear search, because these lists are almost always very
  /// small.
  bool contains(const SExpr& M) {
    for (iterator I=begin(),E=end(); I != E; ++I)
      if ((*I) == M) return true;
    return false;
  }

  /// \brief Push M onto list, but discard duplicates
  void push_back_nodup(const SExpr& M) {
    if (!contains(M)) push_back(M);
  }
};



/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;
  bool     Asserted;        // for asserted locks
  bool     Managed;         // for ScopedLockable objects
  SExpr    UnderlyingMutex; // for ScopedLockable objects

  LockData(SourceLocation AcquireLoc, LockKind LKind, bool M=false,
           bool Asrt=false)
    : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(Asrt), Managed(M),
      UnderlyingMutex(Decl::EmptyShell())
  {}

  LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
    : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(false), Managed(false),
      UnderlyingMutex(Mu)
  {}

  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }

  bool isAtLeast(LockKind LK) {
    return (LK == LK_Shared) || (LKind == LK_Exclusive);
  }
};


/// \brief A FactEntry stores a single fact that is known at a particular point
/// in the program execution.  Currently, this is information regarding a lock
/// that is held at that point.
struct FactEntry {
  SExpr    MutID;
  LockData LDat;

  FactEntry(const SExpr& M, const LockData& L)
    : MutID(M), LDat(L)
  { }
};


typedef unsigned short FactID;

/// \brief FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<FactEntry> Facts;

public:
  FactID newLock(const SExpr& M, const LockData& L) {
    Facts.push_back(FactEntry(M,L));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry& operator[](FactID F) const { return Facts[F]; }
  FactEntry&       operator[](FactID F)       { return Facts[F]; }
};


/// \brief A FactSet is the set of facts that are known to be true at a
/// particular program point.  FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup.  Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
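///
/// For illustration (hypothetical locking sequence): after calls to
/// mu1.Lock() and mu2.ReaderLock(), the FactSet at that point holds two
/// facts: mu1 held exclusively and mu2 held shared.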
class FactSet {
private:
  typedef SmallVector<FactID, 4> FactVec;

  FactVec FactIDs;

public:
  typedef FactVec::iterator       iterator;
  typedef FactVec::const_iterator const_iterator;

  iterator       begin()       { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator       end()       { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
    FactID F = FM.newLock(M, L);
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager& FM, const SExpr& M) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].MutID.matches(M)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].MutID.matches(M)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  // Returns an iterator pointing to the first fact that matches M, or end()
  // if there is none.
  iterator findLockIter(FactManager &FM, const SExpr &M) {
    for (iterator I = begin(), E = end(); I != E; ++I) {
      const SExpr &Exp = FM[*I].MutID;
      if (Exp.matches(M))
        return I;
    }
    return end();
  }

  LockData* findLock(FactManager &FM, const SExpr &M) const {
    for (const_iterator I = begin(), E = end(); I != E; ++I) {
      const SExpr &Exp = FM[*I].MutID;
      if (Exp.matches(M))
        return &FM[*I].LDat;
    }
    return 0;
  }

  LockData* findLockUniv(FactManager &FM, const SExpr &M) const {
    for (const_iterator I = begin(), E = end(); I != E; ++I) {
      const SExpr &Exp = FM[*I].MutID;
      if (Exp.matches(M) || Exp.isUniversal())
        return &FM[*I].LDat;
    }
    return 0;
  }

  FactEntry* findPartialMatch(FactManager &FM, const SExpr &M) const {
    for (const_iterator I=begin(), E=end(); I != E; ++I) {
      const SExpr& Exp = FM[*I].MutID;
      if (Exp.partiallyMatches(M)) return &FM[*I];
    }
    return 0;
  }
};



/// A Lockset maps each SExpr (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;

class LocalVariableMap;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG.  See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  FactSet EntrySet;             // Lockset held at entry to block
  FactSet ExitSet;              // Lockset held at exit from block
  LocalVarContext EntryContext; // Context held at entry to block
  LocalVarContext ExitContext;  // Context held at exit from block
  SourceLocation EntryLoc;      // Location of first statement in block
  SourceLocation ExitLoc;       // Location of last statement in block.
  unsigned EntryIndex;          // Used to replay contexts later
  bool Reachable;               // Is this block reachable?

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
    : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false)
  { }

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};



// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions.  It provides SSA-like functionality when traversing the
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
// with their SSA-names.  Instead, we compute a Context for each point in the
// code, which maps local variables to the appropriate SSA-name.  This map
// changes with each assignment.
//
// The map is computed in a single pass over the CFG.  Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  typedef LocalVarContext Context;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    const NamedDecl *Dec;  // The original declaration for this variable.
    const Expr *Exp;       // The expression for this variable, OR
    unsigned Ref;          // Reference to another VarDefinition
    Context Ctx;           // The map with which Exp should be interpreted.

    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
      : Dec(D), Exp(E), Ref(0), Ctx(C)
    { }

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
      : Dec(D), Exp(0), Ref(R), Ctx(C)
    { }
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;
  std::vector<std::pair<Stmt*, Context> > SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return 0;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the returned Expr.
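  ///
  /// For illustration (a sketch): given "int y = 0; x = y;", looking up x
  /// returns the Expr "y" and rewinds Ctx to the context of that assignment,
  /// so a further lookup of y in the updated Ctx yields "0".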
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return 0;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return 0;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S,C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
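  // For illustration (a sketch): for "x = 1; x = 2;" the second assignment
  // maps x to a fresh definition (x2 = 2) in the new context, while the old
  // definition x1 = 1 remains in VarDefinitions for later replay.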
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);

  friend class VarMapBuilder;
};


// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}


/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
    : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(DeclStmt *S);
  void VisitBinaryOperator(BinaryOperator *BO);
};


// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
  bool modifiedCtx = false;
  DeclGroupRef DGrp = S->getDeclGroup();
  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
      Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}


// Computes the intersection of two contexts.  The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
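// For illustration (a sketch): intersecting { x -> x2, y -> y1 } with
// { x -> x3, y -> y1 } keeps y -> y1 and clears x to the index-0 placeholder,
// because the two paths reach the join with different definitions of x.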
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
    const NamedDecl *Dec = I.getKey();
    unsigned i1 = I.getData();
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)             // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != i1)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C.  Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
    const NamedDecl *Dec = I.getKey();
    unsigned i = I.getData();
    Result = addReference(Dec, i, Result);
  }
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions.  C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
    const NamedDecl *Dec = I.getKey();
    unsigned i1 = I.getData();
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(Dec);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0;  // Mark this variable as undefined
  }
}


// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, e.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from
// blocks that strictly dominate the current block.  We do not bother to
// insert proper phi nodes, because they are not used in our analysis; instead,
// wherever a phi node would be required, we simply remove that definition from
// the context (e.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)
// On the second pass, these definitions are set to NULL if the variable
// has changed on the back-edge (i.e. a phi node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdge later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(0, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd   = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(0, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminator()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    }

    if (!CurrBlockInfo->ExitLoc.isInvalid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (CFGBlock::const_iterator BI = CurrBlock->begin(),
           BE = CurrBlock->end(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;

  ThreadSafetyHandler       &Handler;
  LocalVariableMap          LocalVarMap;
  FactManager               FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}

  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat);
  void removeLock(FactSet &FSet, const SExpr &Mutex,
                  SourceLocation UnlockLoc, bool FullyRemove=false);

  template <typename AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl=0);

  template <class AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc,
                        LockErrorKind LEK1, LockErrorKind LEK2,
                        bool Modify=true);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        bool Modify=true) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};


/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param Mutex -- the Mutex expression for the lock
/// \param LDat  -- the LockData for the lock
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
                                   const LockData &LDat) {
  // FIXME: deal with acquired before/after annotations.
  // FIXME: Don't always warn when we have support for reentrant locks.
  if (Mutex.shouldIgnore())
    return;

  if (FSet.findLock(FactMan, Mutex)) {
    if (!LDat.Asserted)
      Handler.handleDoubleLock(Mutex.toString(), LDat.AcquireLoc);
  } else {
    FSet.addLock(FactMan, Mutex, LDat);
  }
}


/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param Mutex The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
                                      const SExpr &Mutex,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove) {
  if (Mutex.shouldIgnore())
    return;

  const LockData *LDat = FSet.findLock(FactMan, Mutex);
  if (!LDat) {
    Handler.handleUnmatchedUnlock(Mutex.toString(), UnlockLoc);
    return;
  }

  if (LDat->UnderlyingMutex.isValid()) {
    // This is a scoped lockable object, which manages the real mutex.
    if (FullyRemove) {
      // We're destroying the managing object.
      // Remove the underlying mutex if it exists; but don't warn.
      if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
        FSet.removeLock(FactMan, LDat->UnderlyingMutex);
    } else {
      // We're releasing the underlying mutex, but not destroying the
      // managing object.  Warn on double release.
      if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
        Handler.handleUnmatchedUnlock(LDat->UnderlyingMutex.toString(),
                                      UnlockLoc);
      }
      FSet.removeLock(FactMan, LDat->UnderlyingMutex);
      return;
    }
  }
  FSet.removeLock(FactMan, Mutex);
}


/// \brief Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  typedef typename AttrType::args_iterator iterator_type;

  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    SExpr Mu(0, Exp, D, SelfDecl);
    if (!Mu.isValid())
      SExpr::warnInvalidLock(Handler, 0, Exp, D);
    else
      Mtxs.push_back_nodup(Mu);
    return;
  }

  for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
    SExpr Mu(*I, Exp, D, SelfDecl);
    if (!Mu.isValid())
      SExpr::warnInvalidLock(Handler, *I, Exp, D);
    else
      Mtxs.push_back_nodup(Mu);
  }
}


/// \brief Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
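///
/// For illustration (hypothetical annotation, using the macros from the
/// Clang docs): given
///   bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true, mu);
/// on "if (m.TryLock()) { ... }", mu is added to the lockset only on the
/// branch where the call returned true.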
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
    branch = BLE->getValue();
  }
  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
    branch = ILE->getValue().getBoolValue();
  }
  int branchnum = branch ? 0 : 1;
  if (Neg) branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum) {
      getMutexIDs(Mtxs, Attr, Exp, D);
    }
  }
}


bool getStaticBooleanValue(Expr* E, bool& TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  }
  return false;
}


// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller; it is set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return 0;

  if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
    return CallExp;
  }
  else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  }
  else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  }
  else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
    return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
  }
  else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return 0;
  }
  else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      else if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return 0;
    }
    return 0;
  }
  // FIXME -- handle && and || as well.
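  // For illustration of the limitation above (hypothetical code): in
  // "if (mu.TryLock() && flag)", the condition does not reduce to a single
  // call expression, so the conditionally-acquired mu is not tracked here.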
  return 0;
}


/// \brief Find the lockset that holds on the edge between PredBlock
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet &Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  if (!Cond)
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  CallExpr *Exp =
    const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
  if (!Exp)
    return;

  NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttrs())
    return;

  MutexIDList ExclusiveLocksToAdd;
  MutexIDList SharedLocksToAdd;

  // If the condition is a call to a trylock function, then grab the attributes.
  AttrVec &ArgAttrs = FunDecl->getAttrs();
  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      case attr::ExclusiveTrylockFunction: {
        ExclusiveTrylockFunctionAttr *A =
          cast<ExclusiveTrylockFunctionAttr>(Attr);
        getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        break;
      }
      case attr::SharedTrylockFunction: {
        SharedTrylockFunctionAttr *A =
          cast<SharedTrylockFunctionAttr>(Attr);
        getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        break;
      }
      default:
        break;
    }
  }

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (unsigned i = 0, n = ExclusiveLocksToAdd.size(); i < n; ++i) {
    addLock(Result, ExclusiveLocksToAdd[i],
            LockData(Loc, LK_Exclusive));
  }
  for (unsigned i = 0, n = SharedLocksToAdd.size(); i < n; ++i) {
    addLock(Result, SharedLocksToAdd[i],
            LockData(Loc, LK_Shared));
  }
}


/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
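/// For instance, in the sketch below (assuming the usual annotation macros),
/// visiting a call to mu.Lock() adds 'mu' to the lockset, and visiting a
/// call to mu.Unlock() removes it again:
/// \code
///   class Mutex {
///   public:
///     void Lock()   EXCLUSIVE_LOCK_FUNCTION();
///     void Unlock() UNLOCK_FUNCTION();
///   };
/// \endcode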
class BuildLockset : public StmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // Helper functions
  const ValueDecl *getValueDecl(const Expr *Exp);

  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK);
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp);

  void checkAccess(const Expr *Exp, AccessKind AK);
  void checkPtAccess(const Expr *Exp, AccessKind AK);

  void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = 0);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
    : StmtVisitor<BuildLockset>(),
      Analyzer(Anlzr),
      FSet(Info.EntrySet),
      LVarCtx(Info.EntryContext),
      CtxIndex(Info.EntryIndex)
  {}

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCallExpr(CallExpr *Exp);
  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
  void VisitDeclStmt(DeclStmt *S);
};


/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
const ValueDecl *BuildLockset::getValueDecl(const Expr *Exp) {
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the lockset does not contain a lock sufficient to protect
/// an access of at least the given AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);

  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
    return;
  } else if (Mutex.shouldIgnore()) {
    return;
  }

  LockData *LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
  bool NoError = true;
  if (!LDat) {
    // No exact match found.  Look for a partial match.
    FactEntry *FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
    if (FEntry) {
      // Warn that there's no precise match.
      LDat = &FEntry->LDat;
      std::string PartMatchStr = FEntry->MutID.toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
                                           Exp->getExprLoc(), &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
                                           Exp->getExprLoc());
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK))
    Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
                                         Exp->getExprLoc());
}

/// \brief Warn if the lockset contains the given lock.
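/// This is the check behind LOCKS_EXCLUDED-style annotations: e.g. (a
/// hypothetical client declaration, using the usual macro spelling) calling
/// a function annotated with LOCKS_EXCLUDED(mu) while 'mu' is in the lockset
/// triggers handleFunExcludesLock.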
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
                                   Expr *MutexExp) {
  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
    return;
  }

  LockData *LDat = FSet.findLock(Analyzer->FactMan, Mutex);
  if (LDat) {
    std::string DeclName = D->getNameAsString();
    StringRef DeclNameSR(DeclName);
    Analyzer->Handler.handleFunExcludesLock(DeclNameSR, Mutex.toString(),
                                            Exp->getExprLoc());
  }
}


/// \brief Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK) {
  Exp = Exp->IgnoreParenCasts();

  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences.
    if (UO->getOpcode() == clang::UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK);
    return;
  }

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK);
    else
      checkAccess(ME->getBase(), AK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld(D, POK_VarAccess, AK,
                                        Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}

/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
  Exp = Exp->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld(D, POK_VarDereference, AK,
                                        Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *GBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarDereference);
}


/// \brief Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held.  Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
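///
/// As an illustration (a sketch in hypothetical client code, using the usual
/// macro spellings), a call to a function declared as
/// \code
///   void f() EXCLUSIVE_LOCKS_REQUIRED(mu1) LOCKS_EXCLUDED(mu2);
/// \endcode
/// warns if 'mu1' is not held, or if 'mu2' is held, at the call site.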
///
void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  const AttrVec &ArgAttrs = D->getAttrs();
  MutexIDList ExclusiveLocksToAdd;
  MutexIDList SharedLocksToAdd;
  MutexIDList LocksToRemove;

  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *At = const_cast<Attr*>(ArgAttrs[i]);
    switch (At->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction: {
        ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(At);
        Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D, VD);
        break;
      }

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction: {
        SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(At);
        Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D, VD);
        break;
      }

      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
      case attr::AssertExclusiveLock: {
        AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);

        MutexIDList AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (unsigned j = 0, n = AssertLocks.size(); j < n; ++j) {
          Analyzer->addLock(FSet, AssertLocks[j],
                            LockData(Loc, LK_Exclusive, false, true));
        }
        break;
      }
      case attr::AssertSharedLock: {
        AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);

        MutexIDList AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (unsigned j = 0, n = AssertLocks.size(); j < n; ++j) {
          Analyzer->addLock(FSet, AssertLocks[j],
                            LockData(Loc, LK_Shared, false, true));
        }
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
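      // E.g. (hypothetical client code) a call to a method declared as
      //   void Unlock() UNLOCK_FUNCTION();
      // pushes the unlocked mutex (here, the implicit 'this' object) onto
      // LocksToRemove below.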
      case attr::UnlockFunction: {
        UnlockFunctionAttr *A = cast<UnlockFunctionAttr>(At);
        Analyzer->getMutexIDs(LocksToRemove, A, Exp, D, VD);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        ExclusiveLocksRequiredAttr *A = cast<ExclusiveLocksRequiredAttr>(At);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = A->args_begin(), E = A->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        SharedLocksRequiredAttr *A = cast<SharedLocksRequiredAttr>(At);

        for (SharedLocksRequiredAttr::args_iterator I = A->args_begin(),
             E = A->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);

        for (LocksExcludedAttr::args_iterator I = A->args_begin(),
             E = A->args_end(); I != E; ++I) {
          warnIfMutexHeld(D, Exp, *I);
        }
        break;
      }

      // Ignore other (non thread-safety) attributes.
      default:
        break;
    }
  }

  // Figure out if we're calling the constructor of a scoped lockable class.
  bool isScopedVar = false;
  if (VD) {
    if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl *PD = CD->getParent();
      if (PD && PD->getAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  // Add locks.
  for (unsigned i = 0, n = ExclusiveLocksToAdd.size(); i < n; ++i) {
    Analyzer->addLock(FSet, ExclusiveLocksToAdd[i],
                      LockData(Loc, LK_Exclusive, isScopedVar));
  }
  for (unsigned i = 0, n = SharedLocksToAdd.size(); i < n; ++i) {
    Analyzer->addLock(FSet, SharedLocksToAdd[i],
                      LockData(Loc, LK_Shared, isScopedVar));
  }

  // Add the managing object as a dummy mutex, mapped to the underlying mutex.
  // FIXME -- this doesn't work if we acquire multiple locks.
  if (isScopedVar) {
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    SExpr SMutex(&DRE, 0, 0);

    for (unsigned i = 0, n = ExclusiveLocksToAdd.size(); i < n; ++i) {
      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive,
                                               ExclusiveLocksToAdd[i]));
    }
    for (unsigned i = 0, n = SharedLocksToAdd.size(); i < n; ++i) {
      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared,
                                               SharedLocksToAdd[i]));
    }
  }

  // Remove locks.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (unsigned i = 0, n = LocksToRemove.size(); i < n; ++i) {
    Analyzer->removeLock(FSet, LocksToRemove[i], Loc, Dtor);
  }
}


/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes.  Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      checkAccess(UO->getSubExpr(), AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
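/// For example (a sketch, assuming the usual GUARDED_BY macro), the write in
/// f() below should be flagged unless 'mu' is held:
/// \code
///   Mutex mu;
///   int a GUARDED_BY(mu);
///   void f() { a = 1; }  // warning: writing variable 'a' requires 'mu'
/// \endcode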
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // Adjust the context.
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  checkAccess(BO->getLHS(), AK_Written);
}

/// Whenever we do an LValue to RValue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}


void BuildLockset::VisitCallExpr(CallExpr *Exp) {
  if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer.
    CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        if (MD->isConst()) {
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        } else {  // FIXME -- should be AK_Written
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        }
      } else {
        if (MD->isConst())
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
        else      // FIXME -- should be AK_Written
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }
  } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    switch (OE->getOperator()) {
      case OO_Equal: {
        const Expr *Target = OE->getArg(0);
        const Expr *Source = OE->getArg(1);
        checkAccess(Target, AK_Written);
        checkAccess(Source, AK_Read);
        break;
      }
      default: {
        const Expr *Source = OE->getArg(0);
        checkAccess(Source, AK_Read);
        break;
      }
    }
  }
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}

void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr *Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  }
  // FIXME -- only handles constructors in DeclStmt below.
}

void BuildLockset::VisitDeclStmt(DeclStmt *S) {
  // Adjust the context.
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  DeclGroupRef DGrp = S->getDeclGroup();
  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
    Decl *D = *I;
    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      // Handle constructors that involve temporaries.
      if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
        E = EWC->getSubExpr();

      if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
        NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        if (!CtorD || !CtorD->hasAttrs())
          return;
        handleCall(CE, CtorD, VD);
      }
    }
  }
}



/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged.  For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and C
/// are the same.
/// In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
/// \param FSet1 The first lockset.
/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting.
/// \param LEK1 The error message to report if a mutex is missing from FSet1.
/// \param LEK2 The error message to report if a mutex is missing from FSet2.
/// \param Modify If true, modify FSet1 in place to reflect the merge.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  FactSet FSet1Orig = FSet1;

  // Find locks in FSet2 that conflict or are not in FSet1, and warn.
  for (FactSet::const_iterator I = FSet2.begin(), E = FSet2.end();
       I != E; ++I) {
    const SExpr &FSet2Mutex = FactMan[*I].MutID;
    const LockData &LDat2 = FactMan[*I].LDat;
    FactSet::iterator I1 = FSet1.findLockIter(FactMan, FSet2Mutex);

    if (I1 != FSet1.end()) {
      const LockData *LDat1 = &FactMan[*I1].LDat;
      if (LDat1->LKind != LDat2.LKind) {
        Handler.handleExclusiveAndShared(FSet2Mutex.toString(),
                                         LDat2.AcquireLoc,
                                         LDat1->AcquireLoc);
        if (Modify && LDat1->LKind != LK_Exclusive) {
          // Take the exclusive lock, which is the one in FSet2.
          *I1 = *I;
        }
      } else if (LDat1->Asserted && !LDat2.Asserted) {
        // The non-asserted lock in FSet2 is the one we want to track.
        *I1 = *I;
      }
    } else {
      if (LDat2.UnderlyingMutex.isValid()) {
        if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
          // If this is a scoped lock that manages another mutex, and if the
          // underlying mutex is still held, then warn about the underlying
          // mutex.
          Handler.handleMutexHeldEndOfScope(LDat2.UnderlyingMutex.toString(),
                                            LDat2.AcquireLoc,
                                            JoinLoc, LEK1);
        }
      } else if (!LDat2.Managed && !FSet2Mutex.isUniversal() &&
                 !LDat2.Asserted) {
        Handler.handleMutexHeldEndOfScope(FSet2Mutex.toString(),
                                          LDat2.AcquireLoc,
                                          JoinLoc, LEK1);
      }
    }
  }

  // Find locks in FSet1 that are not in FSet2, and remove them.
  for (FactSet::const_iterator I = FSet1Orig.begin(), E = FSet1Orig.end();
       I != E; ++I) {
    const SExpr &FSet1Mutex = FactMan[*I].MutID;
    const LockData &LDat1 = FactMan[*I].LDat;

    if (!FSet2.findLock(FactMan, FSet1Mutex)) {
      if (LDat1.UnderlyingMutex.isValid()) {
        if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
          // If this is a scoped lock that manages another mutex, and if the
          // underlying mutex is still held, then warn about the underlying
          // mutex.
          Handler.handleMutexHeldEndOfScope(LDat1.UnderlyingMutex.toString(),
                                            LDat1.AcquireLoc,
                                            JoinLoc, LEK1);
        }
      } else if (!LDat1.Managed && !FSet1Mutex.isUniversal() &&
                 !LDat1.Asserted) {
        Handler.handleMutexHeldEndOfScope(FSet1Mutex.toString(),
                                          LDat1.AcquireLoc,
                                          JoinLoc, LEK2);
      }
      if (Modify)
        FSet1.removeLock(FactMan, FSet1Mutex);
    }
  }
}


// Return true if block B never continues to its successors.
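// E.g. blocks containing a no-return element, such as a call to a function
// annotated 'noreturn', or blocks ending in a 'throw' expression.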
inline bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}


/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());

  // AC.dumpCFG(true);

  if (!D)
    return;  // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
    return;
  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access are disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Mark the entry block as reachable.
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;

  // Compute SSA names for local variables.
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  MutexIDList ExclusiveLocksAcquired;
  MutexIDList SharedLocksAcquired;
  MutexIDList LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to the initial lockset.  Also turn off checking for lock and unlock
  // functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
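  // E.g. (a sketch, using the usual macro spelling) for a function declared as
  //   void f() EXCLUSIVE_LOCKS_REQUIRED(mu);
  // 'mu' is added to the initial lockset here, and is also expected to be
  // held when the function exits.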
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
    const AttrVec &ArgAttrs = D->getAttrs();

    MutexIDList ExclusiveLocksToAdd;
    MutexIDList SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      Loc = Attr->getLocation();
      if (ExclusiveLocksRequiredAttr *A
            = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
      } else if (SharedLocksRequiredAttr *A
                   = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        getMutexIDs(SharedLocksToAdd, A, (Expr*) 0, D);
      } else if (UnlockFunctionAttr *A = dyn_cast<UnlockFunctionAttr>(Attr)) {
        if (!Handler.issueBetaWarnings())
          return;
        // An UNLOCK_FUNCTION() with no arguments is used to hide the
        // underlying lock implementation.  We must ignore such methods.
        if (A->args_size() == 0)
          return;
        // FIXME -- deal with exclusive vs. shared unlock functions?
        getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
        getMutexIDs(LocksReleased, A, (Expr*) 0, D);
      } else if (ExclusiveLockFunctionAttr *A
                   = dyn_cast<ExclusiveLockFunctionAttr>(Attr)) {
        if (!Handler.issueBetaWarnings())
          return;
        if (A->args_size() == 0)
          return;
        getMutexIDs(ExclusiveLocksAcquired, A, (Expr*) 0, D);
      } else if (SharedLockFunctionAttr *A
                   = dyn_cast<SharedLockFunctionAttr>(Attr)) {
        if (!Handler.issueBetaWarnings())
          return;
        if (A->args_size() == 0)
          return;
        getMutexIDs(SharedLocksAcquired, A, (Expr*) 0, D);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (unsigned i = 0, n = ExclusiveLocksToAdd.size(); i < n; ++i) {
      addLock(InitialLockset, ExclusiveLocksToAdd[i],
              LockData(Loc, LK_Exclusive));
    }
    for (unsigned i = 0, n = SharedLocksToAdd.size(); i < n; ++i) {
      addLock(InitialLockset, SharedLocksToAdd[i],
              LockData(Loc, LK_Shared));
    }
  }

  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same.  We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union.  We
    // may want to also keep the union in future.  As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M.  At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths.  Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    SmallVector<CFGBlock *, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {

      // If *PI -> CurrBlock is a back edge, skip it here; back edges are
      // handled after the block has been visited.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor.  In that case, keep the other
      // predecessor's lockset.
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }

    // Skip the rest of the block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    // Process continue and break blocks.  Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
         SpecialI < SpecialN; ++SpecialI) {
      CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
      int PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes.  FIXME: A 'break' statement might be a loop terminator,
        // but it might also be part of a switch.  Also, a subsequent
        // destructor might add to the lockset, in which case the real issue
        // might be a double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);

        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>();
          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>(
              AD.getDestructorDecl(AC.getASTContext()));
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression for the destructor's implicit object
          // argument.
          VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue,
                          AD.getTriggerStmt()->getLocEnd());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock), we need to check that the lockset held at the
    // end of CurrBlock is equal to the one held at the beginning of
    // FirstLoopBlock, which we can look up in BlockInfo.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {

      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];

  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;

  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION.  The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (unsigned i = 0, n = ExclusiveLocksAcquired.size(); i < n; ++i) {
    ExpectedExitSet.addLock(FactMan, ExclusiveLocksAcquired[i],
                            LockData(D->getLocation(), LK_Exclusive));
  }
  for (unsigned i = 0, n = SharedLocksAcquired.size(); i < n; ++i) {
    ExpectedExitSet.addLock(FactMan, SharedLocksAcquired[i],
                            LockData(D->getLocation(), LK_Shared));
  }
  for (unsigned i = 0, n = LocksReleased.size(); i < n; ++i) {
    ExpectedExitSet.removeLock(FactMan, LocksReleased[i]);
  }

  // FIXME: Should we call this function for all blocks which exit the
  // function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);
}

} // end anonymous namespace


namespace clang {
namespace thread_safety {

/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                             ThreadSafetyHandler &Handler) {
  ThreadSafetyAnalyzer Analyzer(Handler);
  Analyzer.runAnalysis(AC);
}

/// \brief Helper function that returns the LockKind required for the given
/// level of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read:
      return LK_Shared;
    case AK_Written:
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}

}} // end namespace clang::thread_safety