//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "ExprEngine"

#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node and add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = 0;
  const CFGBlock *Blk = 0;
  const StackFrameContext *SF =
      Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::pair<const Stmt*, const CFGBlock*>((Stmt*)0, (CFGBlock*)0);

    Node = *Node->pred_begin();
  }

  return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

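/// Returns true if the definition the analyzer actually inlined (recorded in
/// the callee's stack frame) differs from the declaration visible at the call
/// site, e.g. because a dynamic call was devirtualized. In that case the
/// return value may need to be adjusted to the caller's expected type.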
static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// Returns true if the CXXConstructExpr \p E was intended to construct a
/// prvalue for the region in \p V.
///
/// Note that we can't just test for rvalue vs. glvalue because
/// CXXConstructExprs embedded in DeclStmts and initializers are considered
/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
  if (E->isGLValue())
    return false;

  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return false;

  return isa<CXXTempObjectRegion>(MR);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
/// 3. Run remove dead bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.

  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a temporary prvalue, get its bindings.
      if (isTemporaryPRValue(CCE, ThisV))
        ThisV = state->getSVal(ThisV.castAs<Loc>());

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which they
  // occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, 0, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = 0;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

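/// Walks the stack frames enclosing \p LCtx, setting \p IsRecursive if the
/// declaration \p D already appears on the stack, and computing the inlining
/// stack depth. Frames for small functions (at or below the always-inline
/// size threshold) are not counted toward the depth, except for recursive
/// frames, which always count.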
void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->getName() == "std";
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we inline the call and one on which we don't. This is done to
// ensure we do not drop coverage.
// This is a map from the receiver region to a bool, specifying whether we
// consider this region's information precise along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
}
REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

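/// Inlines the given call: constructs a stack frame for the callee, maps the
/// actual arguments to the formal parameters in a new state, and schedules a
/// CallEnter node on the work list. The predecessor is removed from the node
/// builder because its successor has been enqueued manually.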
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return 0;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

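/// Evaluates a call through the generic CallEvent interface: runs the
/// pre-call checkers, lets checkers evaluate the call (falling back to
/// defaultEvalCall when none of them do), and finally runs the post-call
/// checkers.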
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)) {
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

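/// Checks the kind-specific inlining policies: whether the analyzer options
/// permit inlining this kind of call (member functions, constructors,
/// destructors, Objective-C methods), and whether the particular call is one
/// the engine currently models well enough to inline. Kind-independent limits
/// such as size and recursion are handled separately in shouldInlineCall().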
static bool shouldInlineCallKind(const CallEvent &Call,
                                 const ExplodedNode *Pred,
                                 AnalyzerOptions &Opts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return false;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return false;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return false;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return false;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return false;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return false;
    break;
  }
  return true;
}

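/// Decides whether the engine should inline the given call. Auto-synthesized
/// bodies are always inlined; otherwise the decision depends on the
/// kind-specific policy, whether a CFG and live-variables analysis are
/// available for the callee, and on the size, stack-depth, recursion, and
/// template/standard-library limits from the analyzer options.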
bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (HowToInline == Inline_None)
    return false;

  // Check if we should inline a call based on its kind.
  if (!shouldInlineCallKind(Call, Pred, Opts))
    return false;

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline if recursive or if we've reached the maximum stack depth.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline if it took too long to inline previously.
  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
    return false;

  // Or if the function is too big.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // Do not inline variadic calls (for now).
  if (Call.isVariadic())
    return false;

  // Check our template policy.
  if (getContext().getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      // Conditionally allow the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally allow the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;
    }
  }

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() > 13) {
    NumReachedInlineCountMax++;
    return false;
  }
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}

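/// Returns true if the given call is a copy or move assignment operator that
/// the AST marks as trivial, in which case the engine models it as a simple
/// copy instead of inlining it.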
static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

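/// Splits the path for a dynamically dispatched call whose receiver region
/// has not yet been bifurcated: one branch inlines the candidate definition,
/// the other evaluates the call conservatively. The chosen mode is recorded
/// per receiver region so that each region is split at most once.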
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
    State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
  return;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}