1 //=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file defines ExprEngine's support for calls and returns.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PrettyStackTraceLocationContext.h"
14 #include "clang/AST/CXXInheritance.h"
15 #include "clang/AST/Decl.h"
16 #include "clang/AST/DeclCXX.h"
17 #include "clang/Analysis/Analyses/LiveVariables.h"
18 #include "clang/Analysis/ConstructionContext.h"
19 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
20 #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
21 #include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
22 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Support/Casting.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/Support/SaveAndRestore.h"
28 #include <optional>
29 
30 using namespace clang;
31 using namespace ento;
32 
33 #define DEBUG_TYPE "ExprEngine"
34 
35 STATISTIC(NumOfDynamicDispatchPathSplits,
36   "The # of times we split the path due to imprecise dynamic dispatch info");
37 
38 STATISTIC(NumInlinedCalls,
39   "The # of times we inlined a call");
40 
41 STATISTIC(NumReachedInlineCountMax,
42   "The # of times we reached inline count maximum");
43 
44 void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
45                                   ExplodedNode *Pred) {
46   // Get the entry block in the CFG of the callee.
47   const StackFrameContext *calleeCtx = CE.getCalleeContext();
48   PrettyStackTraceLocationContext CrashInfo(calleeCtx);
49   const CFGBlock *Entry = CE.getEntry();
50 
51   // Validate the CFG.
52   assert(Entry->empty());
53   assert(Entry->succ_size() == 1);
54 
55   // Get the solitary successor.
56   const CFGBlock *Succ = *(Entry->succ_begin());
57 
58   // Construct an edge representing the starting location in the callee.
59   BlockEdge Loc(Entry, Succ, calleeCtx);
60 
61   ProgramStateRef state = Pred->getState();
62 
63   // Construct a new node, notify checkers that analysis of the function has
64   // begun, and add the resultant nodes to the worklist.
65   bool isNew;
66   ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
67   Node->addPredecessor(Pred, G);
68   if (isNew) {
69     ExplodedNodeSet DstBegin;
70     processBeginOfFunction(BC, Node, DstBegin, Loc);
71     Engine.enqueue(DstBegin);
72   }
73 }
74 
75 // Find the last statement on the path to the exploded node and the
76 // corresponding Block.
77 static std::pair<const Stmt*,
78                  const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
79   const Stmt *S = nullptr;
80   const CFGBlock *Blk = nullptr;
81   const StackFrameContext *SF = Node->getStackFrame();
82 
83   // Back up through the ExplodedGraph until we reach a statement node in this
84   // stack frame.
85   while (Node) {
86     const ProgramPoint &PP = Node->getLocation();
87 
88     if (PP.getStackFrame() == SF) {
89       if (std::optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
90         S = SP->getStmt();
91         break;
92       } else if (std::optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
93         S = CEE->getCalleeContext()->getCallSite();
94         if (S)
95           break;
96 
97         // If there is no statement, this is an implicitly-generated call.
98         // We'll walk backwards over it and then continue the loop to find
99         // an actual statement.
100         std::optional<CallEnter> CE;
101         do {
102           Node = Node->getFirstPred();
103           CE = Node->getLocationAs<CallEnter>();
104         } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
105 
106         // Continue searching the graph.
107       } else if (std::optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
108         Blk = BE->getSrc();
109       }
110     } else if (std::optional<CallEnter> CE = PP.getAs<CallEnter>()) {
111       // If we reached the CallEnter for this function, it has no statements.
112       if (CE->getCalleeContext() == SF)
113         break;
114     }
115 
116     if (Node->pred_empty())
117       return std::make_pair(nullptr, nullptr);
118 
119     Node = *Node->pred_begin();
120   }
121 
122   return std::make_pair(S, Blk);
123 }
124 
125 /// Adjusts a return value when the called function's return type does not
126 /// match the caller's expression type. This can happen when a dynamic call
127 /// is devirtualized, and the overriding method has a covariant (more specific)
128 /// return type than the parent's method. For C++ objects, this means we need
129 /// to add base casts.
130 static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
131                               StoreManager &StoreMgr) {
132   // For now, the only adjustments we handle apply only to locations.
133   if (!isa<Loc>(V))
134     return V;
135 
136   // If the types already match, don't do any unnecessary work.
137   ExpectedTy = ExpectedTy.getCanonicalType();
138   ActualTy = ActualTy.getCanonicalType();
139   if (ExpectedTy == ActualTy)
140     return V;
141 
142   // No adjustment is needed between Objective-C pointer types.
143   if (ExpectedTy->isObjCObjectPointerType() &&
144       ActualTy->isObjCObjectPointerType())
145     return V;
146 
147   // C++ object pointers may need "derived-to-base" casts.
148   const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
149   const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
150   if (ExpectedClass && ActualClass) {
151     CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
152                        /*DetectVirtual=*/false);
153     if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
154         !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
155       return StoreMgr.evalDerivedToBase(V, Paths.front());
156     }
157   }
158 
159   // Unfortunately, Objective-C does not enforce that overridden methods have
160   // covariant return types, so we can't assert that that never happens.
161   // Be safe and return UnknownVal().
162   return UnknownVal();
163 }
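// Illustrative sketch (not part of this file) of the covariant-return case
// the helper above adjusts for:
//   struct Base    { virtual Base *clone(); };
//   struct Derived : Base { Derived *clone() override; };
// When a call through a 'Base *' is devirtualized and Derived::clone() is
// inlined, the returned value has type 'Derived *', so a derived-to-base
// adjustment is needed before binding it to the caller's 'Base *' expression.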
164 
165 void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
166                                            ExplodedNode *Pred,
167                                            ExplodedNodeSet &Dst) {
168   // Find the last statement in the function and the corresponding basic block.
169   const Stmt *LastSt = nullptr;
170   const CFGBlock *Blk = nullptr;
171   std::tie(LastSt, Blk) = getLastStmt(Pred);
172   if (!Blk || !LastSt) {
173     Dst.Add(Pred);
174     return;
175   }
176 
177   // Here, we destroy the current location context. We use the current
178   // function's entire body as a diagnostic statement, with which the program
179   // point will be associated. However, we only want to use LastStmt as a
180   // reference for what to clean up if it's a ReturnStmt; otherwise, everything
181   // is dead.
182   SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
183   const LocationContext *LCtx = Pred->getLocationContext();
184   removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
185              LCtx->getAnalysisDeclContext()->getBody(),
186              ProgramPoint::PostStmtPurgeDeadSymbolsKind);
187 }
188 
189 static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
190     const StackFrameContext *calleeCtx) {
191   const Decl *RuntimeCallee = calleeCtx->getDecl();
192   const Decl *StaticDecl = Call->getDecl();
193   assert(RuntimeCallee);
194   if (!StaticDecl)
195     return true;
196   return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
197 }
198 
199 // Returns the number of elements in the array currently being destructed.
200 // If the element count is not found, 0 will be returned.
201 static unsigned getElementCountOfArrayBeingDestructed(
202     const CallEvent &Call, const ProgramStateRef State, SValBuilder &SVB) {
203   assert(isa<CXXDestructorCall>(Call) &&
204          "The call event is not a destructor call!");
205 
206   const auto &DtorCall = cast<CXXDestructorCall>(Call);
207 
208   auto ThisVal = DtorCall.getCXXThisVal();
209 
210   if (auto ThisElementRegion = dyn_cast<ElementRegion>(ThisVal.getAsRegion())) {
211     auto ArrayRegion = ThisElementRegion->getAsArrayOffset().getRegion();
212     auto ElementType = ThisElementRegion->getElementType();
213 
214     auto ElementCount =
215         getDynamicElementCount(State, ArrayRegion, SVB, ElementType);
216 
217     if (!ElementCount.isConstant())
218       return 0;
219 
220     return ElementCount.getAsInteger()->getLimitedValue();
221   }
222 
223   return 0;
224 }
225 
226 ProgramStateRef ExprEngine::removeStateTraitsUsedForArrayEvaluation(
227     ProgramStateRef State, const CXXConstructExpr *E,
228     const LocationContext *LCtx) {
229 
230   assert(LCtx && "Location context must be provided!");
231 
232   if (E) {
233     if (getPendingInitLoop(State, E, LCtx))
234       State = removePendingInitLoop(State, E, LCtx);
235 
236     if (getIndexOfElementToConstruct(State, E, LCtx))
237       State = removeIndexOfElementToConstruct(State, E, LCtx);
238   }
239 
240   if (getPendingArrayDestruction(State, LCtx))
241     State = removePendingArrayDestruction(State, LCtx);
242 
243   return State;
244 }
245 
246 /// The call exit is simulated with a sequence of nodes, which occur between
247 /// CallExitBegin and CallExitEnd. The following operations occur between the
248 /// two program points:
249 /// 1. CallExitBegin (triggers the start of call exit sequence)
250 /// 2. Bind the return value
251 /// 3. Run Remove dead bindings to clean up the dead symbols from the callee.
252 /// 4. CallExitEnd (switch to the caller context)
253 /// 5. PostStmt<CallExpr>
254 void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
255   // Step 1 CEBNode was generated before the call.
256   PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
257   const StackFrameContext *calleeCtx = CEBNode->getStackFrame();
258 
259   // The parent context might not be a stack frame, so make sure we
260   // look up the first enclosing stack frame.
261   const StackFrameContext *callerCtx =
262     calleeCtx->getParent()->getStackFrame();
263 
264   const Stmt *CE = calleeCtx->getCallSite();
265   ProgramStateRef state = CEBNode->getState();
266   // Find the last statement in the function and the corresponding basic block.
267   const Stmt *LastSt = nullptr;
268   const CFGBlock *Blk = nullptr;
269   std::tie(LastSt, Blk) = getLastStmt(CEBNode);
270 
271   // Generate a CallEvent /before/ cleaning the state, so that we can get the
272   // correct value for 'this' (if necessary).
273   CallEventManager &CEMgr = getStateManager().getCallEventManager();
274   CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
275 
276   // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
277 
278   // If this variable is set to 'true' the analyzer will evaluate the call
279   // statement we are about to exit again, instead of continuing the execution
280   // from the statement after the call. This is useful for non-POD type array
281   // construction where the CXXConstructExpr is referenced only once in the CFG,
282   // but we want to evaluate it as many times as the array has elements.
283   bool ShouldRepeatCall = false;
284 
285   if (const auto *DtorDecl =
286           dyn_cast_or_null<CXXDestructorDecl>(Call->getDecl())) {
287     if (auto Idx = getPendingArrayDestruction(state, callerCtx)) {
288       ShouldRepeatCall = *Idx > 0;
289 
290       auto ThisVal = svalBuilder.getCXXThis(DtorDecl->getParent(), calleeCtx);
291       state = state->killBinding(ThisVal);
292     }
293   }
294 
295   // If the callee returns an expression, bind its value to CallExpr.
296   if (CE) {
297     if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
298       const LocationContext *LCtx = CEBNode->getLocationContext();
299       SVal V = state->getSVal(RS, LCtx);
300 
301       // Ensure that the return type matches the type of the returned Expr.
302       if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
303         QualType ReturnedTy =
304           CallEvent::getDeclaredResultType(calleeCtx->getDecl());
305         if (!ReturnedTy.isNull()) {
306           if (const Expr *Ex = dyn_cast<Expr>(CE)) {
307             V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
308                                   getStoreManager());
309           }
310         }
311       }
312 
313       state = state->BindExpr(CE, callerCtx, V);
314     }
315 
316     // Bind the constructed object value to CXXConstructExpr.
317     if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
318       loc::MemRegionVal This =
319         svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
320       SVal ThisV = state->getSVal(This);
321       ThisV = state->getSVal(ThisV.castAs<Loc>());
322       state = state->BindExpr(CCE, callerCtx, ThisV);
323 
324       ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);
325     }
326 
327     if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
328       // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
329       // while to reach the actual CXXNewExpr element from here, so keep the
330       // region for later use.
331       // Additionally cast the return value of the inlined operator new
332       // (which is of type 'void *') to the correct object type.
333       SVal AllocV = state->getSVal(CNE, callerCtx);
334       AllocV = svalBuilder.evalCast(
335           AllocV, CNE->getType(),
336           getContext().getPointerType(getContext().VoidTy));
337 
338       state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
339                                          AllocV);
340     }
341   }
342 
343   if (!ShouldRepeatCall) {
344     state = removeStateTraitsUsedForArrayEvaluation(
345         state, dyn_cast_or_null<CXXConstructExpr>(CE), callerCtx);
346   }
347 
348   // Step 3: BindedRetNode -> CleanedNodes
349   // If we can find a statement and a block in the inlined function, run remove
350   // dead bindings before returning from the call. This is important to ensure
351   // that we report issues such as leaks in the stack contexts in which
352   // they occurred.
353   ExplodedNodeSet CleanedNodes;
354   if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
355     static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
356     PostStmt Loc(LastSt, calleeCtx, &retValBind);
357     bool isNew;
358     ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
359     BindedRetNode->addPredecessor(CEBNode, G);
360     if (!isNew)
361       return;
362 
363     NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
364     currBldrCtx = &Ctx;
365     // Here, we call the SymbolReaper with a null statement and the callee's
366     // location context, telling it to clean up everything in the callee's context
367     // (and its children). We use the callee's function body as a diagnostic
368     // statement, with which the program point will be associated.
369     removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
370                calleeCtx->getAnalysisDeclContext()->getBody(),
371                ProgramPoint::PostStmtPurgeDeadSymbolsKind);
372     currBldrCtx = nullptr;
373   } else {
374     CleanedNodes.Add(CEBNode);
375   }
376 
377   for (ExplodedNode *N : CleanedNodes) {
378     // Step 4: Generate the CallExit and leave the callee's context.
379     // CleanedNodes -> CEENode
380     CallExitEnd Loc(calleeCtx, callerCtx);
381     bool isNew;
382     ProgramStateRef CEEState = (N == CEBNode) ? state : N->getState();
383 
384     ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
385     CEENode->addPredecessor(N, G);
386     if (!isNew)
387       return;
388 
389     // Step 5: Perform the post-condition check of the CallExpr and enqueue the
390     // result onto the work list.
391     // CEENode -> Dst -> WorkList
392     NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
393     SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx);
394     SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex());
395 
396     CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
397 
398     ExplodedNodeSet DstPostCall;
399     if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
400       ExplodedNodeSet DstPostPostCallCallback;
401       getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
402                                                  CEENode, *UpdatedCall, *this,
403                                                  /*wasInlined=*/true);
404       for (ExplodedNode *I : DstPostPostCallCallback) {
405         getCheckerManager().runCheckersForNewAllocator(
406             cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
407             /*wasInlined=*/true);
408       }
409     } else {
410       getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
411                                                  *UpdatedCall, *this,
412                                                  /*wasInlined=*/true);
413     }
414     ExplodedNodeSet Dst;
415     if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
416       getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
417                                                         *this,
418                                                         /*wasInlined=*/true);
419     } else if (CE &&
420                !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
421                  AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
422       getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
423                                                  *this, /*wasInlined=*/true);
424     } else {
425       Dst.insert(DstPostCall);
426     }
427 
428     // Enqueue the next element in the block.
429     for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
430          PSI != PSE; ++PSI) {
431       unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);
432 
433       Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
434     }
435   }
436 }
437 
438 bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
439   // When there are no branches in the function, it means that there's no
440   // exponential complexity introduced by inlining such a function.
441   // Such functions also don't trigger various fundamental problems
442   // with our inlining mechanism, such as the problem of
443   // inlined defensive checks. Hence isLinear().
444   const CFG *Cfg = ADC->getCFG();
445   return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
446 }
447 
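// A "large" function (by CFG size) may still be inlined, but only a limited
// number of times; see the MaxTimesInlineLarge check in shouldInlineCall().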
448 bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
449   const CFG *Cfg = ADC->getCFG();
450   return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
451 }
452 
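// A "huge" function (too many CFG blocks) is never inlined; see the isHuge()
// check in mayInlineDecl().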
453 bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
454   const CFG *Cfg = ADC->getCFG();
455   return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
456 }
457 
458 void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
459                                bool &IsRecursive, unsigned &StackDepth) {
460   IsRecursive = false;
461   StackDepth = 0;
462 
463   while (LCtx) {
464     if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
465       const Decl *DI = SFC->getDecl();
466 
467       // Mark recursive (and mutually recursive) functions and always count
468       // them when measuring the stack depth.
469       if (DI == D) {
470         IsRecursive = true;
471         ++StackDepth;
472         LCtx = LCtx->getParent();
473         continue;
474       }
475 
476       // Do not count the small functions when determining the stack depth.
477       AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
478       if (!isSmall(CalleeADC))
479         ++StackDepth;
480     }
481     LCtx = LCtx->getParent();
482   }
483 }
484 
485 // The GDM component containing the dynamic dispatch bifurcation info. When
486 // the exact type of the receiver is not known, we want to explore both paths -
487 // one on which we do inline it and the other one on which we don't. This is
488 // done to ensure we do not drop coverage.
489 // This is the map from the receiver region to a bool, specifying whether we
490 // consider this region's information precise along the given path.
491 namespace {
492   enum DynamicDispatchMode {
493     DynamicDispatchModeInlined = 1,
494     DynamicDispatchModeConservative
495   };
496 } // end anonymous namespace
497 
498 REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
499                                const MemRegion *, unsigned)
500 REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)
501 
502 void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
503                               NodeBuilder &Bldr, ExplodedNode *Pred,
504                               ProgramStateRef State) {
505   ProgramStateRef ConservativeEvalState = nullptr;
506   if (Call.isForeign() && !isSecondPhaseCTU()) {
507     const auto IK = AMgr.options.getCTUPhase1Inlining();
508     const bool DoInline = IK == CTUPhase1InliningKind::All ||
509                           (IK == CTUPhase1InliningKind::Small &&
510                            isSmall(AMgr.getAnalysisDeclContext(D)));
511     if (DoInline) {
512       inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
513       return;
514     }
515     const bool BState = State->get<CTUDispatchBifurcation>();
516     if (!BState) { // This is the first time we see this foreign function.
517       // Enqueue it to be analyzed in the second (ctu) phase.
518       inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
519       // Conservatively evaluate in the first phase.
520       ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
521       conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
522     } else {
523       conservativeEvalCall(Call, Bldr, Pred, State);
524     }
525     return;
526   }
527   inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
528 }
529 
530 void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
531                             const Decl *D, NodeBuilder &Bldr,
532                             ExplodedNode *Pred, ProgramStateRef State) {
533   assert(D);
534 
535   const LocationContext *CurLC = Pred->getLocationContext();
536   const StackFrameContext *CallerSFC = CurLC->getStackFrame();
537   const LocationContext *ParentOfCallee = CallerSFC;
538   if (Call.getKind() == CE_Block &&
539       !cast<BlockCall>(Call).isConversionFromLambda()) {
540     const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
541     assert(BR && "If we have the block definition we should have its region");
542     AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
543     ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
544                                                          cast<BlockDecl>(D),
545                                                          BR);
546   }
547 
548   // This may be NULL, but that's fine.
549   const Expr *CallE = Call.getOriginExpr();
550 
551   // Construct a new stack frame for the callee.
552   AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
553   const StackFrameContext *CalleeSFC =
554       CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
555                                currBldrCtx->blockCount(), currStmtIdx);
556 
557   CallEnter Loc(CallE, CalleeSFC, CurLC);
558 
559   // Construct a new state which contains the mapping from actual to
560   // formal arguments.
561   State = State->enterStackFrame(Call, CalleeSFC);
562 
563   bool isNew;
564   if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
565     N->addPredecessor(Pred, G);
566     if (isNew)
567       WList->enqueue(N);
568   }
569 
570   // If we decided to inline the call, the successor has been manually
571   // added onto the work list so remove it from the node builder.
572   Bldr.takeNodes(Pred);
573 
574   NumInlinedCalls++;
575   Engine.FunctionSummaries->bumpNumTimesInlined(D);
576 
577   // Do not mark as visited in the 2nd run (CTUWList), so the function will
578   // be visited as top-level; this way we won't lose reports in non-CTU mode.
579   // Consider the case when a function in a foreign TU calls back into the
580   // main TU.
581   // Note, during the 1st run, it doesn't matter if we mark the foreign
582   // functions as visited (or not) because they can never appear as a top level
583   // function in the main TU.
584   if (!isSecondPhaseCTU())
585     // Mark the decl as visited.
586     if (VisitedCallees)
587       VisitedCallees->insert(D);
588 }
589 
590 static ProgramStateRef getInlineFailedState(ProgramStateRef State,
591                                             const Stmt *CallE) {
592   const void *ReplayState = State->get<ReplayWithoutInlining>();
593   if (!ReplayState)
594     return nullptr;
595 
596   assert(ReplayState == CallE && "Backtracked to the wrong call.");
597   (void)CallE;
598 
599   return State->remove<ReplayWithoutInlining>();
600 }
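// A non-null result from getInlineFailedState() means this call site was
// previously marked for replay without inlining; defaultEvalCall() uses it to
// evaluate the call conservatively instead of attempting to inline it again.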
601 
602 void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
603                                ExplodedNodeSet &dst) {
604   // Perform the previsit of the CallExpr.
605   ExplodedNodeSet dstPreVisit;
606   getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
607 
608   // Get the call in its initial state. We use this as a template to perform
609   // all the checks.
610   CallEventManager &CEMgr = getStateManager().getCallEventManager();
611   CallEventRef<> CallTemplate = CEMgr.getSimpleCall(
612       CE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
613 
614   // Evaluate the function call.  We try each of the checkers
615   // to see if they can evaluate the function call.
616   ExplodedNodeSet dstCallEvaluated;
617   for (ExplodedNode *N : dstPreVisit) {
618     evalCall(dstCallEvaluated, N, *CallTemplate);
619   }
620 
621   // Finally, perform the post-condition check of the CallExpr and store
622   // the created nodes in 'Dst'.
623   // Note that if the call was inlined, dstCallEvaluated will be empty.
624   // The post-CallExpr check will occur in processCallExit.
625   getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
626                                              *this);
627 }
628 
629 ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
630                                                        const CallEvent &Call) {
631   const Expr *E = Call.getOriginExpr();
632   // FIXME: Constructors to placement arguments of operator new
633   // are not supported yet.
634   if (!E || isa<CXXNewExpr>(E))
635     return State;
636 
637   const LocationContext *LC = Call.getLocationContext();
638   for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
639     unsigned I = Call.getASTArgumentIndex(CallI);
640     if (std::optional<SVal> V = getObjectUnderConstruction(State, {E, I}, LC)) {
641       SVal VV = *V;
642       (void)VV;
643       assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
644                  ->getStackFrame()->getParent()
645                  ->getStackFrame() == LC->getStackFrame());
646       State = finishObjectConstruction(State, {E, I}, LC);
647     }
648   }
649 
650   return State;
651 }
652 
653 void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
654                                             ExplodedNode *Pred,
655                                             const CallEvent &Call) {
656   ProgramStateRef State = Pred->getState();
657   ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
658   if (CleanedState == State) {
659     Dst.insert(Pred);
660     return;
661   }
662 
663   const Expr *E = Call.getOriginExpr();
664   const LocationContext *LC = Call.getLocationContext();
665   NodeBuilder B(Pred, Dst, *currBldrCtx);
666   static SimpleProgramPointTag Tag("ExprEngine",
667                                    "Finish argument construction");
668   PreStmt PP(E, LC, &Tag);
669   B.generateNode(PP, CleanedState, Pred);
670 }
671 
672 void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
673                           const CallEvent &Call) {
674   // WARNING: At this time, the state attached to 'Call' may be older than the
675   // state in 'Pred'. This is a minor optimization since CheckerManager will
676   // use an updated CallEvent instance when calling checkers, but if 'Call' is
677   // ever used directly in this function all callers should be updated to pass
678   // the most recent state. (It is probably not worth doing the work here since
679   // for some callers this will not be necessary.)
680 
681   // Run any pre-call checks using the generic call interface.
682   ExplodedNodeSet dstPreVisit;
683   getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
684                                             Call, *this);
685 
686   // Actually evaluate the function call.  We try each of the checkers
687   // to see if they can evaluate the function call, and get a callback at
688   // defaultEvalCall if all of them fail.
689   ExplodedNodeSet dstCallEvaluated;
690   getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
691                                              Call, *this, EvalCallOptions());
692 
693   // If there were other constructors called for object-type arguments
694   // of this call, clean them up.
695   ExplodedNodeSet dstArgumentCleanup;
696   for (ExplodedNode *I : dstCallEvaluated)
697     finishArgumentConstruction(dstArgumentCleanup, I, Call);
698 
699   ExplodedNodeSet dstPostCall;
700   getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
701                                              Call, *this);
702 
703   // Handle escaping of symbols conjured while invalidating the regions above.
704   // Note that, for inlined calls, the nodes were put back into the worklist,
705   // so we can assume that every node belongs to a conservative call at this
706   // point.
707 
708   // Run pointerEscape callback with the newly conjured symbols.
709   SmallVector<std::pair<SVal, SVal>, 8> Escaped;
710   for (ExplodedNode *I : dstPostCall) {
711     NodeBuilder B(I, Dst, *currBldrCtx);
712     ProgramStateRef State = I->getState();
713     Escaped.clear();
714     {
715       unsigned Arg = -1;
716       for (const ParmVarDecl *PVD : Call.parameters()) {
717         ++Arg;
718         QualType ParamTy = PVD->getType();
719         if (ParamTy.isNull() ||
720             (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
721           continue;
722         QualType Pointee = ParamTy->getPointeeType();
723         if (Pointee.isConstQualified() || Pointee->isVoidType())
724           continue;
725         if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
726           Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
727       }
728     }
729 
730     State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
731                                         PSK_EscapeOutParameters, &Call);
732 
733     if (State == I->getState())
734       Dst.insert(I);
735     else
736       B.generateNode(I->getLocation(), State, I);
737   }
738 }
739 
740 ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
741                                             const LocationContext *LCtx,
742                                             ProgramStateRef State) {
743   const Expr *E = Call.getOriginExpr();
744   if (!E)
745     return State;
746 
747   // Some method families have known return values.
748   if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
749     switch (Msg->getMethodFamily()) {
750     default:
751       break;
752     case OMF_autorelease:
753     case OMF_retain:
754     case OMF_self: {
755       // These methods return their receivers.
756       return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
757     }
758     }
759   } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
760     SVal ThisV = C->getCXXThisVal();
761     ThisV = State->getSVal(ThisV.castAs<Loc>());
762     return State->BindExpr(E, LCtx, ThisV);
763   }
764 
765   SVal R;
766   QualType ResultTy = Call.getResultType();
767   unsigned Count = currBldrCtx->blockCount();
768   if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
769     // Conjure a temporary if the function returns an object by value.
770     SVal Target;
771     assert(RTC->getStmt() == Call.getOriginExpr());
772     EvalCallOptions CallOpts; // FIXME: We won't really need those.
773     std::tie(State, Target) = handleConstructionContext(
774         Call.getOriginExpr(), State, currBldrCtx, LCtx,
775         RTC->getConstructionContext(), CallOpts);
776     const MemRegion *TargetR = Target.getAsRegion();
777     assert(TargetR);
778     // Invalidate the region so that it doesn't look uninitialized. If this is
779     // a field or element constructor, we do not want to invalidate
780     // the whole structure. Pointer escape is meaningless because
781     // the structure is a product of conservative evaluation
782     // and therefore contains nothing interesting at this point.
783     RegionAndSymbolInvalidationTraits ITraits;
784     ITraits.setTrait(TargetR,
785         RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
786     State = State->invalidateRegions(TargetR, E, Count, LCtx,
787                                      /* CausesPointerEscape=*/false, nullptr,
788                                      &Call, &ITraits);
789 
790     R = State->getSVal(Target.castAs<Loc>(), E->getType());
791   } else {
792     // Conjure a symbol if the return value is unknown.
793 
794     // See if we need to conjure a heap pointer instead of
795     // a regular unknown pointer.
796     const auto *CNE = dyn_cast<CXXNewExpr>(E);
797     if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
798       R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
799       const MemRegion *MR = R.getAsRegion()->StripCasts();
800 
801       // Store the extent of the allocated object(s).
802       SVal ElementCount;
803       if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
804         ElementCount = State->getSVal(SizeExpr, LCtx);
805       } else {
806         ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
807       }
808 
809       SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);
810 
811       SVal Size =
812           svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
813                                 svalBuilder.getArrayIndexType());
814 
815       // FIXME: This line is to prevent a crash. For more details please check
816       // issue #56264.
817       if (Size.isUndef())
818         Size = UnknownVal();
819 
820       State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
821                                svalBuilder);
822     } else {
823       R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
824     }
825   }
826   return State->BindExpr(E, LCtx, R);
827 }
828 
829 // Conservatively evaluate call by invalidating regions and binding
830 // a conjured return value.
831 void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
832                                       ExplodedNode *Pred, ProgramStateRef State) {
833   State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
834   State = bindReturnValue(Call, Pred->getLocationContext(), State);
835 
836   // And make the result node.
837   static SimpleProgramPointTag PT("ExprEngine", "Conservative eval call");
838   Bldr.generateNode(Call.getProgramPoint(false, &PT), State, Pred);
839 }
840 
841 ExprEngine::CallInlinePolicy
842 ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
843                               AnalyzerOptions &Opts,
844                               const EvalCallOptions &CallOpts) {
845   const LocationContext *CurLC = Pred->getLocationContext();
846   const StackFrameContext *CallerSFC = CurLC->getStackFrame();
847   switch (Call.getKind()) {
848   case CE_Function:
849   case CE_CXXStaticOperator:
850   case CE_Block:
851     break;
852   case CE_CXXMember:
853   case CE_CXXMemberOperator:
854     if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
855       return CIP_DisallowedAlways;
856     break;
857   case CE_CXXConstructor: {
858     if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
859       return CIP_DisallowedAlways;
860 
861     const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
862 
863     const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
864 
865     auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
866     const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
867                                         : nullptr;
868 
869     if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
870         !Opts.MayInlineCXXAllocator)
871       return CIP_DisallowedOnce;
872 
873     if (CallOpts.IsArrayCtorOrDtor) {
874       if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
875         return CIP_DisallowedOnce;
876     }
877 
878     // Inlining constructors requires including initializers in the CFG.
879     const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
880     assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
881     (void)ADC;
882 
883     // If the destructor is trivial, it's always safe to inline the constructor.
884     if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
885       break;
886 
887     // For other types, only inline constructors if destructor inlining is
888     // also enabled.
889     if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
890       return CIP_DisallowedAlways;
891 
892     if (CtorExpr->getConstructionKind() == CXXConstructionKind::Complete) {
893       // If we don't handle temporary destructors, we shouldn't inline
894       // their constructors.
895       if (CallOpts.IsTemporaryCtorOrDtor &&
896           !Opts.ShouldIncludeTemporaryDtorsInCFG)
897         return CIP_DisallowedOnce;
898 
899       // If we did not find the correct this-region, it would be pointless
900       // to inline the constructor. Instead we will simply invalidate
901       // the fake temporary target.
902       if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
903         return CIP_DisallowedOnce;
904 
905       // If the temporary is lifetime-extended by binding it to a reference-type
906       // field within an aggregate, automatic destructors don't work properly.
907       if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
908         return CIP_DisallowedOnce;
909     }
910 
911     break;
912   }
913   case CE_CXXInheritedConstructor: {
914     // This doesn't really increase the cost of inlining ever, because
915     // the stack frame of the inherited constructor is trivial.
916     return CIP_Allowed;
917   }
918   case CE_CXXDestructor: {
919     if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
920       return CIP_DisallowedAlways;
921 
922     // Inlining destructors requires building the CFG correctly.
923     const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
924     assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
925     (void)ADC;
926 
927     if (CallOpts.IsArrayCtorOrDtor) {
928       if (!shouldInlineArrayDestruction(getElementCountOfArrayBeingDestructed(
929               Call, Pred->getState(), svalBuilder))) {
930         return CIP_DisallowedOnce;
931       }
932     }
933 
934     // Allow disabling temporary destructor inlining with a separate option.
935     if (CallOpts.IsTemporaryCtorOrDtor &&
936         !Opts.MayInlineCXXTemporaryDtors)
937       return CIP_DisallowedOnce;
938 
939     // If we did not find the correct this-region, it would be pointless
940     // to inline the destructor. Instead we will simply invalidate
941     // the fake temporary target.
942     if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
943       return CIP_DisallowedOnce;
944     break;
945   }
946   case CE_CXXDeallocator:
947     [[fallthrough]];
948   case CE_CXXAllocator:
949     if (Opts.MayInlineCXXAllocator)
950       break;
951     // Do not inline allocators until we model deallocators.
952     // This is unfortunate, but basically necessary for smart pointers and such.
953     return CIP_DisallowedAlways;
954   case CE_ObjCMessage:
955     if (!Opts.MayInlineObjCMethod)
956       return CIP_DisallowedAlways;
957     if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
958           Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
959       return CIP_DisallowedAlways;
960     break;
961   }
962 
963   return CIP_Allowed;
964 }
965 
966 /// Returns true if the given C++ class contains a member with the given name.
967 static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
968                       StringRef Name) {
969   const IdentifierInfo &II = Ctx.Idents.get(Name);
970   return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
971 }
972 
973 /// Returns true if the given C++ class is a container or iterator.
974 ///
975 /// Our heuristic for this is whether it contains a method named 'begin()' or a
976 /// nested type named 'iterator' or 'iterator_category'.
977 static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
978   return hasMember(Ctx, RD, "begin") ||
979          hasMember(Ctx, RD, "iterator") ||
980          hasMember(Ctx, RD, "iterator_category");
981 }
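// For illustration (hypothetical type, not from this file): a class such as
//   struct MyVec { int *begin(); int *end(); };
// is classified as a container by this heuristic simply because it has a
// member named 'begin'.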
982 
983 /// Returns true if the given function refers to a method of a C++ container
984 /// or iterator.
985 ///
986 /// We generally do a poor job modeling most containers right now, and might
987 /// prefer not to inline their methods.
988 static bool isContainerMethod(const ASTContext &Ctx,
989                               const FunctionDecl *FD) {
990   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
991     return isContainerClass(Ctx, MD->getParent());
992   return false;
993 }
994 
995 /// Returns true if the given function is the destructor of a class named
996 /// "shared_ptr".
997 static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
998   const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
999   if (!Dtor)
1000     return false;
1001 
1002   const CXXRecordDecl *RD = Dtor->getParent();
1003   if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
1004     if (II->isStr("shared_ptr"))
1005         return true;
1006 
1007   return false;
1008 }
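// Note that the check above matches purely by class name, so the destructor
// of any class named 'shared_ptr' qualifies; the typical case is the implicit
// ~shared_ptr() call when a std::shared_ptr local goes out of scope.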
1009 
1010 /// Returns true if the function in \p CalleeADC may be inlined in general.
1011 ///
1012 /// This checks static properties of the function, such as its signature and
1013 /// CFG, to determine whether the analyzer should ever consider inlining it,
1014 /// in any context.
1015 bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
1016   AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
1017   // FIXME: Do not inline variadic calls.
1018   if (CallEvent::isVariadic(CalleeADC->getDecl()))
1019     return false;
1020 
1021   // Check certain C++-related inlining policies.
1022   ASTContext &Ctx = CalleeADC->getASTContext();
1023   if (Ctx.getLangOpts().CPlusPlus) {
1024     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
1025       // Conditionally control the inlining of template functions.
1026       if (!Opts.MayInlineTemplateFunctions)
1027         if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
1028           return false;
1029 
1030       // Conditionally control the inlining of C++ standard library functions.
1031       if (!Opts.MayInlineCXXStandardLibrary)
1032         if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
1033           if (AnalysisDeclContext::isInStdNamespace(FD))
1034             return false;
1035 
1036       // Conditionally control the inlining of methods on objects that look
1037       // like C++ containers.
1038       if (!Opts.MayInlineCXXContainerMethods)
1039         if (!AMgr.isInCodeFile(FD->getLocation()))
1040           if (isContainerMethod(Ctx, FD))
1041             return false;
1042 
1043       // Conditionally control the inlining of the destructor of C++ shared_ptr.
1044       // We don't currently do a good job modeling shared_ptr because we can't
1045       // see the reference count, so treating as opaque is probably the best
1046       // idea.
1047       if (!Opts.MayInlineCXXSharedPtrDtor)
1048         if (isCXXSharedPtrDtor(FD))
1049           return false;
1050     }
1051   }
1052 
1053   // It is possible that the CFG cannot be constructed.
1054   // Be safe, and check if the CalleeCFG is valid.
1055   const CFG *CalleeCFG = CalleeADC->getCFG();
1056   if (!CalleeCFG)
1057     return false;
1058 
1059   // Do not inline large functions.
1060   if (isHuge(CalleeADC))
1061     return false;
1062 
1063   // It is possible that the live variables analysis cannot be
1064   // run.  If so, bail out.
1065   if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
1066     return false;
1067 
1068   return true;
1069 }
1070 
1071 bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
1072                                   const ExplodedNode *Pred,
1073                                   const EvalCallOptions &CallOpts) {
1074   if (!D)
1075     return false;
1076 
1077   AnalysisManager &AMgr = getAnalysisManager();
1078   AnalyzerOptions &Opts = AMgr.options;
1079   AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
1080   AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
1081 
1082   // The auto-synthesized bodies are essential to inline as they are
1083   // usually small and commonly used. Note: we should do this check early on to
1084   // ensure we always inline these calls.
1085   if (CalleeADC->isBodyAutosynthesized())
1086     return true;
1087 
1088   if (!AMgr.shouldInlineCall())
1089     return false;
1090 
1091   // Check if this function has been marked as non-inlinable.
1092   std::optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
1093   if (MayInline) {
1094     if (!*MayInline)
1095       return false;
1096 
1097   } else {
1098     // We haven't actually checked the static properties of this function yet.
1099     // Do that now, and record our decision in the function summaries.
1100     if (mayInlineDecl(CalleeADC)) {
1101       Engine.FunctionSummaries->markMayInline(D);
1102     } else {
1103       Engine.FunctionSummaries->markShouldNotInline(D);
1104       return false;
1105     }
1106   }
1107 
1108   // Check if we should inline a call based on its kind.
1109   // FIXME: this checks both static and dynamic properties of the call, which
1110   // means we're redoing a bit of work that could be cached in the function
1111   // summary.
1112   CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
1113   if (CIP != CIP_Allowed) {
1114     if (CIP == CIP_DisallowedAlways) {
1115       assert(!MayInline || *MayInline);
1116       Engine.FunctionSummaries->markShouldNotInline(D);
1117     }
1118     return false;
1119   }
1120 
1121   // Do not inline if recursive or we've reached max stack frame count.
1122   bool IsRecursive = false;
1123   unsigned StackDepth = 0;
1124   examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
1125   if ((StackDepth >= Opts.InlineMaxStackDepth) &&
1126       (!isSmall(CalleeADC) || IsRecursive))
1127     return false;
1128 
1129   // Do not inline large functions too many times.
1130   if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
1131        Opts.MaxTimesInlineLarge) &&
1132       isLarge(CalleeADC)) {
1133     NumReachedInlineCountMax++;
1134     return false;
1135   }
1136 
1137   if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
1138     return false;
1139 
1140   return true;
1141 }
1142 
1143 bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
1144                                                const CXXConstructExpr *CE,
1145                                                const LocationContext *LCtx) {
1146   if (!CE)
1147     return false;
1148 
1149   // FIXME: Handle other array types.
1150   if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType())) {
1151     unsigned ArrSize = getContext().getConstantArrayElementCount(CAT);
1152 
1153     // This might seem counter-intuitive at first glance, but the functions are
1154     // closely related. Reasoning about destructors depends only on the type
1155     // of the expression that initialized the memory region, which is the
1156     // CXXConstructExpr. So to avoid code repetition, the work is delegated
1157     // to the function that reasons about destructor inlining. Also note that
1158     // if the constructors of the array elements are inlined, the destructors
1159     // can also be inlined, and if the destructors can be inlined, it's safe to
1160     // inline the constructors.
1161     return shouldInlineArrayDestruction(ArrSize);
1162   }
1163 
1164   // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
1165   if (auto Size = getPendingInitLoop(State, CE, LCtx))
1166     return shouldInlineArrayDestruction(*Size);
1167 
1168   return false;
1169 }
1170 
1171 bool ExprEngine::shouldInlineArrayDestruction(uint64_t Size) {
1172 
1173   uint64_t maxAllowedSize = AMgr.options.maxBlockVisitOnPath;
1174 
1175   // Declaring a 0-element array is also possible.
1176   return Size <= maxAllowedSize && Size > 0;
1177 }
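// The bound above reuses maxBlockVisitOnPath (the option that also caps how
// many times a single basic block may be visited on a path), so construction
// and destruction of very large arrays fall back to conservative evaluation.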
1178 
1179 bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
1180                                       const CXXConstructExpr *E,
1181                                       const LocationContext *LCtx) {
1182 
1183   if (!E)
1184     return false;
1185 
1186   auto Ty = E->getType();
1187 
1188   // FIXME: Handle non-constant array types
1189   if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
1190     unsigned Size = getContext().getConstantArrayElementCount(CAT);
1191     return Size > getIndexOfElementToConstruct(State, E, LCtx);
1192   }
1193 
1194   if (auto Size = getPendingInitLoop(State, E, LCtx))
1195     return Size > getIndexOfElementToConstruct(State, E, LCtx);
1196 
1197   return false;
1198 }
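// Example of the repeat-call mechanism (hypothetical code, not from this
// file): for 'S arr[3];' the CFG references a single CXXConstructExpr, so
// processCallExit() re-enqueues the same call site until the index of the
// element to construct reaches 3.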
1199 
1200 static bool isTrivialObjectAssignment(const CallEvent &Call) {
1201   const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
1202   if (!ICall)
1203     return false;
1204 
1205   const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
1206   if (!MD)
1207     return false;
1208   if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
1209     return false;
1210 
1211   return MD->isTrivial();
1212 }
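// For example, the implicitly-defined copy assignment operator of a struct
// with only scalar members is trivial, so defaultEvalCall() models it with
// performTrivialCopy() instead of inlining a call.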
1213 
1214 void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
1215                                  const CallEvent &CallTemplate,
1216                                  const EvalCallOptions &CallOpts) {
1217   // Make sure we have the most recent state attached to the call.
1218   ProgramStateRef State = Pred->getState();
1219   CallEventRef<> Call = CallTemplate.cloneWithState(State);
1220 
1221   // Special-case trivial assignment operators.
1222   if (isTrivialObjectAssignment(*Call)) {
1223     performTrivialCopy(Bldr, Pred, *Call);
1224     return;
1225   }
1226 
1227   // Try to inline the call.
1228   // The origin expression here is just used as a kind of checksum;
1229   // this should still be safe even for CallEvents that don't come from exprs.
1230   const Expr *E = Call->getOriginExpr();
1231 
1232   ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
1233   if (InlinedFailedState) {
1234     // If we already tried once and failed, make sure we don't retry later.
1235     State = InlinedFailedState;
1236   } else {
1237     RuntimeDefinition RD = Call->getRuntimeDefinition();
1238     Call->setForeign(RD.isForeign());
1239     const Decl *D = RD.getDecl();
1240     if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
1241       if (RD.mayHaveOtherDefinitions()) {
1242         AnalyzerOptions &Options = getAnalysisManager().options;
1243 
1244         // Explore with and without inlining the call.
1245         if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
1246           BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
1247           return;
1248         }
1249 
1250         // Don't inline if we're not in any dynamic dispatch mode.
1251         if (Options.getIPAMode() != IPAK_DynamicDispatch) {
1252           conservativeEvalCall(*Call, Bldr, Pred, State);
1253           return;
1254         }
1255       }
1256       ctuBifurcate(*Call, D, Bldr, Pred, State);
1257       return;
1258     }
1259   }
1260 
1261   // If we can't inline it, clean up the state traits that are used only when
1262   // the function is inlined.
1263   State = removeStateTraitsUsedForArrayEvaluation(
1264       State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
1265 
1266   // Also handle the return value and invalidate the regions.
1267   conservativeEvalCall(*Call, Bldr, Pred, State);
1268 }
1269 
1270 void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
1271                                const CallEvent &Call, const Decl *D,
1272                                NodeBuilder &Bldr, ExplodedNode *Pred) {
1273   assert(BifurReg);
1274   BifurReg = BifurReg->StripCasts();
1275 
1276   // Check if we've performed the split already - note, we only want
1277   // to split the path once per memory region.
1278   ProgramStateRef State = Pred->getState();
1279   const unsigned *BState =
1280                         State->get<DynamicDispatchBifurcationMap>(BifurReg);
1281   if (BState) {
1282     // If we are on "inline path", keep inlining if possible.
1283     if (*BState == DynamicDispatchModeInlined)
1284       ctuBifurcate(Call, D, Bldr, Pred, State);
1285     // If inline failed, or we are on the path where we assume we
1286     // don't have enough info about the receiver to inline, conjure the
1287     // return value and invalidate the regions.
1288     conservativeEvalCall(Call, Bldr, Pred, State);
1289     return;
1290   }
1291 
1292   // If we got here, this is the first time we process a message to this
1293   // region, so split the path.
1294   ProgramStateRef IState =
1295       State->set<DynamicDispatchBifurcationMap>(BifurReg,
1296                                                DynamicDispatchModeInlined);
1297   ctuBifurcate(Call, D, Bldr, Pred, IState);
1298 
1299   ProgramStateRef NoIState =
1300       State->set<DynamicDispatchBifurcationMap>(BifurReg,
1301                                                DynamicDispatchModeConservative);
1302   conservativeEvalCall(Call, Bldr, Pred, NoIState);
1303 
1304   NumOfDynamicDispatchPathSplits++;
1305 }
1306 
1307 void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
1308                                  ExplodedNodeSet &Dst) {
1309   ExplodedNodeSet dstPreVisit;
1310   getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
1311 
1312   StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
1313 
1314   if (RS->getRetValue()) {
1315     for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
1316                                   ei = dstPreVisit.end(); it != ei; ++it) {
1317       B.generateNode(RS, *it, (*it)->getState());
1318     }
1319   }
1320 }
1321