1//===- ThreadSafety.cpp ---------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
10// conditions), based off of an annotation system.
11//
12// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
13// for more information.
14//
15//===----------------------------------------------------------------------===//
16
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
58
59using namespace clang;
60using namespace threadSafety;
61
// Key method definition: the out-of-line virtual destructor anchors the
// class's vtable to this translation unit.
ThreadSafetyHandler::~ThreadSafetyHandler() = default;
64
65/// Issue a warning about an invalid lock expression
66static void warnInvalidLock(ThreadSafetyHandler &Handler,
67 const Expr *MutexExp, const NamedDecl *D,
68 const Expr *DeclExp, StringRef Kind) {
69 SourceLocation Loc;
70 if (DeclExp)
71 Loc = DeclExp->getExprLoc();
72
73 // FIXME: add a note about the attribute location in MutexExp or D
74 if (Loc.isValid())
75 Handler.handleInvalidLockExp(Loc);
76}
77
78namespace {
79
80/// A set of CapabilityExpr objects, which are compiled from thread safety
81/// attributes on a function.
82class CapExprSet : public SmallVector<CapabilityExpr, 4> {
83public:
84 /// Push M onto list, but discard duplicates.
85 void push_back_nodup(const CapabilityExpr &CapE) {
86 if (llvm::none_of(Range&: *this, P: [=](const CapabilityExpr &CapE2) {
87 return CapE.equals(other: CapE2);
88 }))
89 push_back(Elt: CapE);
90 }
91};
92
93class FactManager;
94class FactSet;
95
/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  /// Discriminates the concrete subclass of this entry
  /// (see getFactEntryKind()).
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed, ///< The fact has been acquired through a scoped capability.
  };

private:
  /// Which concrete subclass this is; immutable after construction.
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  // Non-virtual destructor: facts are only destroyed through their concrete
  // type (they live in a bump allocator and are trivially destructible).
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  /// Hook invoked when this fact is dropped while intersecting two fact sets
  /// (e.g. at a CFG join at \p JoinLoc); given \p Handler so the subclass can
  /// report the removal.
  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  /// Hook invoked when capability \p entry is acquired while this fact is
  /// already held.
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  /// Hook invoked when capability \p Cp is released at \p UnlockLoc.
  /// \p FullyRemove indicates whether the fact should be removed entirely.
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
157
158using FactID = unsigned short;
159
160/// FactManager manages the memory for all facts that are created during
161/// the analysis of a single routine.
162class FactManager {
163private:
164 llvm::BumpPtrAllocator &Alloc;
165 std::vector<const FactEntry *> Facts;
166
167public:
168 FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
169
170 template <typename T, typename... ArgTypes>
171 T *createFact(ArgTypes &&...Args) {
172 static_assert(std::is_trivially_destructible_v<T>);
173 return T::create(Alloc, std::forward<ArgTypes>(Args)...);
174 }
175
176 FactID newFact(const FactEntry *Entry) {
177 Facts.push_back(x: Entry);
178 assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
179 "FactID space exhausted");
180 return static_cast<unsigned short>(Facts.size() - 1);
181 }
182
183 const FactEntry &operator[](FactID F) const { return *Facts[F]; }
184};
185
186/// A FactSet is the set of facts that are known to be true at a
187/// particular program point. FactSets must be small, because they are
188/// frequently copied, and are thus implemented as a set of indices into a
189/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
190/// locks, so we can get away with doing a linear search for lookup. Note
191/// that a hashtable or map is inappropriate in this case, because lookups
192/// may involve partial pattern matches, rather than exact matches.
193class FactSet {
194private:
195 using FactVec = SmallVector<FactID, 4>;
196
197 FactVec FactIDs;
198
199public:
200 using iterator = FactVec::iterator;
201 using const_iterator = FactVec::const_iterator;
202
203 iterator begin() { return FactIDs.begin(); }
204 const_iterator begin() const { return FactIDs.begin(); }
205
206 iterator end() { return FactIDs.end(); }
207 const_iterator end() const { return FactIDs.end(); }
208
209 bool isEmpty() const { return FactIDs.size() == 0; }
210
211 // Return true if the set contains only negative facts
212 bool isEmpty(FactManager &FactMan) const {
213 for (const auto FID : *this) {
214 if (!FactMan[FID].negative())
215 return false;
216 }
217 return true;
218 }
219
220 void addLockByID(FactID ID) { FactIDs.push_back(Elt: ID); }
221
222 FactID addLock(FactManager &FM, const FactEntry *Entry) {
223 FactID F = FM.newFact(Entry);
224 FactIDs.push_back(Elt: F);
225 return F;
226 }
227
228 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
229 unsigned n = FactIDs.size();
230 if (n == 0)
231 return false;
232
233 for (unsigned i = 0; i < n-1; ++i) {
234 if (FM[FactIDs[i]].matches(other: CapE)) {
235 FactIDs[i] = FactIDs[n-1];
236 FactIDs.pop_back();
237 return true;
238 }
239 }
240 if (FM[FactIDs[n-1]].matches(other: CapE)) {
241 FactIDs.pop_back();
242 return true;
243 }
244 return false;
245 }
246
247 std::optional<FactID> replaceLock(FactManager &FM, iterator It,
248 const FactEntry *Entry) {
249 if (It == end())
250 return std::nullopt;
251 FactID F = FM.newFact(Entry);
252 *It = F;
253 return F;
254 }
255
256 std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
257 const FactEntry *Entry) {
258 return replaceLock(FM, It: findLockIter(FM, CapE), Entry);
259 }
260
261 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
262 return llvm::find_if(Range&: *this,
263 P: [&](FactID ID) { return FM[ID].matches(other: CapE); });
264 }
265
266 const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
267 auto I =
268 llvm::find_if(Range: *this, P: [&](FactID ID) { return FM[ID].matches(other: CapE); });
269 return I != end() ? &FM[*I] : nullptr;
270 }
271
272 const FactEntry *findLockUniv(FactManager &FM,
273 const CapabilityExpr &CapE) const {
274 auto I = llvm::find_if(
275 Range: *this, P: [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
276 return I != end() ? &FM[*I] : nullptr;
277 }
278
279 const FactEntry *findPartialMatch(FactManager &FM,
280 const CapabilityExpr &CapE) const {
281 auto I = llvm::find_if(Range: *this, P: [&](FactID ID) -> bool {
282 return FM[ID].partiallyMatches(other: CapE);
283 });
284 return I != end() ? &FM[*I] : nullptr;
285 }
286
287 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
288 auto I = llvm::find_if(
289 Range: *this, P: [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
290 return I != end();
291 }
292};
293
294class ThreadSafetyAnalyzer;
295
296} // namespace
297
298namespace clang {
299namespace threadSafety {
300
/// Records ordering relationships between capability declarations and checks
/// acquisitions against that ordering (see checkBeforeAfter). The member
/// functions are defined elsewhere in this file.
class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  /// Ordering information attached to a single capability declaration.
  struct BeforeInfo {
    /// Capability declarations related to this one by an ordering edge.
    /// NOTE(review): the edge direction is established in insertAttrExprs,
    /// defined out of view -- confirm there.
    BeforeVect Vect;

    /// Traversal marker; presumably guards against revisiting nodes while
    /// walking the ordering graph -- confirm against checkBeforeAfter.
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  /// Build the BeforeInfo for \p Vd from its attributes.
  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  /// Return the BeforeInfo for \p Vd, creating it if necessary.
  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  /// Check an acquisition of \p Vd at \p Loc against the facts currently
  /// held in \p FSet.
  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  /// Per-declaration ordering information.
  BeforeMap BMap;

  /// Per-declaration flag map; presumably tracks participation in ordering
  /// cycles (name "CycleMap") -- confirm against the out-of-line definitions.
  CycleMap CycMap;
};
335
336} // namespace threadSafety
337} // namespace clang
338
339namespace {
340
341class LocalVariableMap;
342
343using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
344
/// A side (entry or exit) of a CFG node; selects which lockset/location of a
/// CFGBlockInfo to inspect (see CFGBlockInfo::getSet / getLocation).
enum CFGBlockSide { CBS_Entry, CBS_Exit };
347
/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Index into LocalVariableMap's SavedContexts; used to replay contexts
  // later (set during LocalVariableMap::traverseCFG).
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  /// Select the entry or exit lockset.
  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  /// Select the entry or exit source location.
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  // Constructible only via getEmptyBlockInfo; both contexts start empty.
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
391
392// A LocalVariableMap maintains a map from local variables to their currently
393// valid definitions. It provides SSA-like functionality when traversing the
394// CFG. Like SSA, each definition or assignment to a variable is assigned a
395// unique name (an integer), which acts as the SSA name for that definition.
396// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
398// with their SSA-names. Instead, we compute a Context for each point in the
399// code, which maps local variables to the appropriate SSA-name. This map
400// changes with each assignment.
401//
402// The map is computed in a single pass over the CFG. Subsequent analyses can
403// then query the map to find the appropriate Context for a statement, and use
404// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Direct reference to another VarDefinition
    unsigned DirectRef = 0;

    // Reference to underlying canonical non-reference VarDefinition.
    unsigned CanonicalRef = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    // A definition with no expression is a reference to another definition.
    bool isReference() const { return !Exp; }

    // Point this reference at index 0, the "undefined" placeholder.
    void invalidateRef() { DirectRef = CanonicalRef = 0; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef,
                  Context C)
        : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;

  // All definitions ever created; an index into this vector is the SSA name
  // for that definition.
  std::vector<VarDefinition> VarDefinitions;

  // Snapshots of (statement, context) taken during traversal, so later
  // analyses can replay the contexts (see getNextContext).
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(x: VarDefinition(nullptr, 0, 0, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(K: D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(K: D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    // Chase chains of references until we find a real definition; index 0
    // (undefined) terminates the chain.
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].DirectRef;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    // Contexts were saved in traversal order, so replaying statements in the
    // same order means the next snapshot (if any) belongs to S.
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  /// Print the name of definition \p i to llvm::errs() (for debugging).
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(OS&: llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].DirectRef;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(i: Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(OS&: llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(i: I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Resolve any definition ID down to its non-reference base ID.
  unsigned getCanonicalDefinitionID(unsigned ID) const {
    while (ID > 0 && VarDefinitions[ID].isReference())
      ID = VarDefinitions[ID].CanonicalRef;
    return ID;
  }

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(x: std::make_pair(x&: S, y&: C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Old: Ctx, K: D, D: newID);
    VarDefinitions.push_back(x: VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Old: Ctx, K: D, D: newID);
    VarDefinitions.push_back(
        x: VarDefinition(D, Ref, getCanonicalDefinitionID(ID: Ref), Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(K: D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Old: Ctx, K: D);
      NewCtx = ContextFactory.add(Old: NewCtx, K: D, D: newID);
      // Note: the new definition is interpreted in the *old* context Ctx,
      // i.e. before this assignment took effect.
      VarDefinitions.push_back(x: VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(K: D)) {
      NewCtx = ContextFactory.remove(Old: NewCtx, K: D);
      NewCtx = ContextFactory.add(Old: NewCtx, K: D, D: 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(K: D)) {
      NewCtx = ContextFactory.remove(Old: NewCtx, K: D);
    }
    return NewCtx;
  }

  // Intersection of two contexts (defined below class).
  Context intersectContexts(Context C1, Context C2);
  // Build a context of references to the definitions in C (for back edges).
  Context createReferenceContext(Context C);
  // Second-pass intersection for loop back edges; mutates VarDefinitions.
  void intersectBackEdge(Context C1, Context C2);
};
619
620} // namespace
621
// This has to be defined after LocalVariableMap, because it needs
// M.getEmptyContext() to initialize both contexts.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}
626
627namespace {
628
/// Visitor which builds a LocalVariableMap.
///
/// Visits the statements of one basic block and updates Ctx, the current
/// variable-to-definition context, as definitions are added, updated, or
/// invalidated.
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  // The map being built; receives new definitions and context snapshots.
  LocalVariableMap* VMap;
  // The context at the current point of the traversal.
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};
642
643} // namespace
644
645// Add new local variables to the variable map
646void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
647 bool modifiedCtx = false;
648 const DeclGroupRef DGrp = S->getDeclGroup();
649 for (const auto *D : DGrp) {
650 if (const auto *VD = dyn_cast_or_null<VarDecl>(Val: D)) {
651 const Expr *E = VD->getInit();
652
653 // Add local variables with trivial type to the variable map
654 QualType T = VD->getType();
655 if (T.isTrivialType(Context: VD->getASTContext())) {
656 Ctx = VMap->addDefinition(D: VD, Exp: E, Ctx);
657 modifiedCtx = true;
658 }
659 }
660 }
661 if (modifiedCtx)
662 VMap->saveContext(S, C: Ctx);
663}
664
665// Update local variable definitions in variable map
666void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
667 if (!BO->isAssignmentOp())
668 return;
669
670 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
671
672 // Update the variable map and current context.
673 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: LHSExp)) {
674 const ValueDecl *VDec = DRE->getDecl();
675 if (Ctx.lookup(K: VDec)) {
676 if (BO->getOpcode() == BO_Assign)
677 Ctx = VMap->updateDefinition(D: VDec, Exp: BO->getRHS(), Ctx);
678 else
679 // FIXME -- handle compound assignment operators
680 Ctx = VMap->clearDefinition(D: VDec, Ctx);
681 VMap->saveContext(S: BO, C: Ctx);
682 }
683 }
684}
685
686// Invalidates local variable definitions if variable escaped.
687void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
688 const FunctionDecl *FD = CE->getDirectCallee();
689 if (!FD)
690 return;
691
692 // Heuristic for likely-benign functions that pass by mutable reference. This
693 // is needed to avoid a slew of false positives due to mutable reference
694 // passing where the captured reference is usually passed on by-value.
695 if (const IdentifierInfo *II = FD->getIdentifier()) {
696 // Any kind of std::bind-like functions.
697 if (II->isStr(Str: "bind") || II->isStr(Str: "bind_front"))
698 return;
699 }
700
701 // Invalidate local variable definitions that are passed by non-const
702 // reference or non-const pointer.
703 for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
704 if (Idx >= FD->getNumParams())
705 break;
706
707 const Expr *Arg = CE->getArg(Arg: Idx)->IgnoreParenImpCasts();
708 const ParmVarDecl *PVD = FD->getParamDecl(i: Idx);
709 QualType ParamType = PVD->getType();
710
711 // Potential reassignment if passed by non-const reference / pointer.
712 const ValueDecl *VDec = nullptr;
713 if (ParamType->isReferenceType() &&
714 !ParamType->getPointeeType().isConstQualified()) {
715 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Arg))
716 VDec = DRE->getDecl();
717 } else if (ParamType->isPointerType() &&
718 !ParamType->getPointeeType().isConstQualified()) {
719 Arg = Arg->IgnoreParenCasts();
720 if (const auto *UO = dyn_cast<UnaryOperator>(Val: Arg)) {
721 if (UO->getOpcode() == UO_AddrOf) {
722 const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
723 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: SubE))
724 VDec = DRE->getDecl();
725 }
726 }
727 }
728
729 if (VDec)
730 Ctx = VMap->clearDefinition(D: VDec, Ctx);
731 }
732 // Save the context after the call where escaped variables' definitions (if
733 // they exist) are cleared.
734 VMap->saveContext(S: CE, C: Ctx);
735}
736
737// Computes the intersection of two contexts. The intersection is the
738// set of variables which have the same definition in both contexts;
739// variables with different definitions are discarded.
740LocalVariableMap::Context
741LocalVariableMap::intersectContexts(Context C1, Context C2) {
742 Context Result = C1;
743 for (const auto &P : C1) {
744 const NamedDecl *Dec = P.first;
745 const unsigned *I2 = C2.lookup(K: Dec);
746 if (!I2) {
747 // The variable doesn't exist on second path.
748 Result = removeDefinition(D: Dec, Ctx: Result);
749 } else if (getCanonicalDefinitionID(ID: P.second) !=
750 getCanonicalDefinitionID(ID: *I2)) {
751 // If canonical definitions mismatch the underlying definitions are
752 // different, invalidate.
753 Result = clearDefinition(D: Dec, Ctx: Result);
754 }
755 }
756 return Result;
757}
758
759// For every variable in C, create a new variable that refers to the
760// definition in C. Return a new context that contains these new variables.
761// (We use this for a naive implementation of SSA on loop back-edges.)
762LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
763 Context Result = getEmptyContext();
764 for (const auto &P : C)
765 Result = addReference(D: P.first, Ref: P.second, Ctx: Result);
766 return Result;
767}
768
769// This routine also takes the intersection of C1 and C2, but it does so by
770// altering the VarDefinitions. C1 must be the result of an earlier call to
771// createReferenceContext.
772void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
773 for (const auto &P : C1) {
774 const unsigned I1 = P.second;
775 VarDefinition *VDef = &VarDefinitions[I1];
776 assert(VDef->isReference());
777
778 const unsigned *I2 = C2.lookup(K: P.first);
779 if (!I2) {
780 // Variable does not exist at the end of the loop, invalidate.
781 VDef->invalidateRef();
782 continue;
783 }
784
785 // Compare the canonical IDs. This correctly handles chains of references
786 // and determines if the variable is truly loop-invariant.
787 if (VDef->CanonicalRef != getCanonicalDefinitionID(ID: *I2))
788 VDef->invalidateRef(); // Mark this variable as undefined
789 }
790}
791
792// Traverse the CFG in topological order, so all predecessors of a block
793// (excluding back-edges) are visited before the block itself. At
794// each point in the code, we calculate a Context, which holds the set of
795// variable definitions which are visible at that point in execution.
796// Visible variables are mapped to their definitions using an array that
797// contains all definitions.
798//
799// At join points in the CFG, the set is computed as the intersection of
800// the incoming sets along each edge, E.g.
801//
802// { Context | VarDefinitions }
803// int x = 0; { x -> x1 | x1 = 0 }
804// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
805// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
806// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
807// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
808//
809// This is essentially a simpler and more naive version of the standard SSA
810// algorithm. Those definitions that remain in the intersection are from blocks
811// that strictly dominate the current block. We do not bother to insert proper
812// phi nodes, because they are not used in our analysis; instead, wherever
813// a phi node would be required, we simply remove that definition from the
814// context (E.g. x above).
815//
816// The initial traversal does not capture back-edges, so those need to be
817// handled on a separate pass. Whenever the first pass encounters an
818// incoming back edge, it duplicates the context, creating new definitions
819// that refer back to the originals. (These correspond to places where SSA
820// might have to insert a phi node.) On the second pass, these definitions are
821// set to NULL if the variable has changed on the back-edge (i.e. a phi
822// node was actually required.) E.g.
823//
824// { Context | VarDefinitions }
825// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
826// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
827// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
828// ... { y -> y1 | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(Block: CurrBlock);

    // Calculate the entry context for the current block: the intersection of
    // the exit contexts of all forward-edge predecessors.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge (predecessor not yet visited in
      // topological order), skip it here; back edges are handled below.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(Block: *PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        // First forward predecessor: start from its exit context.
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(C1: CurrBlockInfo->EntryContext,
                            C2: PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(C: CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(S: nullptr, C: CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(S: CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(Block: *SI))
        continue;

      // *SI is the loop head; second-pass-intersect its entry context (the
      // reference context created above) with this block's exit context.
      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(C1: LoopBegin, C2: LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(S: nullptr, C: BlockInfo[exitID].ExitContext);
}
908
909/// Find the appropriate source locations to use when producing diagnostics for
910/// each block in the CFG.
911static void findBlockLocations(CFG *CFGraph,
912 const PostOrderCFGView *SortedGraph,
913 std::vector<CFGBlockInfo> &BlockInfo) {
914 for (const auto *CurrBlock : *SortedGraph) {
915 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
916
917 // Find the source location of the last statement in the block, if the
918 // block is not empty.
919 if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
920 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
921 } else {
922 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
923 BE = CurrBlock->rend(); BI != BE; ++BI) {
924 // FIXME: Handle other CFGElement kinds.
925 if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
926 CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
927 break;
928 }
929 }
930 }
931
932 if (CurrBlockInfo->ExitLoc.isValid()) {
933 // This block contains at least one statement. Find the source location
934 // of the first statement in the block.
935 for (const auto &BI : *CurrBlock) {
936 // FIXME: Handle other CFGElement kinds.
937 if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
938 CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
939 break;
940 }
941 }
942 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
943 CurrBlock != &CFGraph->getExit()) {
944 // The block is empty, and has a single predecessor. Use its exit
945 // location.
946 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
947 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
948 } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
949 // The block is empty, and has a single successor. Use its entry
950 // location.
951 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
952 BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
953 }
954 }
955}
956
957namespace {
958
/// A fact entry for a plain lockable capability (e.g. a mutex), as opposed to
/// a scoped object that manages other capabilities.
class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;

  // Private: instances are created via the factory functions below so they
  // live in the analyzer's arena allocator.
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  /// Copy an existing entry into \p Alloc; used to derive an entry with an
  /// adjusted reentrancy depth.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  /// Create a fresh entry for capability \p CE acquired with kind \p LK.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  /// Warn when a (non-asserted, non-negative, non-universal) capability is
  /// still held when it is dropped from the intersection of locksets.
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(Kind: getKind(), LockName: toString(), LocLocked: loc(), LocEndOfScope: JoinLoc,
                                        LEK);
    }
  }

  /// Called when the capability is acquired while already held: either bump
  /// the reentrancy depth or diagnose a double lock.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, ReenterKind: entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FM&: FactMan, CapE: entry, Entry: RFact);
    } else {
      Handler.handleDoubleLock(Kind: entry.getKind(), LockName: entry.toString(), LocLocked: loc(),
                               LocDoubleLock: entry.loc());
    }
  }

  /// Called when the capability is released: either decrement the reentrancy
  /// depth (the capability remains held) or record the negative capability.
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FM&: FactMan, CapE: Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FM&: FactMan, Entry: RFact);
    } else if (!Cp.negative()) {
      // Fully released: remember that via the negative capability !Cp.
      FSet.addLock(FM&: FactMan, Entry: FactMan.createFact<LockableFactEntry>(
                       Args: !Cp, Args: LK_Exclusive, Args&: UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability reentrant,
  // nullptr otherwise.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    // A reentrant acquisition must use the same lock kind as the original.
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(Args: *this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrant, nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(Args: *this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};
1049
/// How a scoped lockable object affects one of its underlying capabilities.
enum UnderlyingCapabilityKind {
  UCK_Acquired,          ///< Any kind of acquired capability.
  UCK_ReleasedShared,    ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};
1055
/// A capability managed by a scoped lockable object, together with how the
/// scoped object affected it (acquired vs. released).
struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};
1060
/// A fact entry for a scoped lockable object (e.g. a RAII guard), which
/// acquires and/or releases a set of underlying capabilities. The managed
/// capabilities are stored inline as trailing objects.
class ScopedLockableFactEntry final
    : public FactEntry,
      private llvm::TrailingObjects<ScopedLockableFactEntry,
                                    UnderlyingCapability> {
  friend TrailingObjects;

private:
  /// Maximum number of managed capabilities, fixed at allocation time.
  const unsigned ManagedCapacity;
  /// Number of managed capabilities recorded so far.
  unsigned ManagedSize = 0;

  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src, unsigned ManagedCapacity)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
        ManagedCapacity(ManagedCapacity) {}

  /// Record capability \p M as managed by this scoped object; the trailing
  /// storage must still have room (no reallocation happens).
  void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
    assert(ManagedSize < ManagedCapacity);
    new (getTrailingObjects() + ManagedSize) UnderlyingCapability{.Cap: M, .Kind: UCK};
    ++ManagedSize;
  }

  ArrayRef<UnderlyingCapability> getManaged() const {
    return getTrailingObjects(N: ManagedSize);
  }

public:
  /// Allocate a scoped lockable entry in \p Alloc with trailing storage for
  /// \p ManagedCapacity underlying capabilities.
  static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                         const CapabilityExpr &CE,
                                         SourceLocation Loc, SourceKind Src,
                                         unsigned ManagedCapacity) {
    void *Storage =
        Alloc.Allocate(Size: totalSizeToAlloc<UnderlyingCapability>(Counts: ManagedCapacity),
                       Alignment: alignof(ScopedLockableFactEntry));
    return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
  }

  /// Return the expressions of all capabilities managed by this object.
  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : getManaged())
      UnderlyingMutexesSet.push_back(Elt: UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  /// \name Adding managed locks
  /// Capacity for managed locks must have been allocated via \ref create.
  /// There is no reallocation in case the capacity is exceeded!
  /// \{
  void addLock(const CapabilityExpr &M) { addManaged(M, UCK: UCK_Acquired); }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK: UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK: UCK_ReleasedShared);
  }
  /// \}

  /// When the scoped fact is dropped at a join point, check each managed
  /// capability: warn if an acquired one is still held, or a released one was
  /// not re-acquired.
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    // End-of-function diagnostics are not reported on the scoped object.
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : getManaged()) {
      const auto *Entry = FSet.findLock(FM&: FactMan, CapE: UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(Kind: UnderlyingMutex.Cap.getKind(),
                                          LockName: UnderlyingMutex.Cap.toString(), LocLocked: loc(),
                                          LocEndOfScope: JoinLoc, LEK);
      }
    }
  }

  /// Re-apply the scoped object's effect: acquire the managed acquired
  /// capabilities and release the managed released ones.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : getManaged()) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, Cp: UnderlyingMutex.Cap, kind: entry.kind(), loc: entry.loc(),
             Handler: &Handler);
      else
        unlock(FSet, FactMan, Cp: UnderlyingMutex.Cap, loc: entry.loc(), Handler: &Handler);
    }
  }

  /// Invert the scoped object's effect on each managed capability, and drop
  /// the scoped fact itself when \p FullyRemove (object destruction).
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : getManaged()) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, Cp: UnderlyingMutex.Cap, loc: UnlockLoc, Handler: TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, Cp: UnderlyingMutex.Cap, kind, loc: UnlockLoc, Handler: TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FM&: FactMan, CapE: Cp);
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  /// Add \p Cp to the lockset (possibly reentrantly); diagnose a double lock
  /// via \p Handler if it is non-null.
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FM&: FactMan, CapE: Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(Val: FactMan[*It]);
      if (const FactEntry *RFact = Fact.tryReenter(FactMan, ReenterKind: kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FM&: FactMan, It, Entry: RFact);
      } else if (Handler) {
        Handler->handleDoubleLock(Kind: Cp.getKind(), LockName: Cp.toString(), LocLocked: Fact.loc(), LocDoubleLock: loc);
      }
    } else {
      // Not held: drop any pending negative capability and add a managed fact.
      FSet.removeLock(FM&: FactMan, CapE: !Cp);
      FSet.addLock(FM&: FactMan, Entry: FactMan.createFact<LockableFactEntry>(Args: Cp, Args&: kind, Args&: loc,
                                                                 Args: Managed));
    }
  }

  /// Remove \p Cp from the lockset (or just decrement its reentrancy depth);
  /// diagnose an unmatched unlock via \p Handler if it is non-null.
  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FM&: FactMan, CapE: Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(Val: FactMan[*It]);
      if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FM&: FactMan, It, Entry: RFact);
        return;
      }

      // Fully released: replace the fact with the negative capability.
      FSet.replaceLock(
          FM&: FactMan, It,
          Entry: FactMan.createFact<LockableFactEntry>(Args: !Cp, Args: LK_Exclusive, Args&: loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FM&: FactMan, CapE: !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Kind: Cp.getKind(), LockName: Cp.toString(), Loc: loc, LocPreviousUnlock: PrevLoc);
    }
  }
};
1216
/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  /// Arena allocator backing facts and TIL expressions for one analysis.
  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  /// Translates attribute expressions into TIL capability expressions.
  threadSafety::SExprBuilder SxBuilder;

  /// Client used to report diagnostics.
  ThreadSafetyHandler &Handler;
  /// The function currently being analyzed, if any.
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactMananger-style fact storage for the analysis.
  FactManager FactMan;
  /// Per-CFG-block entry/exit state, indexed by block ID.
  std::vector<CFGBlockInfo> BlockInfo;

  /// Shared acquired_before/acquired_after ordering information.
  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
        GlobalBeforeSet(Bset) {}

  /// Return true if \p CapE is accessible from the scope of the function
  /// currently being analyzed (globals, or members of the same class).
  bool inCurrentScope(const CapabilityExpr &CapE);

  /// Add a lock to \p FSet, diagnosing problems such as double acquisition.
  void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
  /// Remove a lock from \p FSet, diagnosing if it is not held.
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  /// Collect the capability expressions named by attribute \p Attr.
  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  /// Collect the capability expressions named by a trylock attribute, but
  /// only if the attribute applies to the PredBlock -> CurrBlock edge.
  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  /// Trace a branch condition back to a function call, if any, setting
  /// \p Negate when the call's result is logically negated.
  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  /// Compute the lockset on the PredBlock -> CurrBlock edge: PredBlock's
  /// exit set plus any conditionally held trylocks.
  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  /// Intersect \p EntrySet with \p ExitSet at a join point, reporting
  /// mismatches with the given error kinds.
  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  /// Convenience overload using the same error kind for entry and exit.
  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, EntryLEK: LEK, ExitLEK: LEK);
  }

  /// Entry point: run the thread safety analysis on one function.
  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::SExpr *Self,
                          SourceLocation Loc);
  void warnIfAnyMutexNotHeldForRead(const FactSet &FSet, const NamedDecl *D,
                                    const Expr *Exp,
                                    llvm::ArrayRef<Expr *> Args,
                                    ProtectedOperationKind POK,
                                    SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);

  /// Check a direct access to a (possibly) guarded variable.
  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  /// Check an access that goes through a pointer dereference.
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};
1295
1296} // namespace
1297
/// Process acquired_before and acquired_after attributes on Vd.
/// Creates (or reuses) the BeforeInfo entry for Vd and records the ordering
/// edges implied by its attributes; returns the entry for Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(p: new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(Val: At);

      // Read exprs from the attribute, and add them to BeforeVect.
      // "Vd acquired_before Cpvd" is stored as Cpvd in Vd's vector; the
      // recursive call may grow BMap (hence the scoping of InfoPtr above).
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
          Analyzer.SxBuilder.translateAttrExpr(AttrExp: Arg, Ctx: nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Elt: Cpvd);
          const auto It = BMap.find(Val: Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Vd: Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(Val: At);

      // Read exprs from the attribute, and add them to BeforeVect.
      // "Vd acquired_after ArgVd" is recorded as "ArgVd before Vd".
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
          Analyzer.SxBuilder.translateAttrExpr(AttrExp: Arg, Ctx: nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(Vd: ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Elt: Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}
1352
1353BeforeSet::BeforeInfo *
1354BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
1355 ThreadSafetyAnalyzer &Analyzer) {
1356 auto It = BMap.find(Val: Vd);
1357 BeforeInfo *Info = nullptr;
1358 if (It == BMap.end())
1359 Info = insertAttrExprs(Vd, Analyzer);
1360 else
1361 Info = It->second.get();
1362 assert(Info && "BMap contained nullptr?");
1363 return Info;
1364}
1365
/// Warn if any mutexes in FSet are in the acquired_before set of StartVd,
/// i.e. the acquisition order implied by the attributes is being violated.
/// Also warns (once per declaration) about cycles in the before/after graph.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  // Visited encoding: 0 = unvisited, 1 = on the current DFS path,
  // 2 = fully processed.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    // Already on the current path: we found a cycle.
    if (Info->Visited == 1)
      return true;

    // Subtree already fully processed.
    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Elt: Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(FM&: Analyzer.FactMan, Vd: Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(Kind: CapKind, L1Name: L1, L2Name: L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        // Report each cycle only once per declaration.
        if (CycMap.try_emplace(Key: Vd, Args: true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1Name: L1, Loc: Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  // Reset the Visited markers for the next query.
  for (auto *Info : InfoVect)
    Info->Visited = 0;
}
1416
1417/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1418static const ValueDecl *getValueDecl(const Expr *Exp) {
1419 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: Exp))
1420 return getValueDecl(Exp: CE->getSubExpr());
1421
1422 if (const auto *DR = dyn_cast<DeclRefExpr>(Val: Exp))
1423 return DR->getDecl();
1424
1425 if (const auto *ME = dyn_cast<MemberExpr>(Val: Exp))
1426 return ME->getMemberDecl();
1427
1428 return nullptr;
1429}
1430
1431bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1432 const threadSafety::til::SExpr *SExp = CapE.sexpr();
1433 assert(SExp && "Null expressions should be ignored");
1434
1435 if (const auto *LP = dyn_cast<til::LiteralPtr>(Val: SExp)) {
1436 const ValueDecl *VD = LP->clangDecl();
1437 // Variables defined in a function are always inaccessible.
1438 if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
1439 return false;
1440 // For now we consider static class members to be inaccessible.
1441 if (isa<CXXRecordDecl>(Val: VD->getDeclContext()))
1442 return false;
1443 // Global variables are always in scope.
1444 return true;
1445 }
1446
1447 // Members are in scope from methods of the same class.
1448 if (const auto *P = dyn_cast<til::Project>(Val: SExp)) {
1449 if (!isa_and_nonnull<CXXMethodDecl>(Val: CurrentFunction))
1450 return false;
1451 const ValueDecl *VD = P->clangDecl();
1452 return VD->getDeclContext() == CurrentFunction->getDeclContext();
1453 }
1454
1455 return false;
1456}
1457
/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FM&: FactMan, CapE: NegC);
    if (Nen) {
      FSet.removeLock(FM&: FactMan, CapE: NegC);
    }
    else {
      // The negative capability was not held; warn, except for asserted or
      // reentrant capabilities and those not accessible from this scope.
      if (inCurrentScope(CapE: *Entry) && !Entry->asserted() && !Entry->reentrant())
        Handler.handleNegativeNotHeld(Kind: Entry->getKind(), LockName: Entry->toString(),
                                      Neg: NegC.toString(), Loc: Entry->loc());
    }
  }

  // Check before/after constraints
  if (!Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(StartVd: Entry->valueDecl(), FSet, Analyzer&: *this,
                                      Loc: Entry->loc(), CapKind: Entry->getKind());
  }

  if (const FactEntry *Cp = FSet.findLock(FM&: FactMan, CapE: *Entry)) {
    // Already held: defer to the existing fact (reentrancy bump or
    // double-lock diagnostic), unless the new acquisition is asserted.
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, entry: *Entry, Handler);
  } else {
    FSet.addLock(FM&: FactMan, Entry);
  }
}
1492
1493/// Remove a lock from the lockset, warning if the lock is not there.
1494/// \param UnlockLoc The source location of the unlock (only used in error msg)
1495void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
1496 SourceLocation UnlockLoc,
1497 bool FullyRemove, LockKind ReceivedKind) {
1498 if (Cp.shouldIgnore())
1499 return;
1500
1501 const FactEntry *LDat = FSet.findLock(FM&: FactMan, CapE: Cp);
1502 if (!LDat) {
1503 SourceLocation PrevLoc;
1504 if (const FactEntry *Neg = FSet.findLock(FM&: FactMan, CapE: !Cp))
1505 PrevLoc = Neg->loc();
1506 Handler.handleUnmatchedUnlock(Kind: Cp.getKind(), LockName: Cp.toString(), Loc: UnlockLoc,
1507 LocPreviousUnlock: PrevLoc);
1508 return;
1509 }
1510
1511 // Generic lock removal doesn't care about lock kind mismatches, but
1512 // otherwise diagnose when the lock kinds are mismatched.
1513 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
1514 Handler.handleIncorrectUnlockKind(Kind: Cp.getKind(), LockName: Cp.toString(), Expected: LDat->kind(),
1515 Received: ReceivedKind, LocLocked: LDat->loc(), LocUnlock: UnlockLoc);
1516 }
1517
1518 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
1519}
1520
1521/// Extract the list of mutexIDs from the attribute on an expression,
1522/// and push them onto Mtxs, discarding any duplicates.
1523template <typename AttrType>
1524void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1525 const Expr *Exp, const NamedDecl *D,
1526 til::SExpr *Self) {
1527 if (Attr->args_size() == 0) {
1528 // The mutex held is the "this" object.
1529 CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: nullptr, D, DeclExp: Exp, Self);
1530 if (Cp.isInvalid()) {
1531 warnInvalidLock(Handler, MutexExp: nullptr, D, DeclExp: Exp, Kind: Cp.getKind());
1532 return;
1533 }
1534 //else
1535 if (!Cp.shouldIgnore())
1536 Mtxs.push_back_nodup(CapE: Cp);
1537 return;
1538 }
1539
1540 for (const auto *Arg : Attr->args()) {
1541 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
1542 if (Cp.isInvalid()) {
1543 warnInvalidLock(Handler, MutexExp: nullptr, D, DeclExp: Exp, Kind: Cp.getKind());
1544 continue;
1545 }
1546 //else
1547 if (!Cp.shouldIgnore())
1548 Mtxs.push_back_nodup(CapE: Cp);
1549 }
1550}
1551
/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  // BrE is the attribute's success value, a boolean or integer literal.
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(Val: BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(Val: BrE))
    branch = ILE->getValue().getBoolValue();

  // Successor index corresponding to a successful trylock: 0 for the "true"
  // branch, 1 for the "false" branch; a negated condition flips them.
  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  // (only the first two successors of the terminator are considered).
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}
1580
1581static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1582 if (isa<CXXNullPtrLiteralExpr>(Val: E) || isa<GNUNullExpr>(Val: E)) {
1583 TCond = false;
1584 return true;
1585 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(Val: E)) {
1586 TCond = BLE->getValue();
1587 return true;
1588 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(Val: E)) {
1589 TCond = ILE->getValue().getBoolValue();
1590 return true;
1591 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(Val: E))
1592 return getStaticBooleanValue(E: CE->getSubExpr(), TCond);
1593 return false;
1594}
1595
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Val: Cond)) {
    // Look through __builtin_expect(x, v) to x.
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(Cond: CallExp->getArg(Arg: 0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Val: Cond))
    return getTrylockCallExpr(Cond: PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: Cond))
    return getTrylockCallExpr(Cond: CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Val: Cond))
    return getTrylockCallExpr(Cond: FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Cond)) {
    // A local variable: chase its definition through the local variable map.
    const Expr *E = LocalVarMap.lookupExpr(D: DRE->getDecl(), Ctx&: C);
    return getTrylockCallExpr(Cond: E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Val: Cond)) {
    // Logical not flips the sense of the condition.
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(Cond: UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Val: Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      // Comparison against a constant: e.g. (x == false) behaves like !x.
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(E: BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(Cond: BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(E: BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Val: Cond)) {
    // (cond ? true : false) is equivalent to cond;
    // (cond ? false : true) is equivalent to !cond.
    bool TCond, FCond;
    if (getStaticBooleanValue(E: COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(E: COP->getFalseExpr(), TCond&: FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(Cond: COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(Cond: COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}
1665
/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock* PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when its result is used.
  if (!Cond || isa<ConditionalOperator>(Val: PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  if (Handler.issueBetaWarnings()) {
    // Temporarily set the lookup context for SExprBuilder.
    SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return LocalVarMap.lookupExpr(D, Ctx);
        });
  }
  // Always clear the lookup context again when we leave this function.
  llvm::scope_exit Cleanup(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  // Trace the branch condition back to a try-lock call, if any.
  const auto *Exp = getTrylockCallExpr(Cond, C: LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Val: Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Mtxs&: Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, D: FunDecl, PredBlock, CurrBlock, BrE: Attr->getSuccessValue(),
                Neg: Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(FSet&: Result, Entry: FactMan.createFact<LockableFactEntry>(Args: ExclusiveLockToAdd,
                                                          Args: LK_Exclusive, Args&: Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(FSet&: Result, Entry: FactMan.createFact<LockableFactEntry>(Args: SharedLockToAdd,
                                                          Args: LK_Shared, Args&: Loc));
}
1720
1721namespace {
1722
/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  /// The lockset at the current program point, seeded from the block entry.
  FactSet FSet;
  // The fact set for the function on exit.
  const FactSet &FunctionExitFSet;
  /// The local variable context at the current program point.
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // To update the context used in attr-expr translation. If `S` is non-null,
  // the context is updated to the program point right after 'S'.
  void updateLocalVarMapCtx(const Stmt *S) {
    if (S)
      LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, C: LVarCtx);
    if (!Analyzer->Handler.issueBetaWarnings())
      return;
    // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
    Analyzer->SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
        });
  }

  // helper functions

  /// Check a direct access to \p Exp against the current lockset.
  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkAccess(FSet, Exp, AK, POK);
  }
  /// Check an access through a pointer dereference of \p Exp.
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkPtAccess(FSet, Exp, AK, POK);
  }

  void handleCall(const Expr *Exp, const NamedDecl *D,
                  til::SExpr *Self = nullptr,
                  SourceLocation Loc = SourceLocation());
  void examineArguments(const FunctionDecl *FD,
                        CallExpr::const_arg_iterator ArgBegin,
                        CallExpr::const_arg_iterator ArgEnd,
                        bool SkipFirstParam = false);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
               const FactSet &FunctionExitFSet)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
        CtxIndex(Info.EntryIndex) {
    updateLocalVarMapCtx(S: nullptr);
  }

  // Clear the lookup closure installed by updateLocalVarMapCtx.
  ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
  BuildLockset(const BuildLockset &) = delete;
  BuildLockset &operator=(const BuildLockset &) = delete;

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
  void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
  void VisitReturnStmt(const ReturnStmt *S);
};
1793
1794} // namespace
1795
1796/// Warn if the LSet does not contain a lock sufficient to protect access
1797/// of at least the passed in AccessKind.
void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
    const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
    Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
    SourceLocation Loc) {
  // Reads require at least a shared lock; writes require an exclusive one.
  LockKind LK = getLockKindFromAccessKind(AK);
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: MutexExp, D, DeclExp: Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, DeclExp: Exp, Kind: Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like locks excluded
    const FactEntry *LDat = FSet.findLock(FM&: FactMan, CapE: !Cp);
    if (LDat) {
      // The positive capability is held, so the !cap requirement is violated.
      Handler.handleFunExcludesLock(Kind: Cp.getKind(), FunName: D->getNameAsString(),
                                    LockName: (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!inCurrentScope(CapE: Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(FM&: FactMan, CapE: Cp);
    if (!LDat) {
      Handler.handleNegativeNotHeld(D, LockName: Cp.toString(), Loc);
    }
    return;
  }

  // Positive requirement: look for the capability (or a universal fact).
  const FactEntry *LDat = FSet.findLockUniv(FM&: FactMan, CapE: Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found. Look for a partial match.
    LDat = FSet.findPartialMatch(FM&: FactMan, CapE: Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK, LockName: Cp.toString(), LK, Loc,
                                 PossibleMatch: &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK, LockName: Cp.toString(), LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  // (Only reached when the lock was found; avoid a second diagnostic if we
  // already warned above.)
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK, LockName: Cp.toString(), LK, Loc);
  }
}
1855
1856void ThreadSafetyAnalyzer::warnIfAnyMutexNotHeldForRead(
1857 const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
1858 llvm::ArrayRef<Expr *> Args, ProtectedOperationKind POK,
1859 SourceLocation Loc) {
1860 SmallVector<CapabilityExpr, 2> Caps;
1861 for (auto *Arg : Args) {
1862 CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: Arg, D, DeclExp: Exp, Self: nullptr);
1863 if (Cp.isInvalid()) {
1864 warnInvalidLock(Handler, MutexExp: Arg, D, DeclExp: Exp, Kind: Cp.getKind());
1865 continue;
1866 }
1867 if (Cp.shouldIgnore())
1868 continue;
1869 const FactEntry *LDat = FSet.findLockUniv(FM&: FactMan, CapE: Cp);
1870 if (LDat && LDat->isAtLeast(LK: LK_Shared))
1871 return; // At least one held — read access is safe.
1872 // FIXME: try findPartialMatch as a fallback to support
1873 // -Wno-thread-safety-precise, as warnIfMutexNotHeld does.
1874 Caps.push_back(Elt: Cp);
1875 }
1876 if (Caps.empty())
1877 return;
1878 // Materialize names only now that we know we are going to warn.
1879 SmallVector<std::string, 2> NameStorage;
1880 SmallVector<StringRef, 2> Names;
1881 for (const auto &Cp : Caps) {
1882 NameStorage.push_back(Elt: Cp.toString());
1883 Names.push_back(Elt: NameStorage.back());
1884 }
1885 Handler.handleGuardedByAnyReadNotHeld(D, POK, LockNames: Names, Loc);
1886}
1887
1888/// Warn if the LSet contains the given lock.
1889void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1890 const NamedDecl *D, const Expr *Exp,
1891 Expr *MutexExp, til::SExpr *Self,
1892 SourceLocation Loc) {
1893 CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: MutexExp, D, DeclExp: Exp, Self);
1894 if (Cp.isInvalid()) {
1895 warnInvalidLock(Handler, MutexExp, D, DeclExp: Exp, Kind: Cp.getKind());
1896 return;
1897 } else if (Cp.shouldIgnore()) {
1898 return;
1899 }
1900
1901 const FactEntry *LDat = FSet.findLock(FM&: FactMan, CapE: Cp);
1902 if (LDat) {
1903 Handler.handleFunExcludesLock(Kind: Cp.getKind(), FunName: D->getNameAsString(),
1904 LockName: Cp.toString(), Loc);
1905 }
1906}
1907
1908/// Checks guarded_by and pt_guarded_by attributes.
1909/// Whenever we identify an access (read or write) to a DeclRefExpr that is
1910/// marked with guarded_by, we must ensure the appropriate mutexes are held.
1911/// Similarly, we check if the access is to an expression that dereferences
1912/// a pointer marked with pt_guarded_by.
void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
                                       AccessKind AK,
                                       ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Exp)) {
    const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization. e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E->IgnoreImplicit()->IgnoreParenCasts();
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Val: Exp)) {
    // For dereferences
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(FSet, Exp: UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(Val: Exp)) {
    switch (BO->getOpcode()) {
    case BO_PtrMemD: // .*
      return checkAccess(FSet, Exp: BO->getLHS(), AK, POK);
    case BO_PtrMemI: // ->*
      return checkPtAccess(FSet, Exp: BO->getLHS(), AK, POK);
    default:
      return;
    }
  }

  // Array element access goes through the pointer/array being indexed.
  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Val: Exp)) {
    checkPtAccess(FSet, Exp: AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Val: Exp)) {
    if (ME->isArrow())
      checkPtAccess(FSet, Exp: ME->getBase(), AK, POK);
    else
      checkAccess(FSet, Exp: ME->getBase(), AK, POK);
    // Note: deliberately no return — the member itself is also checked
    // against its guarded_by attributes below.
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  // GUARDED_VAR (without a named mutex) only requires that *some* lock be
  // held at the access.
  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
    Handler.handleNoMutexHeld(D, POK, AK, Loc);
  }

  for (const auto *I : D->specific_attrs<GuardedByAttr>()) {
    if (AK == AK_Written || I->args_size() == 1) {
      // Write requires all capabilities; single-arg read uses the normal
      // per-lock warning path.
      for (auto *Arg : I->args())
        warnIfMutexNotHeld(FSet, D, Exp, AK, MutexExp: Arg, POK, Self: nullptr, Loc);
    } else {
      // Multi-arg read: holding any one of the listed capabilities is
      // sufficient (a writer must hold all, so any one prevents writes).
      warnIfAnyMutexNotHeldForRead(FSet, D, Exp, Args: I->args(), POK, Loc);
    }
  }
}
1987
1988/// Checks pt_guarded_by and pt_guarded_var attributes.
1989/// POK is the same operationKind that was passed to checkAccess.
void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
                                         AccessKind AK,
                                         ProtectedOperationKind POK) {
  // Strip off paren- and cast-expressions, checking if we encounter any other
  // operator that should be delegated to checkAccess() instead.
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Val: Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Val: Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then it's elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY;
        checkAccess(FSet, Exp: CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Val: Exp)) {
    if (UO->getOpcode() == UO_AddrOf) {
      // Pointer access via pointer taken of variable, so the dereferenced
      // variable is not actually a pointer.
      checkAccess(FSet, Exp: UO->getSubExpr(), AK, POK);
      return;
    }
  }

  // Pass by reference/pointer warnings are under a different flag.
  // Map the by-ref/by-pointer operation kinds to their pointer-dereference
  // counterparts; everything else becomes a plain POK_VarDereference.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  switch (POK) {
  case POK_PassByRef:
    PtPOK = POK_PtPassByRef;
    break;
  case POK_ReturnByRef:
    PtPOK = POK_PtReturnByRef;
    break;
  case POK_PassPointer:
    PtPOK = POK_PtPassPointer;
    break;
  case POK_ReturnPointer:
    PtPOK = POK_PtReturnPointer;
    break;
  default:
    break;
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  // PT_GUARDED_VAR (without a named mutex) only requires that *some* lock be
  // held when dereferencing.
  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
    Handler.handleNoMutexHeld(D, POK: PtPOK, AK, Loc: Exp->getExprLoc());

  for (auto const *I : D->specific_attrs<PtGuardedByAttr>()) {
    if (AK == AK_Written || I->args_size() == 1) {
      // Write requires all capabilities; single-arg read uses the normal
      // per-lock warning path.
      for (auto *Arg : I->args())
        warnIfMutexNotHeld(FSet, D, Exp, AK, MutexExp: Arg, POK: PtPOK, Self: nullptr,
                           Loc: Exp->getExprLoc());
    } else {
      // Multi-arg read: holding any one of the listed capabilities is
      // sufficient (a writer must hold all, so any one prevents writes).
      warnIfAnyMutexNotHeldForRead(FSet, D, Exp, Args: I->args(), POK: PtPOK,
                                   Loc: Exp->getExprLoc());
    }
  }
}
2063
2064/// Process a function call, method call, constructor call,
2065/// or destructor call. This involves looking at the attributes on the
2066/// corresponding function/method/constructor/destructor, issuing warnings,
2067/// and updating the locksets accordingly.
2068///
2069/// FIXME: For classes annotated with one of the guarded annotations, we need
2070/// to treat const method calls as reads and non-const method calls as writes,
2071/// and check that the appropriate locks are held. Non-const method calls with
2072/// the same signature as const method calls can be also treated as reads.
2073///
2074/// \param Exp The call expression.
2075/// \param D The callee declaration.
2076/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
2077/// of an implicitly called cleanup function.
2078/// \param Loc If \p Exp = nullptr, the location.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              til::SExpr *Self, SourceLocation Loc) {
  // Capability sets accumulated from the callee's attributes; applied in
  // bulk after all attributes have been inspected.
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;

  // Figure out if we're constructing an object of scoped lockable class
  CapabilityExpr Scp;
  if (Exp) {
    assert(!Self);
    const auto *TagT = Exp->getType()->getAs<TagType>();
    if (D->hasAttrs() && TagT && Exp->isPRValue()) {
      // Record a placeholder for the object being constructed; it is
      // resolved to a variable later (VisitDeclStmt /
      // VisitMaterializeTemporaryExpr).
      til::LiteralPtr *Placeholder =
          Analyzer->SxBuilder.createThisPlaceholder();
      [[maybe_unused]] auto inserted =
          Analyzer->ConstructedObjects.insert(KV: {Exp, Placeholder});
      assert(inserted.second && "Are we visiting the same expression again?");
      if (isa<CXXConstructExpr>(Val: Exp))
        Self = Placeholder;
      if (TagT->getDecl()->getMostRecentDecl()->hasAttr<ScopedLockableAttr>())
        Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
    }

    assert(Loc.isInvalid());
    Loc = Exp->getExprLoc();
  }

  // Gather the effects of the callee's own thread-safety attributes.
  for(const Attr *At : D->attrs()) {
    switch (At->getKind()) {
    // When we encounter a lock function, we need to add the lock to our
    // lockset.
    case attr::AcquireCapability: {
      const auto *A = cast<AcquireCapabilityAttr>(Val: At);
      Analyzer->getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd
                                              : ExclusiveLocksToAdd,
                            Attr: A, Exp, D, Self);
      break;
    }

    // An assert will add a lock to the lockset, but will not generate
    // a warning if it is already there, and will not generate a warning
    // if it is not removed.
    case attr::AssertCapability: {
      const auto *A = cast<AssertCapabilityAttr>(Val: At);
      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(Mtxs&: AssertLocks, Attr: A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(
            FSet, Entry: Analyzer->FactMan.createFact<LockableFactEntry>(
                      Args: AssertLock, Args: A->isShared() ? LK_Shared : LK_Exclusive,
                      Args&: Loc, Args: FactEntry::Asserted));
      break;
    }

    // When we encounter an unlock function, we need to remove unlocked
    // mutexes from the lockset, and flag a warning if they are not there.
    case attr::ReleaseCapability: {
      const auto *A = cast<ReleaseCapabilityAttr>(Val: At);
      if (A->isGeneric())
        Analyzer->getMutexIDs(Mtxs&: GenericLocksToRemove, Attr: A, Exp, D, Self);
      else if (A->isShared())
        Analyzer->getMutexIDs(Mtxs&: SharedLocksToRemove, Attr: A, Exp, D, Self);
      else
        Analyzer->getMutexIDs(Mtxs&: ExclusiveLocksToRemove, Attr: A, Exp, D, Self);
      break;
    }

    case attr::RequiresCapability: {
      const auto *A = cast<RequiresCapabilityAttr>(Val: At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                     AK: A->isShared() ? AK_Read : AK_Written,
                                     MutexExp: Arg, POK: POK_FunctionCall, Self, Loc);
        // use for adopting a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(Mtxs&: ScopedReqsAndExcludes, Attr: A, Exp, D, Self);
      }
      break;
    }

    case attr::LocksExcluded: {
      const auto *A = cast<LocksExcludedAttr>(Val: At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexHeld(FSet, D, Exp, MutexExp: Arg, Self, Loc);
        // use for deferring a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(Mtxs&: ScopedReqsAndExcludes, Attr: A, Exp, D, Self);
      }
      break;
    }

    // Ignore attributes unrelated to thread-safety
    default:
      break;
    }
  }

  // Now process attributes attached to the callee's *parameters*, pairing
  // each parameter with the corresponding call argument.
  std::optional<CallExpr::const_arg_range> Args;
  if (Exp) {
    if (const auto *CE = dyn_cast<CallExpr>(Val: Exp))
      Args = CE->arguments();
    else if (const auto *CE = dyn_cast<CXXConstructExpr>(Val: Exp))
      Args = CE->arguments();
    else
      llvm_unreachable("Unknown call kind");
  }
  const auto *CalledFunction = dyn_cast<FunctionDecl>(Val: D);
  if (CalledFunction && Args.has_value()) {
    for (auto [Param, Arg] : zip(t: CalledFunction->parameters(), u&: *Args)) {
      CapExprSet DeclaredLocks;
      for (const Attr *At : Param->attrs()) {
        switch (At->getKind()) {
        case attr::AcquireCapability: {
          const auto *A = cast<AcquireCapabilityAttr>(Val: At);
          Analyzer->getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd
                                                  : ExclusiveLocksToAdd,
                                Attr: A, Exp, D, Self);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        case attr::ReleaseCapability: {
          const auto *A = cast<ReleaseCapabilityAttr>(Val: At);
          if (A->isGeneric())
            Analyzer->getMutexIDs(Mtxs&: GenericLocksToRemove, Attr: A, Exp, D, Self);
          else if (A->isShared())
            Analyzer->getMutexIDs(Mtxs&: SharedLocksToRemove, Attr: A, Exp, D, Self);
          else
            Analyzer->getMutexIDs(Mtxs&: ExclusiveLocksToRemove, Attr: A, Exp, D, Self);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        case attr::RequiresCapability: {
          const auto *A = cast<RequiresCapabilityAttr>(Val: At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                         AK: A->isShared() ? AK_Read : AK_Written,
                                         MutexExp: Arg, POK: POK_FunctionCall, Self, Loc);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        case attr::LocksExcluded: {
          const auto *A = cast<LocksExcludedAttr>(Val: At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexHeld(FSet, D, Exp, MutexExp: Arg, Self, Loc);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        default:
          break;
        }
      }
      if (DeclaredLocks.empty())
        continue;
      // The argument is expected to be a scoped lockable object; compare
      // the locks declared on the parameter against the locks actually
      // managed by that scope.
      CapabilityExpr Cp(Analyzer->SxBuilder.translate(S: Arg, Ctx: nullptr),
                        StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
      if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Val: Arg->IgnoreCasts());
          Cp.isInvalid() && CBTE) {
        // The argument is a freshly constructed temporary; look it up via
        // the placeholder recorded when it was constructed.
        if (auto Object = Analyzer->ConstructedObjects.find(Val: CBTE->getSubExpr());
            Object != Analyzer->ConstructedObjects.end())
          Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
                              /*Reentrant=*/false);
      }
      const FactEntry *Fact = FSet.findLock(FM&: Analyzer->FactMan, CapE: Cp);
      if (!Fact) {
        Analyzer->Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK: POK_FunctionCall,
                                             LockName: Cp.toString(), LK: LK_Exclusive,
                                             Loc: Exp->getExprLoc());
        continue;
      }
      // Check that the declared and actual underlying mutexes match
      // pairwise, in order.
      const auto *Scope = cast<ScopedLockableFactEntry>(Val: Fact);
      for (const auto &[a, b] :
           zip_longest(t&: DeclaredLocks, u: Scope->getUnderlyingMutexes())) {
        if (!a.has_value()) {
          Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
              Loc: Exp->getExprLoc(), DLoc: D->getLocation(), ScopeName: Scope->toString(),
              Kind: b.value().getKind(), Actual: b.value().toString());
        } else if (!b.has_value()) {
          Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
              Loc: Exp->getExprLoc(), DLoc: D->getLocation(), ScopeName: Scope->toString(),
              Kind: a.value().getKind(), Expected: a.value().toString());
        } else if (!a.value().equals(other: b.value())) {
          Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
              Loc: Exp->getExprLoc(), DLoc: D->getLocation(), ScopeName: Scope->toString(),
              Kind: a.value().getKind(), Expected: a.value().toString(), Actual: b.value().toString());
          break;
        }
      }
    }
  }
  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(Val: D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, Cp: M, UnlockLoc: Loc, FullyRemove: Dtor, ReceivedKind: LK_Exclusive);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, Cp: M, UnlockLoc: Loc, FullyRemove: Dtor, ReceivedKind: LK_Shared);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, Cp: M, UnlockLoc: Loc, FullyRemove: Dtor, ReceivedKind: LK_Generic);

  // Add locks.
  // Locks acquired through a scoped lockable are released by its destructor,
  // hence Managed; otherwise the caller is responsible (Acquired).
  FactEntry::SourceKind Source =
      !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, Entry: Analyzer->FactMan.createFact<LockableFactEntry>(
                                Args: M, Args: LK_Exclusive, Args&: Loc, Args&: Source));
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, Entry: Analyzer->FactMan.createFact<LockableFactEntry>(
                                Args: M, Args: LK_Shared, Args&: Loc, Args&: Source));

  if (!Scp.shouldIgnore()) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
        Args&: Scp, Args&: Loc, Args: FactEntry::Acquired,
        Args: ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
            ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
            SharedLocksToRemove.size());
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, Entry: ScopedEntry);
  }
}
2312
2313/// For unary operations which read and write a variable, we need to
2314/// check whether we hold any required mutexes. Reads are checked in
2315/// VisitCastExpr.
2316void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2317 switch (UO->getOpcode()) {
2318 case UO_PostDec:
2319 case UO_PostInc:
2320 case UO_PreDec:
2321 case UO_PreInc:
2322 checkAccess(Exp: UO->getSubExpr(), AK: AK_Written);
2323 break;
2324 default:
2325 break;
2326 }
2327}
2328
2329/// For binary operations which assign to a variable (writes), we need to check
2330/// whether we hold any required mutexes.
2331/// FIXME: Deal with non-primitive types.
2332void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2333 if (!BO->isAssignmentOp())
2334 return;
2335 checkAccess(Exp: BO->getLHS(), AK: AK_Written);
2336 updateLocalVarMapCtx(S: BO);
2337}
2338
2339/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2340/// need to ensure we hold any required mutexes.
2341/// FIXME: Deal with non-primitive types.
2342void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2343 if (CE->getCastKind() != CK_LValueToRValue)
2344 return;
2345 checkAccess(Exp: CE->getSubExpr(), AK: AK_Read);
2346}
2347
2348void BuildLockset::examineArguments(const FunctionDecl *FD,
2349 CallExpr::const_arg_iterator ArgBegin,
2350 CallExpr::const_arg_iterator ArgEnd,
2351 bool SkipFirstParam) {
2352 // Currently we can't do anything if we don't know the function declaration.
2353 if (!FD)
2354 return;
2355
2356 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2357 // only turns off checking within the body of a function, but we also
2358 // use it to turn off checking in arguments to the function. This
2359 // could result in some false negatives, but the alternative is to
2360 // create yet another attribute.
2361 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2362 return;
2363
2364 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2365 auto Param = Params.begin();
2366 if (SkipFirstParam)
2367 ++Param;
2368
2369 // There can be default arguments, so we stop when one iterator is at end().
2370 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2371 ++Param, ++Arg) {
2372 QualType Qt = (*Param)->getType();
2373 if (Qt->isReferenceType())
2374 checkAccess(Exp: *Arg, AK: AK_Read, POK: POK_PassByRef);
2375 else if (Qt->isPointerType())
2376 checkPtAccess(Exp: *Arg, AK: AK_Read, POK: POK_PassPointer);
2377 }
2378}
2379
2380void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2381 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Val: Exp)) {
2382 const auto *ME = dyn_cast<MemberExpr>(Val: CE->getCallee());
2383 // ME can be null when calling a method pointer
2384 const CXXMethodDecl *MD = CE->getMethodDecl();
2385
2386 if (ME && MD) {
2387 if (ME->isArrow()) {
2388 // Should perhaps be AK_Written if !MD->isConst().
2389 checkPtAccess(Exp: CE->getImplicitObjectArgument(), AK: AK_Read);
2390 } else {
2391 // Should perhaps be AK_Written if !MD->isConst().
2392 checkAccess(Exp: CE->getImplicitObjectArgument(), AK: AK_Read);
2393 }
2394 }
2395
2396 examineArguments(FD: CE->getDirectCallee(), ArgBegin: CE->arg_begin(), ArgEnd: CE->arg_end());
2397 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Val: Exp)) {
2398 OverloadedOperatorKind OEop = OE->getOperator();
2399 switch (OEop) {
2400 case OO_Equal:
2401 case OO_PlusEqual:
2402 case OO_MinusEqual:
2403 case OO_StarEqual:
2404 case OO_SlashEqual:
2405 case OO_PercentEqual:
2406 case OO_CaretEqual:
2407 case OO_AmpEqual:
2408 case OO_PipeEqual:
2409 case OO_LessLessEqual:
2410 case OO_GreaterGreaterEqual:
2411 checkAccess(Exp: OE->getArg(Arg: 1), AK: AK_Read);
2412 [[fallthrough]];
2413 case OO_PlusPlus:
2414 case OO_MinusMinus:
2415 checkAccess(Exp: OE->getArg(Arg: 0), AK: AK_Written);
2416 break;
2417 case OO_Star:
2418 case OO_ArrowStar:
2419 case OO_Arrow:
2420 case OO_Subscript:
2421 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
2422 // Grrr. operator* can be multiplication...
2423 checkPtAccess(Exp: OE->getArg(Arg: 0), AK: AK_Read);
2424 }
2425 [[fallthrough]];
2426 default: {
2427 // TODO: get rid of this, and rely on pass-by-ref instead.
2428 const Expr *Obj = OE->getArg(Arg: 0);
2429 checkAccess(Exp: Obj, AK: AK_Read);
2430 // Check the remaining arguments. For method operators, the first
2431 // argument is the implicit self argument, and doesn't appear in the
2432 // FunctionDecl, but for non-methods it does.
2433 const FunctionDecl *FD = OE->getDirectCallee();
2434 examineArguments(FD, ArgBegin: std::next(x: OE->arg_begin()), ArgEnd: OE->arg_end(),
2435 /*SkipFirstParam*/ !isa<CXXMethodDecl>(Val: FD));
2436 break;
2437 }
2438 }
2439 } else {
2440 examineArguments(FD: Exp->getDirectCallee(), ArgBegin: Exp->arg_begin(), ArgEnd: Exp->arg_end());
2441 }
2442
2443 auto *D = dyn_cast_or_null<NamedDecl>(Val: Exp->getCalleeDecl());
2444 if (D)
2445 handleCall(Exp, D);
2446 updateLocalVarMapCtx(S: Exp);
2447}
2448
2449void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2450 const CXXConstructorDecl *D = Exp->getConstructor();
2451 if (D && D->isCopyConstructor()) {
2452 const Expr* Source = Exp->getArg(Arg: 0);
2453 checkAccess(Exp: Source, AK: AK_Read);
2454 } else {
2455 examineArguments(FD: D, ArgBegin: Exp->arg_begin(), ArgEnd: Exp->arg_end());
2456 }
2457 if (D && D->hasAttrs())
2458 handleCall(Exp, D);
2459}
2460
2461static const Expr *UnpackConstruction(const Expr *E) {
2462 if (auto *CE = dyn_cast<CastExpr>(Val: E))
2463 if (CE->getCastKind() == CK_NoOp)
2464 E = CE->getSubExpr()->IgnoreParens();
2465 if (auto *CE = dyn_cast<CastExpr>(Val: E))
2466 if (CE->getCastKind() == CK_ConstructorConversion ||
2467 CE->getCastKind() == CK_UserDefinedConversion)
2468 E = CE->getSubExpr();
2469 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(Val: E))
2470 E = BTE->getSubExpr();
2471 return E;
2472}
2473
2474void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2475 for (auto *D : S->getDeclGroup()) {
2476 if (auto *VD = dyn_cast_or_null<VarDecl>(Val: D)) {
2477 const Expr *E = VD->getInit();
2478 if (!E)
2479 continue;
2480 E = E->IgnoreParens();
2481
2482 // handle constructors that involve temporaries
2483 if (auto *EWC = dyn_cast<ExprWithCleanups>(Val: E))
2484 E = EWC->getSubExpr()->IgnoreParens();
2485 E = UnpackConstruction(E);
2486
2487 if (auto Object = Analyzer->ConstructedObjects.find(Val: E);
2488 Object != Analyzer->ConstructedObjects.end()) {
2489 Object->second->setClangDecl(VD);
2490 Analyzer->ConstructedObjects.erase(I: Object);
2491 }
2492 }
2493 }
2494 updateLocalVarMapCtx(S);
2495}
2496
2497void BuildLockset::VisitMaterializeTemporaryExpr(
2498 const MaterializeTemporaryExpr *Exp) {
2499 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2500 if (auto Object = Analyzer->ConstructedObjects.find(
2501 Val: UnpackConstruction(E: Exp->getSubExpr()));
2502 Object != Analyzer->ConstructedObjects.end()) {
2503 Object->second->setClangDecl(ExtD);
2504 Analyzer->ConstructedObjects.erase(I: Object);
2505 }
2506 }
2507}
2508
2509void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
2510 if (Analyzer->CurrentFunction == nullptr)
2511 return;
2512 const Expr *RetVal = S->getRetValue();
2513 if (!RetVal)
2514 return;
2515
2516 // If returning by reference or pointer, check that the function requires the
2517 // appropriate capabilities.
2518 const QualType ReturnType =
2519 Analyzer->CurrentFunction->getReturnType().getCanonicalType();
2520 if (ReturnType->isLValueReferenceType()) {
2521 Analyzer->checkAccess(
2522 FSet: FunctionExitFSet, Exp: RetVal,
2523 AK: ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2524 POK: POK_ReturnByRef);
2525 } else if (ReturnType->isPointerType()) {
2526 Analyzer->checkPtAccess(
2527 FSet: FunctionExitFSet, Exp: RetVal,
2528 AK: ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2529 POK: POK_ReturnPointer);
2530 }
2531}
2532
2533/// Given two facts merging on a join point, possibly warn and decide whether to
2534/// keep or replace.
2535///
2536/// \return false if we should keep \p A, true if we should take \p B.
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
                                SourceLocation JoinLoc,
                                LockErrorKind EntryLEK) {
  // Whether we can replace \p A by \p B.
  const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
  unsigned int ReentrancyDepthA = 0;
  unsigned int ReentrancyDepthB = 0;

  // Only LockableFactEntry carries a reentrancy depth; other fact kinds
  // compare as depth 0.
  if (const auto *LFE = dyn_cast<LockableFactEntry>(Val: &A))
    ReentrancyDepthA = LFE->getReentrancyDepth();
  if (const auto *LFE = dyn_cast<LockableFactEntry>(Val: &B))
    ReentrancyDepthB = LFE->getReentrancyDepth();

  if (ReentrancyDepthA != ReentrancyDepthB) {
    Handler.handleMutexHeldEndOfScope(Kind: B.getKind(), LockName: B.toString(), LocLocked: B.loc(),
                                      LocEndOfScope: JoinLoc, LEK: EntryLEK,
                                      /*ReentrancyMismatch=*/true);
    // Pick the FactEntry with the greater reentrancy depth as the "good"
    // fact to reduce potential later warnings.
    return CanModify && ReentrancyDepthA < ReentrancyDepthB;
  } else if (A.kind() != B.kind()) {
    // One branch holds the lock exclusively, the other shared.
    // For managed capabilities, the destructor should unlock in the right mode
    // anyway. For asserted capabilities no unlocking is needed.
    if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
      // The shared capability subsumes the exclusive capability, if possible.
      bool ShouldTakeB = B.kind() == LK_Shared;
      if (CanModify || !ShouldTakeB)
        return ShouldTakeB;
    }
    Handler.handleExclusiveAndShared(Kind: B.getKind(), LockName: B.toString(), Loc1: B.loc(),
                                     Loc2: A.loc());
    // Take the exclusive capability to reduce further warnings.
    return CanModify && B.kind() == LK_Exclusive;
  } else {
    // Same kind and depth: keep A unless it is the asserted one and B is not.
    // The non-asserted capability is the one we want to track.
    return CanModify && A.asserted() && !B.asserted();
  }
}
2575
2576/// Compute the intersection of two locksets and issue warnings for any
2577/// locks in the symmetric difference.
2578///
2579/// This function is used at a merge point in the CFG when comparing the lockset
2580/// of each branch being merged. For example, given the following sequence:
2581/// A; if () then B; else C; D; we need to check that the lockset after B and C
2582/// are the same. In the event of a difference, we use the intersection of these
2583/// two locksets at the start of D.
2584///
2585/// \param EntrySet A lockset for entry into a (possibly new) block.
2586/// \param ExitSet The lockset on exiting a preceding block.
2587/// \param JoinLoc The location of the join point for error reporting
2588/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2589/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
2590void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2591 const FactSet &ExitSet,
2592 SourceLocation JoinLoc,
2593 LockErrorKind EntryLEK,
2594 LockErrorKind ExitLEK) {
2595 FactSet EntrySetOrig = EntrySet;
2596
2597 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2598 for (const auto &Fact : ExitSet) {
2599 const FactEntry &ExitFact = FactMan[Fact];
2600
2601 FactSet::iterator EntryIt = EntrySet.findLockIter(FM&: FactMan, CapE: ExitFact);
2602 if (EntryIt != EntrySet.end()) {
2603 if (join(A: FactMan[*EntryIt], B: ExitFact, JoinLoc, EntryLEK))
2604 *EntryIt = Fact;
2605 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
2606 ExitFact.handleRemovalFromIntersection(FSet: ExitSet, FactMan, JoinLoc,
2607 LEK: EntryLEK, Handler);
2608 }
2609 }
2610
2611 // Find locks in EntrySet that are not in ExitSet, and remove them.
2612 for (const auto &Fact : EntrySetOrig) {
2613 const FactEntry *EntryFact = &FactMan[Fact];
2614 const FactEntry *ExitFact = ExitSet.findLock(FM&: FactMan, CapE: *EntryFact);
2615
2616 if (!ExitFact) {
2617 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
2618 ExitLEK == LEK_NotLockedAtEndOfFunction)
2619 EntryFact->handleRemovalFromIntersection(FSet: EntrySetOrig, FactMan, JoinLoc,
2620 LEK: ExitLEK, Handler);
2621 if (ExitLEK == LEK_LockedSomePredecessors)
2622 EntrySet.removeLock(FM&: FactMan, CapE: *EntryFact);
2623 }
2624 }
2625}
2626
2627// Return true if block B never continues to its successors.
2628static bool neverReturns(const CFGBlock *B) {
2629 if (B->hasNoReturnElement())
2630 return true;
2631 if (B->empty())
2632 return false;
2633
2634 CFGElement Last = B->back();
2635 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2636 if (isa<CXXThrowExpr>(Val: S->getStmt()))
2637 return true;
2638 }
2639 return false;
2640}
2641
/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  // Null when D is not a FunctionDecl (e.g. an Objective-C method; the
  // ObjCMethodDecl case is handled separately when collecting parameters).
  CurrentFunction = dyn_cast<FunctionDecl>(Val: D);

  // Respect the explicit opt-out attribute.
  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code. Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access is disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(Val: D))
    return; // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(Val: D))
    return; // Don't check inside destructors.

  Handler.enterFunction(FD: CurrentFunction);

  // One CFGBlockInfo slot per CFG block, indexed by block ID below.
  BlockInfo.resize(new_size: CFGraph->getNumBlockIDs(),
                   x: CFGBlockInfo::getEmptyBlockInfo(M&: LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];

  // Mark entry block as reachable
  Initial.Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  // Locks acquired/released by the function itself, per its *_LOCK_FUNCTION /
  // UNLOCK_FUNCTION attributes; used to adjust the expected exit set below.
  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty()) {
    assert(*SortedGraph->begin() == &CFGraph->getEntry());
    FactSet &InitialLockset = Initial.EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Val: Attr)) {
        getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr: A,
                    Exp: nullptr, D);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Val: Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr: A,
                    Exp: nullptr, D);
        getMutexIDs(Mtxs&: LocksReleased, Attr: A, Exp: nullptr, D);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Val: Attr)) {
        // Same as above: an argument-less LOCK_FUNCTION hides the lock
        // implementation, so skip analysis of this function entirely.
        if (A->args_size() == 0)
          return;
        getMutexIDs(Mtxs&: A->isShared() ? SharedLocksAcquired
                          : ExclusiveLocksAcquired,
                    Attr: A, Exp: nullptr, D);
      } else if (isa<TryAcquireCapabilityAttr>(Val: Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // Collect capability attributes attached to the function's parameters.
    ArrayRef<ParmVarDecl *> Params;
    if (CurrentFunction)
      Params = CurrentFunction->getCanonicalDecl()->parameters();
    else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(Val: D))
      Params = CurrentMethod->getCanonicalDecl()->parameters();
    else
      llvm_unreachable("Unknown function kind");
    for (const ParmVarDecl *Param : Params) {
      // Capabilities named by attributes on this parameter; if non-empty,
      // the parameter itself is modeled as a declared scoped lockable that
      // manages them.
      CapExprSet UnderlyingLocks;
      for (const auto *Attr : Param->attrs()) {
        Loc = Attr->getLocation();
        if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Val: Attr)) {
          getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr: A,
                      Exp: nullptr, D: Param);
          getMutexIDs(Mtxs&: LocksReleased, Attr: A, Exp: nullptr, D: Param);
          getMutexIDs(Mtxs&: UnderlyingLocks, Attr: A, Exp: nullptr, D: Param);
        } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Val: Attr)) {
          getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr: A,
                      Exp: nullptr, D: Param);
          getMutexIDs(Mtxs&: UnderlyingLocks, Attr: A, Exp: nullptr, D: Param);
        } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Val: Attr)) {
          getMutexIDs(Mtxs&: A->isShared() ? SharedLocksAcquired
                            : ExclusiveLocksAcquired,
                      Attr: A, Exp: nullptr, D: Param);
          getMutexIDs(Mtxs&: UnderlyingLocks, Attr: A, Exp: nullptr, D: Param);
        } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Val: Attr)) {
          getMutexIDs(Mtxs&: UnderlyingLocks, Attr: A, Exp: nullptr, D: Param);
        }
      }
      if (UnderlyingLocks.empty())
        continue;
      CapabilityExpr Cp(SxBuilder.translateVariable(VD: Param, Ctx: nullptr),
                        StringRef(),
                        /*Neg=*/false, /*Reentrant=*/false);
      auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
          Args&: Cp, Args: Param->getLocation(), Args: FactEntry::Declared,
          Args: UnderlyingLocks.size());
      for (const CapabilityExpr &M : UnderlyingLocks)
        ScopedEntry->addLock(M);
      addLock(FSet&: InitialLockset, Entry: ScopedEntry, ReqAttr: true);
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      const auto *Entry = FactMan.createFact<LockableFactEntry>(
          Args: Mu, Args: LK_Exclusive, Args&: Loc, Args: FactEntry::Declared);
      addLock(FSet&: InitialLockset, Entry, ReqAttr: true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      const auto *Entry = FactMan.createFact<LockableFactEntry>(
          Args: Mu, Args: LK_Shared, Args&: Loc, Args: FactEntry::Declared);
      addLock(FSet&: InitialLockset, Entry, ReqAttr: true);
    }
  }

  // Compute the expected exit set.
  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedFunctionExitSet = Initial.EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FM&: FactMan, Entry: FactMan.createFact<LockableFactEntry>(Args: Lock, Args: LK_Exclusive,
                                                     Args: D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FM&: FactMan, Entry: FactMan.createFact<LockableFactEntry>(Args: Lock, Args: LK_Shared,
                                                     Args: D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedFunctionExitSet.removeLock(FM&: FactMan, CapE: Lock);

  // Main traversal: visit every block once in topological order, computing
  // its entry lockset from its predecessors and its exit lockset by walking
  // its statements.
  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(Block: CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(Block: *PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(B: *PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      // The lockset flowing along this edge, including any scope-exit
      // adjustments computed by getEdgeLockset.
      FactSet PrevLockset;
      getEdgeLockset(Result&: PrevLockset, ExitSet: PrevBlockInfo->ExitSet, PredBlock: *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        // Surprisingly 'continue' doesn't always produce back edges, because
        // the CFG has empty "transition" blocks where they meet with the end
        // of the regular loop body. We still want to diagnose them as loop.
        intersectAndWarn(
            EntrySet&: CurrBlockInfo->EntrySet, ExitSet: PrevLockset, JoinLoc: CurrBlockInfo->EntryLoc,
            LEK: isa_and_nonnull<ContinueStmt>(Val: (*PI)->getTerminatorStmt())
                ? LEK_LockedSomeLoopIterations
                : LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        LocksetBuilder.Visit(S: CS.getStmt());
        break;
      }
      // Ignore BaseDtor and MemberDtor for now.
      case CFGElement::AutomaticObjectDtor: {
        CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
        const auto *DD = AD.getDestructorDecl(astContext&: AC.getASTContext());
        // Function parameters are constructed in the caller's context and
        // the CFG does not contain their ctors. Ignore them as their
        // capabilities cannot be analysed because of this missing
        // information.
        if (isa_and_nonnull<ParmVarDecl>(Val: AD.getVarDecl()))
          break;
        if (!DD || !DD->hasAttrs())
          break;

        // Model the implicit destructor call so scoped capabilities are
        // released/re-acquired as their attributes declare.
        LocksetBuilder.handleCall(
            Exp: nullptr, D: DD,
            Self: SxBuilder.translateVariable(VD: AD.getVarDecl(), Ctx: nullptr),
            Loc: AD.getTriggerStmt()->getEndLoc());
        break;
      }

      case CFGElement::CleanupFunction: {
        // __attribute__((cleanup(...))) — model the cleanup call like a
        // destructor of the annotated variable.
        const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
        LocksetBuilder.handleCall(
            /*Exp=*/nullptr, D: CF.getFunctionDecl(),
            Self: SxBuilder.translateVariable(VD: CF.getVarDecl(), Ctx: nullptr),
            Loc: CF.getVarDecl()->getLocation());
        break;
      }

      case CFGElement::TemporaryDtor: {
        auto TD = BI.castAs<CFGTemporaryDtor>();

        // Clean up constructed object even if there are no attributes to
        // keep the number of objects in limbo as small as possible.
        if (auto Object = ConstructedObjects.find(
                Val: TD.getBindTemporaryExpr()->getSubExpr());
            Object != ConstructedObjects.end()) {
          const auto *DD = TD.getDestructorDecl(astContext&: AC.getASTContext());
          if (DD->hasAttrs())
            // TODO: the location here isn't quite correct.
            LocksetBuilder.handleCall(Exp: nullptr, D: DD, Self: Object->second,
                                      Loc: TD.getBindTemporaryExpr()->getEndLoc());
          ConstructedObjects.erase(I: Object);
        }
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(Block: *SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(EntrySet&: PreLoop->EntrySet, ExitSet: LoopEnd->ExitSet, JoinLoc: PreLoop->EntryLoc,
                       LEK: LEK_LockedSomeLoopIterations);
    }
  }

  // Skip the final check if the exit block is unreachable.
  if (!Final.Reachable)
    return;

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(EntrySet&: ExpectedFunctionExitSet, ExitSet: Final.ExitSet, JoinLoc: Final.ExitLoc,
                   EntryLEK: LEK_LockedAtEndOfFunction, ExitLEK: LEK_NotLockedAtEndOfFunction);

  Handler.leaveFunction(FD: CurrentFunction);
}
2958
2959/// Check a function's CFG for thread-safety violations.
2960///
2961/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2962/// at the end of each block, and issue warnings for thread safety violations.
2963/// Each block in the CFG is traversed exactly once.
2964void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2965 ThreadSafetyHandler &Handler,
2966 BeforeSet **BSet) {
2967 if (!*BSet)
2968 *BSet = new BeforeSet;
2969 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2970 Analyzer.runAnalysis(AC);
2971}
2972
2973void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2974
2975/// Helper function that returns a LockKind required for the given level
2976/// of access.
2977LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2978 switch (AK) {
2979 case AK_Read :
2980 return LK_Shared;
2981 case AK_Written :
2982 return LK_Exclusive;
2983 }
2984 llvm_unreachable("Unknown AccessKind");
2985}
2986