//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based off of an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//
16
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
58
59using namespace clang;
60using namespace threadSafety;
61
// Key method definition: the first out-of-line virtual method anchors the
// vtable for ThreadSafetyHandler in this translation unit.
ThreadSafetyHandler::~ThreadSafetyHandler() = default;
64
65/// Issue a warning about an invalid lock expression
66static void warnInvalidLock(ThreadSafetyHandler &Handler,
67 const Expr *MutexExp, const NamedDecl *D,
68 const Expr *DeclExp, StringRef Kind) {
69 SourceLocation Loc;
70 if (DeclExp)
71 Loc = DeclExp->getExprLoc();
72
73 // FIXME: add a note about the attribute location in MutexExp or D
74 if (Loc.isValid())
75 Handler.handleInvalidLockExp(Loc);
76}
77
78namespace {
79
80/// A set of CapabilityExpr objects, which are compiled from thread safety
81/// attributes on a function.
82class CapExprSet : public SmallVector<CapabilityExpr, 4> {
83public:
84 /// Push M onto list, but discard duplicates.
85 void push_back_nodup(const CapabilityExpr &CapE) {
86 if (llvm::none_of(Range&: *this, P: [=](const CapabilityExpr &CapE2) {
87 return CapE.equals(other: CapE2);
88 }))
89 push_back(Elt: CapE);
90 }
91};
92
93class FactManager;
94class FactSet;
95
/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  /// Discriminator for the concrete subclass (see LLVM-style RTTI).
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed, ///< The fact has been acquired through a scoped capability.
  };

private:
  /// Subclass discriminator; stored as an 8-bit bitfield to keep the entry
  /// compact.
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  // Non-virtual protected destructor: entries are deleted only through the
  // concrete type (they live on a bump allocator and are never destroyed
  // polymorphically).
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  /// Called when this fact is present in one branch of a CFG join but not
  /// the other; subclasses decide which diagnostic (if any) to emit.
  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  /// Called when the capability described by `entry` is acquired while this
  /// fact is already held.
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  /// Called when the capability Cp is released; FullyRemove distinguishes a
  /// full release from releasing one facet of a scoped capability.
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
157
158using FactID = unsigned short;
159
160/// FactManager manages the memory for all facts that are created during
161/// the analysis of a single routine.
162class FactManager {
163private:
164 llvm::BumpPtrAllocator &Alloc;
165 std::vector<const FactEntry *> Facts;
166
167public:
168 FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
169
170 template <typename T, typename... ArgTypes>
171 T *createFact(ArgTypes &&...Args) {
172 static_assert(std::is_trivially_destructible_v<T>);
173 return T::create(Alloc, std::forward<ArgTypes>(Args)...);
174 }
175
176 FactID newFact(const FactEntry *Entry) {
177 Facts.push_back(x: Entry);
178 assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
179 "FactID space exhausted");
180 return static_cast<unsigned short>(Facts.size() - 1);
181 }
182
183 const FactEntry &operator[](FactID F) const { return *Facts[F]; }
184};
185
186/// A FactSet is the set of facts that are known to be true at a
187/// particular program point. FactSets must be small, because they are
188/// frequently copied, and are thus implemented as a set of indices into a
189/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
190/// locks, so we can get away with doing a linear search for lookup. Note
191/// that a hashtable or map is inappropriate in this case, because lookups
192/// may involve partial pattern matches, rather than exact matches.
193class FactSet {
194private:
195 using FactVec = SmallVector<FactID, 4>;
196
197 FactVec FactIDs;
198
199public:
200 using iterator = FactVec::iterator;
201 using const_iterator = FactVec::const_iterator;
202
203 iterator begin() { return FactIDs.begin(); }
204 const_iterator begin() const { return FactIDs.begin(); }
205
206 iterator end() { return FactIDs.end(); }
207 const_iterator end() const { return FactIDs.end(); }
208
209 bool isEmpty() const { return FactIDs.size() == 0; }
210
211 // Return true if the set contains only negative facts
212 bool isEmpty(FactManager &FactMan) const {
213 for (const auto FID : *this) {
214 if (!FactMan[FID].negative())
215 return false;
216 }
217 return true;
218 }
219
220 void addLockByID(FactID ID) { FactIDs.push_back(Elt: ID); }
221
222 FactID addLock(FactManager &FM, const FactEntry *Entry) {
223 FactID F = FM.newFact(Entry);
224 FactIDs.push_back(Elt: F);
225 return F;
226 }
227
228 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
229 unsigned n = FactIDs.size();
230 if (n == 0)
231 return false;
232
233 for (unsigned i = 0; i < n-1; ++i) {
234 if (FM[FactIDs[i]].matches(other: CapE)) {
235 FactIDs[i] = FactIDs[n-1];
236 FactIDs.pop_back();
237 return true;
238 }
239 }
240 if (FM[FactIDs[n-1]].matches(other: CapE)) {
241 FactIDs.pop_back();
242 return true;
243 }
244 return false;
245 }
246
247 std::optional<FactID> replaceLock(FactManager &FM, iterator It,
248 const FactEntry *Entry) {
249 if (It == end())
250 return std::nullopt;
251 FactID F = FM.newFact(Entry);
252 *It = F;
253 return F;
254 }
255
256 std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
257 const FactEntry *Entry) {
258 return replaceLock(FM, It: findLockIter(FM, CapE), Entry);
259 }
260
261 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
262 return llvm::find_if(Range&: *this,
263 P: [&](FactID ID) { return FM[ID].matches(other: CapE); });
264 }
265
266 const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
267 auto I =
268 llvm::find_if(Range: *this, P: [&](FactID ID) { return FM[ID].matches(other: CapE); });
269 return I != end() ? &FM[*I] : nullptr;
270 }
271
272 const FactEntry *findLockUniv(FactManager &FM,
273 const CapabilityExpr &CapE) const {
274 auto I = llvm::find_if(
275 Range: *this, P: [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
276 return I != end() ? &FM[*I] : nullptr;
277 }
278
279 const FactEntry *findPartialMatch(FactManager &FM,
280 const CapabilityExpr &CapE) const {
281 auto I = llvm::find_if(Range: *this, P: [&](FactID ID) -> bool {
282 return FM[ID].partiallyMatches(other: CapE);
283 });
284 return I != end() ? &FM[*I] : nullptr;
285 }
286
287 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
288 auto I = llvm::find_if(
289 Range: *this, P: [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
290 return I != end();
291 }
292};
293
294class ThreadSafetyAnalyzer;
295
296} // namespace
297
298namespace clang {
299namespace threadSafety {
300
/// Keeps track of the lock-ordering graph declared by acquired_before /
/// acquired_after attributes, and checks lock acquisitions against it.
class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    // Capabilities that must be acquired before this one.
    BeforeVect Vect;
    // DFS marker used during cycle detection (see checkBeforeAfter).
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  /// Process acquired_before and acquired_after attributes on Vd,
  /// populating (and returning) its BeforeInfo entry.
  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  /// Return the BeforeInfo for Vd, creating it on first use.
  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  /// Check whether acquiring Vd at Loc violates the declared ordering with
  /// respect to the capabilities currently held in FSet.
  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};
335
336} // namespace threadSafety
337} // namespace clang
338
339namespace {
340
341class LocalVariableMap;
342
343using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
344
345/// A side (entry or exit) of a CFG node.
346enum CFGBlockSide { CBS_Entry, CBS_Exit };
347
/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  // Select EntrySet or ExitSet by CFG side.
  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  // Select the entry or exit source location by CFG side.
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  // Private: construct via getEmptyBlockInfo, which supplies the shared
  // empty context from a LocalVariableMap.
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
391
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace references to local
// variables with their SSA-names. Instead, we compute a Context for each point
// in the code, which maps local variables to the appropriate SSA-name. This
// map changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
405class LocalVariableMap {
406public:
407 using Context = LocalVarContext;
408
409 /// A VarDefinition consists of an expression, representing the value of the
410 /// variable, along with the context in which that expression should be
411 /// interpreted. A reference VarDefinition does not itself contain this
412 /// information, but instead contains a pointer to a previous VarDefinition.
413 struct VarDefinition {
414 public:
415 friend class LocalVariableMap;
416
417 // The original declaration for this variable.
418 const NamedDecl *Dec;
419
420 // The expression for this variable, OR
421 const Expr *Exp = nullptr;
422
423 // Direct reference to another VarDefinition
424 unsigned DirectRef = 0;
425
426 // Reference to underlying canonical non-reference VarDefinition.
427 unsigned CanonicalRef = 0;
428
429 // The map with which Exp should be interpreted.
430 Context Ctx;
431
432 bool isReference() const { return !Exp; }
433
434 void invalidateRef() { DirectRef = CanonicalRef = 0; }
435
436 private:
437 // Create ordinary variable definition
438 VarDefinition(const NamedDecl *D, const Expr *E, Context C)
439 : Dec(D), Exp(E), Ctx(C) {}
440
441 // Create reference to previous definition
442 VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef,
443 Context C)
444 : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {}
445 };
446
447private:
448 Context::Factory ContextFactory;
449 std::vector<VarDefinition> VarDefinitions;
450 std::vector<std::pair<const Stmt *, Context>> SavedContexts;
451
452public:
453 LocalVariableMap() {
454 // index 0 is a placeholder for undefined variables (aka phi-nodes).
455 VarDefinitions.push_back(x: VarDefinition(nullptr, 0, 0, getEmptyContext()));
456 }
457
458 /// Look up a definition, within the given context.
459 const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
460 const unsigned *i = Ctx.lookup(K: D);
461 if (!i)
462 return nullptr;
463 assert(*i < VarDefinitions.size());
464 return &VarDefinitions[*i];
465 }
466
467 /// Look up the definition for D within the given context. Returns
468 /// NULL if the expression is not statically known. If successful, also
469 /// modifies Ctx to hold the context of the return Expr.
470 const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
471 const unsigned *P = Ctx.lookup(K: D);
472 if (!P)
473 return nullptr;
474
475 unsigned i = *P;
476 while (i > 0) {
477 if (VarDefinitions[i].Exp) {
478 Ctx = VarDefinitions[i].Ctx;
479 return VarDefinitions[i].Exp;
480 }
481 i = VarDefinitions[i].DirectRef;
482 }
483 return nullptr;
484 }
485
486 Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
487
488 /// Return the next context after processing S. This function is used by
489 /// clients of the class to get the appropriate context when traversing the
490 /// CFG. It must be called for every assignment or DeclStmt.
491 Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
492 if (SavedContexts[CtxIndex+1].first == S) {
493 CtxIndex++;
494 Context Result = SavedContexts[CtxIndex].second;
495 return Result;
496 }
497 return C;
498 }
499
500 void dumpVarDefinitionName(unsigned i) {
501 if (i == 0) {
502 llvm::errs() << "Undefined";
503 return;
504 }
505 const NamedDecl *Dec = VarDefinitions[i].Dec;
506 if (!Dec) {
507 llvm::errs() << "<<NULL>>";
508 return;
509 }
510 Dec->printName(OS&: llvm::errs());
511 llvm::errs() << "." << i << " " << ((const void*) Dec);
512 }
513
514 /// Dumps an ASCII representation of the variable map to llvm::errs()
515 void dump() {
516 for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
517 const Expr *Exp = VarDefinitions[i].Exp;
518 unsigned Ref = VarDefinitions[i].DirectRef;
519
520 dumpVarDefinitionName(i);
521 llvm::errs() << " = ";
522 if (Exp) Exp->dump();
523 else {
524 dumpVarDefinitionName(i: Ref);
525 llvm::errs() << "\n";
526 }
527 }
528 }
529
530 /// Dumps an ASCII representation of a Context to llvm::errs()
531 void dumpContext(Context C) {
532 for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
533 const NamedDecl *D = I.getKey();
534 D->printName(OS&: llvm::errs());
535 llvm::errs() << " -> ";
536 dumpVarDefinitionName(i: I.getData());
537 llvm::errs() << "\n";
538 }
539 }
540
541 /// Builds the variable map.
542 void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
543 std::vector<CFGBlockInfo> &BlockInfo);
544
545protected:
546 friend class VarMapBuilder;
547
548 // Resolve any definition ID down to its non-reference base ID.
549 unsigned getCanonicalDefinitionID(unsigned ID) const {
550 while (ID > 0 && VarDefinitions[ID].isReference())
551 ID = VarDefinitions[ID].CanonicalRef;
552 return ID;
553 }
554
555 // Get the current context index
556 unsigned getContextIndex() { return SavedContexts.size()-1; }
557
558 // Save the current context for later replay
559 void saveContext(const Stmt *S, Context C) {
560 SavedContexts.push_back(x: std::make_pair(x&: S, y&: C));
561 }
562
563 // Adds a new definition to the given context, and returns a new context.
564 // This method should be called when declaring a new variable.
565 Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
566 assert(!Ctx.contains(D));
567 unsigned newID = VarDefinitions.size();
568 Context NewCtx = ContextFactory.add(Old: Ctx, K: D, D: newID);
569 VarDefinitions.push_back(x: VarDefinition(D, Exp, Ctx));
570 return NewCtx;
571 }
572
573 // Add a new reference to an existing definition.
574 Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) {
575 unsigned newID = VarDefinitions.size();
576 Context NewCtx = ContextFactory.add(Old: Ctx, K: D, D: newID);
577 VarDefinitions.push_back(
578 x: VarDefinition(D, Ref, getCanonicalDefinitionID(ID: Ref), Ctx));
579 return NewCtx;
580 }
581
582 // Updates a definition only if that definition is already in the map.
583 // This method should be called when assigning to an existing variable.
584 Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
585 if (Ctx.contains(K: D)) {
586 unsigned newID = VarDefinitions.size();
587 Context NewCtx = ContextFactory.remove(Old: Ctx, K: D);
588 NewCtx = ContextFactory.add(Old: NewCtx, K: D, D: newID);
589 VarDefinitions.push_back(x: VarDefinition(D, Exp, Ctx));
590 return NewCtx;
591 }
592 return Ctx;
593 }
594
595 // Removes a definition from the context, but keeps the variable name
596 // as a valid variable. The index 0 is a placeholder for cleared definitions.
597 Context clearDefinition(const NamedDecl *D, Context Ctx) {
598 Context NewCtx = Ctx;
599 if (NewCtx.contains(K: D)) {
600 NewCtx = ContextFactory.remove(Old: NewCtx, K: D);
601 NewCtx = ContextFactory.add(Old: NewCtx, K: D, D: 0);
602 }
603 return NewCtx;
604 }
605
606 // Remove a definition entirely frmo the context.
607 Context removeDefinition(const NamedDecl *D, Context Ctx) {
608 Context NewCtx = Ctx;
609 if (NewCtx.contains(K: D)) {
610 NewCtx = ContextFactory.remove(Old: NewCtx, K: D);
611 }
612 return NewCtx;
613 }
614
615 Context intersectContexts(Context C1, Context C2);
616 Context createReferenceContext(Context C);
617 void intersectBackEdge(Context C1, Context C2);
618};
619
620} // namespace
621
622// This has to be defined after LocalVariableMap.
623CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
624 return CFGBlockInfo(M.getEmptyContext());
625}
626
627namespace {
628
/// Visitor which builds a LocalVariableMap
///
/// Walks the statements of a single CFG block, updating Ctx as declarations,
/// assignments, and calls are encountered. The final Ctx becomes the block's
/// exit context.
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  // The map being built (not owned).
  LocalVariableMap* VMap;
  // The context at the current program point; mutated as statements are
  // visited.
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};
642
643} // namespace
644
645// Add new local variables to the variable map
646void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
647 bool modifiedCtx = false;
648 const DeclGroupRef DGrp = S->getDeclGroup();
649 for (const auto *D : DGrp) {
650 if (const auto *VD = dyn_cast_or_null<VarDecl>(Val: D)) {
651 const Expr *E = VD->getInit();
652
653 // Add local variables with trivial type to the variable map
654 QualType T = VD->getType();
655 if (T.isTrivialType(Context: VD->getASTContext())) {
656 Ctx = VMap->addDefinition(D: VD, Exp: E, Ctx);
657 modifiedCtx = true;
658 }
659 }
660 }
661 if (modifiedCtx)
662 VMap->saveContext(S, C: Ctx);
663}
664
665// Update local variable definitions in variable map
666void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
667 if (!BO->isAssignmentOp())
668 return;
669
670 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
671
672 // Update the variable map and current context.
673 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: LHSExp)) {
674 const ValueDecl *VDec = DRE->getDecl();
675 if (Ctx.lookup(K: VDec)) {
676 if (BO->getOpcode() == BO_Assign)
677 Ctx = VMap->updateDefinition(D: VDec, Exp: BO->getRHS(), Ctx);
678 else
679 // FIXME -- handle compound assignment operators
680 Ctx = VMap->clearDefinition(D: VDec, Ctx);
681 VMap->saveContext(S: BO, C: Ctx);
682 }
683 }
684}
685
686// Invalidates local variable definitions if variable escaped.
687void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
688 const FunctionDecl *FD = CE->getDirectCallee();
689 if (!FD)
690 return;
691
692 // Heuristic for likely-benign functions that pass by mutable reference. This
693 // is needed to avoid a slew of false positives due to mutable reference
694 // passing where the captured reference is usually passed on by-value.
695 if (const IdentifierInfo *II = FD->getIdentifier()) {
696 // Any kind of std::bind-like functions.
697 if (II->isStr(Str: "bind") || II->isStr(Str: "bind_front"))
698 return;
699 }
700
701 // Invalidate local variable definitions that are passed by non-const
702 // reference or non-const pointer.
703 for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
704 if (Idx >= FD->getNumParams())
705 break;
706
707 const Expr *Arg = CE->getArg(Arg: Idx)->IgnoreParenImpCasts();
708 const ParmVarDecl *PVD = FD->getParamDecl(i: Idx);
709 QualType ParamType = PVD->getType();
710
711 // Potential reassignment if passed by non-const reference / pointer.
712 const ValueDecl *VDec = nullptr;
713 if (ParamType->isReferenceType() &&
714 !ParamType->getPointeeType().isConstQualified()) {
715 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Arg))
716 VDec = DRE->getDecl();
717 } else if (ParamType->isPointerType() &&
718 !ParamType->getPointeeType().isConstQualified()) {
719 Arg = Arg->IgnoreParenCasts();
720 if (const auto *UO = dyn_cast<UnaryOperator>(Val: Arg)) {
721 if (UO->getOpcode() == UO_AddrOf) {
722 const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
723 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: SubE))
724 VDec = DRE->getDecl();
725 }
726 }
727 }
728
729 if (VDec)
730 Ctx = VMap->clearDefinition(D: VDec, Ctx);
731 }
732 // Save the context after the call where escaped variables' definitions (if
733 // they exist) are cleared.
734 VMap->saveContext(S: CE, C: Ctx);
735}
736
737// Computes the intersection of two contexts. The intersection is the
738// set of variables which have the same definition in both contexts;
739// variables with different definitions are discarded.
740LocalVariableMap::Context
741LocalVariableMap::intersectContexts(Context C1, Context C2) {
742 Context Result = C1;
743 for (const auto &P : C1) {
744 const NamedDecl *Dec = P.first;
745 const unsigned *I2 = C2.lookup(K: Dec);
746 if (!I2) {
747 // The variable doesn't exist on second path.
748 Result = removeDefinition(D: Dec, Ctx: Result);
749 } else if (getCanonicalDefinitionID(ID: P.second) !=
750 getCanonicalDefinitionID(ID: *I2)) {
751 // If canonical definitions mismatch the underlying definitions are
752 // different, invalidate.
753 Result = clearDefinition(D: Dec, Ctx: Result);
754 }
755 }
756 return Result;
757}
758
759// For every variable in C, create a new variable that refers to the
760// definition in C. Return a new context that contains these new variables.
761// (We use this for a naive implementation of SSA on loop back-edges.)
762LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
763 Context Result = getEmptyContext();
764 for (const auto &P : C)
765 Result = addReference(D: P.first, Ref: P.second, Ctx: Result);
766 return Result;
767}
768
769// This routine also takes the intersection of C1 and C2, but it does so by
770// altering the VarDefinitions. C1 must be the result of an earlier call to
771// createReferenceContext.
772void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
773 for (const auto &P : C1) {
774 const unsigned I1 = P.second;
775 VarDefinition *VDef = &VarDefinitions[I1];
776 assert(VDef->isReference());
777
778 const unsigned *I2 = C2.lookup(K: P.first);
779 if (!I2) {
780 // Variable does not exist at the end of the loop, invalidate.
781 VDef->invalidateRef();
782 continue;
783 }
784
785 // Compare the canonical IDs. This correctly handles chains of references
786 // and determines if the variable is truly loop-invariant.
787 if (VDef->CanonicalRef != getCanonicalDefinitionID(ID: *I2))
788 VDef->invalidateRef(); // Mark this variable as undefined
789 }
790}
791
792// Traverse the CFG in topological order, so all predecessors of a block
793// (excluding back-edges) are visited before the block itself. At
794// each point in the code, we calculate a Context, which holds the set of
795// variable definitions which are visible at that point in execution.
796// Visible variables are mapped to their definitions using an array that
797// contains all definitions.
798//
799// At join points in the CFG, the set is computed as the intersection of
800// the incoming sets along each edge, E.g.
801//
802// { Context | VarDefinitions }
803// int x = 0; { x -> x1 | x1 = 0 }
804// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
805// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
806// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
807// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
808//
809// This is essentially a simpler and more naive version of the standard SSA
810// algorithm. Those definitions that remain in the intersection are from blocks
811// that strictly dominate the current block. We do not bother to insert proper
812// phi nodes, because they are not used in our analysis; instead, wherever
813// a phi node would be required, we simply remove that definition from the
814// context (E.g. x above).
815//
816// The initial traversal does not capture back-edges, so those need to be
817// handled on a separate pass. Whenever the first pass encounters an
818// incoming back edge, it duplicates the context, creating new definitions
819// that refer back to the originals. (These correspond to places where SSA
820// might have to insert a phi node.) On the second pass, these definitions are
821// set to NULL if the variable has changed on the back-edge (i.e. a phi
822// node was actually required.) E.g.
823//
824// { Context | VarDefinitions }
825// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
826// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
827// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
828// ... { y -> y1 | x3 = 2, x2 = 1, ... }
829void LocalVariableMap::traverseCFG(CFG *CFGraph,
830 const PostOrderCFGView *SortedGraph,
831 std::vector<CFGBlockInfo> &BlockInfo) {
832 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
833
834 for (const auto *CurrBlock : *SortedGraph) {
835 unsigned CurrBlockID = CurrBlock->getBlockID();
836 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
837
838 VisitedBlocks.insert(Block: CurrBlock);
839
840 // Calculate the entry context for the current block
841 bool HasBackEdges = false;
842 bool CtxInit = true;
843 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
844 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
845 // if *PI -> CurrBlock is a back edge, so skip it
846 if (*PI == nullptr || !VisitedBlocks.alreadySet(Block: *PI)) {
847 HasBackEdges = true;
848 continue;
849 }
850
851 unsigned PrevBlockID = (*PI)->getBlockID();
852 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
853
854 if (CtxInit) {
855 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
856 CtxInit = false;
857 }
858 else {
859 CurrBlockInfo->EntryContext =
860 intersectContexts(C1: CurrBlockInfo->EntryContext,
861 C2: PrevBlockInfo->ExitContext);
862 }
863 }
864
865 // Duplicate the context if we have back-edges, so we can call
866 // intersectBackEdges later.
867 if (HasBackEdges)
868 CurrBlockInfo->EntryContext =
869 createReferenceContext(C: CurrBlockInfo->EntryContext);
870
871 // Create a starting context index for the current block
872 saveContext(S: nullptr, C: CurrBlockInfo->EntryContext);
873 CurrBlockInfo->EntryIndex = getContextIndex();
874
875 // Visit all the statements in the basic block.
876 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
877 for (const auto &BI : *CurrBlock) {
878 switch (BI.getKind()) {
879 case CFGElement::Statement: {
880 CFGStmt CS = BI.castAs<CFGStmt>();
881 VMapBuilder.Visit(S: CS.getStmt());
882 break;
883 }
884 default:
885 break;
886 }
887 }
888 CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
889
890 // Mark variables on back edges as "unknown" if they've been changed.
891 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
892 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
893 // if CurrBlock -> *SI is *not* a back edge
894 if (*SI == nullptr || !VisitedBlocks.alreadySet(Block: *SI))
895 continue;
896
897 CFGBlock *FirstLoopBlock = *SI;
898 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
899 Context LoopEnd = CurrBlockInfo->ExitContext;
900 intersectBackEdge(C1: LoopBegin, C2: LoopEnd);
901 }
902 }
903
904 // Put an extra entry at the end of the indexed context array
905 unsigned exitID = CFGraph->getExit().getBlockID();
906 saveContext(S: nullptr, C: BlockInfo[exitID].ExitContext);
907}
908
/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
///
/// Preference order for a block's locations: its terminator statement, then
/// its last/first contained statements, then a neighbor's location when the
/// block is empty.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      // No terminator: scan backwards for the last statement element.
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}
956
957namespace {
958
/// A fact for an acquired (possibly reentrant) capability such as a mutex,
/// held in either exclusive or shared mode.
class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;

  // Private: facts are arena-allocated via the create() overloads below.
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  /// Clone \p Other into storage owned by \p Alloc.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  /// Create a fresh fact in storage owned by \p Alloc.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  /// Warn when this fact disagrees with the other branch at a join point;
  /// asserted, negative, and universal capabilities are exempt.
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(Kind: getKind(), LockName: toString(), LocLocked: loc(), LocEndOfScope: JoinLoc,
                                        LEK);
    }
  }

  /// Called when the capability is acquired while already held: either bump
  /// the reentrancy depth, or diagnose a double lock.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, ReenterKind: entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FM&: FactMan, CapE: entry, Entry: RFact);
    } else {
      Handler.handleDoubleLock(Kind: entry.getKind(), LockName: entry.toString(), LocLocked: loc(),
                               LocDoubleLock: entry.loc());
    }
  }

  /// Release the capability: drop one reentrancy level if present, otherwise
  /// remove the fact and record the corresponding negative capability.
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FM&: FactMan, CapE: Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FM&: FactMan, Entry: RFact);
    } else if (!Cp.negative()) {
      FSet.addLock(FM&: FactMan, Entry: FactMan.createFact<LockableFactEntry>(
                                  Args: !Cp, Args: LK_Exclusive, Args&: UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability reentrant,
  // nullptr otherwise. The lock kind must match the original acquisition.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(Args: *this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrant, nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(Args: *this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  // LLVM-style RTTI support.
  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};
1049
/// How a scoped capability (e.g. a lock guard) manages an underlying mutex:
/// acquired by the scoped object, or temporarily released by it.
enum UnderlyingCapabilityKind {
  UCK_Acquired, ///< Any kind of acquired capability.
  UCK_ReleasedShared, ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};
1055
/// A mutex managed by a scoped lockable object, together with how the scope
/// affects it (acquire vs. release).
struct UnderlyingCapability {
  CapabilityExpr Cap;            // The managed mutex.
  UnderlyingCapabilityKind Kind; // How the scope manages it.
};
1060
/// A fact corresponding to a scoped lockable object (e.g. a lock guard),
/// which manages the acquisition and release of other capabilities. The
/// managed capabilities are stored inline as trailing objects.
class ScopedLockableFactEntry final
    : public FactEntry,
      private llvm::TrailingObjects<ScopedLockableFactEntry,
                                    UnderlyingCapability> {
  friend TrailingObjects;

private:
  // Fixed capacity reserved at creation time; there is no reallocation.
  const unsigned ManagedCapacity;
  unsigned ManagedSize = 0;

  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src, unsigned ManagedCapacity)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
        ManagedCapacity(ManagedCapacity) {}

  // Append a managed capability into the trailing storage; the caller must
  // not exceed the capacity reserved in create().
  void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
    assert(ManagedSize < ManagedCapacity);
    new (getTrailingObjects() + ManagedSize) UnderlyingCapability{.Cap: M, .Kind: UCK};
    ++ManagedSize;
  }

  ArrayRef<UnderlyingCapability> getManaged() const {
    return getTrailingObjects(N: ManagedSize);
  }

public:
  /// Allocate a new entry with room for \p ManagedCapacity underlying
  /// capabilities in \p Alloc.
  static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                         const CapabilityExpr &CE,
                                         SourceLocation Loc, SourceKind Src,
                                         unsigned ManagedCapacity) {
    void *Storage =
        Alloc.Allocate(Size: totalSizeToAlloc<UnderlyingCapability>(Counts: ManagedCapacity),
                       Alignment: alignof(ScopedLockableFactEntry));
    return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
  }

  /// Return all underlying mutexes, regardless of how they are managed.
  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : getManaged())
      UnderlyingMutexesSet.push_back(Elt: UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  /// \name Adding managed locks
  /// Capacity for managed locks must have been allocated via \ref create.
  /// There is no reallocation in case the capacity is exceeded!
  /// \{
  void addLock(const CapabilityExpr &M) { addManaged(M, UCK: UCK_Acquired); }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK: UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK: UCK_ReleasedShared);
  }
  /// \}

  /// At a join point, warn about each underlying mutex whose state disagrees
  /// with what this scope expects. End-of-function diagnostics are skipped,
  /// since leaving the scope will restore the expected state.
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : getManaged()) {
      const auto *Entry = FSet.findLock(FM&: FactMan, CapE: UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(Kind: UnderlyingMutex.Cap.getKind(),
                                          LockName: UnderlyingMutex.Cap.toString(), LocLocked: loc(),
                                          LocEndOfScope: JoinLoc, LEK);
      }
    }
  }

  /// Relock via the scoped object: acquire the capabilities the scope
  /// acquired, release the ones it released.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : getManaged()) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, Cp: UnderlyingMutex.Cap, kind: entry.kind(), loc: entry.loc(),
             Handler: &Handler);
      else
        unlock(FSet, FactMan, Cp: UnderlyingMutex.Cap, loc: entry.loc(), Handler: &Handler);
    }
  }

  /// Unlock or destroy the scoped object: invert the effect on each managed
  /// capability, and remove the scope fact itself when fully removing.
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : getManaged()) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, Cp: UnderlyingMutex.Cap, loc: UnlockLoc, Handler: TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, Cp: UnderlyingMutex.Cap, kind, loc: UnlockLoc, Handler: TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FM&: FactMan, CapE: Cp);
  }

  // LLVM-style RTTI support.
  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  // Acquire \p Cp on behalf of the scope: reenter if it is already held
  // (diagnosing double locks through \p Handler when non-null), otherwise add
  // a managed fact and clear any matching negative capability.
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FM&: FactMan, CapE: Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(Val: FactMan[*It]);
      if (const FactEntry *RFact = Fact.tryReenter(FactMan, ReenterKind: kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FM&: FactMan, It, Entry: RFact);
      } else if (Handler) {
        Handler->handleDoubleLock(Kind: Cp.getKind(), LockName: Cp.toString(), LocLocked: Fact.loc(), LocDoubleLock: loc);
      }
    } else {
      FSet.removeLock(FM&: FactMan, CapE: !Cp);
      FSet.addLock(FM&: FactMan, Entry: FactMan.createFact<LockableFactEntry>(Args: Cp, Args&: kind, Args&: loc,
                                                                  Args: Managed));
    }
  }

  // Release \p Cp on behalf of the scope: leave one reentrancy level if
  // present, otherwise replace the fact with the negative capability;
  // diagnose unmatched unlocks through \p Handler when non-null.
  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FM&: FactMan, CapE: Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(Val: FactMan[*It]);
      if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FM&: FactMan, It, Entry: RFact);
        return;
      }

      FSet.replaceLock(
          FM&: FactMan, It,
          Entry: FactMan.createFact<LockableFactEntry>(Args: !Cp, Args: LK_Exclusive, Args&: loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FM&: FactMan, CapE: !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Kind: Cp.getKind(), LockName: Cp.toString(), Loc: loc, LocPreviousUnlock: PrevLoc);
    }
  }
};
1216
/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  // Arena that owns all facts and TIL expressions built by the analysis.
  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  // Translates Clang attribute expressions into TIL SExprs.
  threadSafety::SExprBuilder SxBuilder;

  // Sink for all diagnostics produced by the analysis.
  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  // Per-block SSA-like map from local variables to their definitions.
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  // Per-CFG-block entry/exit locksets and source locations.
  std::vector<CFGBlockInfo> BlockInfo;

  // Shared acquired_before/after graph, owned by the caller.
  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
        GlobalBeforeSet(Bset) {}

  // True if methods of the current scope could acquire/release CapE.
  bool inCurrentScope(const CapabilityExpr &CapE);

  // Lockset mutation, with diagnostics for double locks and bad unlocks.
  void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  // Extract capability expressions from an attribute.
  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  // Trylock variant: only applies on the branch where the trylock succeeded.
  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  // Trace a branch condition back to a trylock call, tracking negation.
  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  // Compute the lockset holding on the PredBlock -> CurrBlock edge.
  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  // Intersect locksets at join points, warning about mismatches.
  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, EntryLEK: LEK, ExitLEK: LEK);
  }

  // Main entry point: analyze one function.
  void runAnalysis(AnalysisDeclContext &AC);

  // Diagnostics for capability requirements on accesses and calls.
  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::SExpr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};
1290
1291} // namespace
1292
1293/// Process acquired_before and acquired_after attributes on Vd.
1294BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
1295 ThreadSafetyAnalyzer& Analyzer) {
1296 // Create a new entry for Vd.
1297 BeforeInfo *Info = nullptr;
1298 {
1299 // Keep InfoPtr in its own scope in case BMap is modified later and the
1300 // reference becomes invalid.
1301 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
1302 if (!InfoPtr)
1303 InfoPtr.reset(p: new BeforeInfo());
1304 Info = InfoPtr.get();
1305 }
1306
1307 for (const auto *At : Vd->attrs()) {
1308 switch (At->getKind()) {
1309 case attr::AcquiredBefore: {
1310 const auto *A = cast<AcquiredBeforeAttr>(Val: At);
1311
1312 // Read exprs from the attribute, and add them to BeforeVect.
1313 for (const auto *Arg : A->args()) {
1314 CapabilityExpr Cp =
1315 Analyzer.SxBuilder.translateAttrExpr(AttrExp: Arg, Ctx: nullptr);
1316 if (const ValueDecl *Cpvd = Cp.valueDecl()) {
1317 Info->Vect.push_back(Elt: Cpvd);
1318 const auto It = BMap.find(Val: Cpvd);
1319 if (It == BMap.end())
1320 insertAttrExprs(Vd: Cpvd, Analyzer);
1321 }
1322 }
1323 break;
1324 }
1325 case attr::AcquiredAfter: {
1326 const auto *A = cast<AcquiredAfterAttr>(Val: At);
1327
1328 // Read exprs from the attribute, and add them to BeforeVect.
1329 for (const auto *Arg : A->args()) {
1330 CapabilityExpr Cp =
1331 Analyzer.SxBuilder.translateAttrExpr(AttrExp: Arg, Ctx: nullptr);
1332 if (const ValueDecl *ArgVd = Cp.valueDecl()) {
1333 // Get entry for mutex listed in attribute
1334 BeforeInfo *ArgInfo = getBeforeInfoForDecl(Vd: ArgVd, Analyzer);
1335 ArgInfo->Vect.push_back(Elt: Vd);
1336 }
1337 }
1338 break;
1339 }
1340 default:
1341 break;
1342 }
1343 }
1344
1345 return Info;
1346}
1347
1348BeforeSet::BeforeInfo *
1349BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
1350 ThreadSafetyAnalyzer &Analyzer) {
1351 auto It = BMap.find(Val: Vd);
1352 BeforeInfo *Info = nullptr;
1353 if (It == BMap.end())
1354 Info = insertAttrExprs(Vd, Analyzer);
1355 else
1356 Info = It->second.get();
1357 assert(Info && "BMap contained nullptr?");
1358 return Info;
1359}
1360
/// Warn if any mutexes in FSet are in the acquired_before set of StartVd,
/// i.e. they should have been acquired after StartVd. Also diagnoses cycles
/// in the acquired_before/after graph.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  // Nodes marked during this query; marks are reset before returning.
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  // Visited encoding: 0 = unvisited, 1 = on the current DFS path,
  // 2 = fully processed.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    // Back edge onto the current DFS path: there is a cycle.
    if (Info->Visited == 1)
      return true;

    // Already fully explored via an earlier path.
    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Elt: Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(FM&: Analyzer.FactMan, Vd: Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(Kind: CapKind, L1Name: L1, L2Name: L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        // Report each cycle only once per declaration.
        if (CycMap.try_emplace(Key: Vd, Args: true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1Name: L1, Loc: Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  // Clear the marks so subsequent queries start fresh.
  for (auto *Info : InfoVect)
    Info->Visited = 0;
}
1411
1412/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1413static const ValueDecl *getValueDecl(const Expr *Exp) {
1414 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: Exp))
1415 return getValueDecl(Exp: CE->getSubExpr());
1416
1417 if (const auto *DR = dyn_cast<DeclRefExpr>(Val: Exp))
1418 return DR->getDecl();
1419
1420 if (const auto *ME = dyn_cast<MemberExpr>(Val: Exp))
1421 return ME->getMemberDecl();
1422
1423 return nullptr;
1424}
1425
1426bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1427 const threadSafety::til::SExpr *SExp = CapE.sexpr();
1428 assert(SExp && "Null expressions should be ignored");
1429
1430 if (const auto *LP = dyn_cast<til::LiteralPtr>(Val: SExp)) {
1431 const ValueDecl *VD = LP->clangDecl();
1432 // Variables defined in a function are always inaccessible.
1433 if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
1434 return false;
1435 // For now we consider static class members to be inaccessible.
1436 if (isa<CXXRecordDecl>(Val: VD->getDeclContext()))
1437 return false;
1438 // Global variables are always in scope.
1439 return true;
1440 }
1441
1442 // Members are in scope from methods of the same class.
1443 if (const auto *P = dyn_cast<til::Project>(Val: SExp)) {
1444 if (!isa_and_nonnull<CXXMethodDecl>(Val: CurrentFunction))
1445 return false;
1446 const ValueDecl *VD = P->clangDecl();
1447 return VD->getDeclContext() == CurrentFunction->getDeclContext();
1448 }
1449
1450 return false;
1451}
1452
1453/// Add a new lock to the lockset, warning if the lock is already there.
1454/// \param ReqAttr -- true if this is part of an initial Requires attribute.
1455void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
1456 bool ReqAttr) {
1457 if (Entry->shouldIgnore())
1458 return;
1459
1460 if (!ReqAttr && !Entry->negative()) {
1461 // look for the negative capability, and remove it from the fact set.
1462 CapabilityExpr NegC = !*Entry;
1463 const FactEntry *Nen = FSet.findLock(FM&: FactMan, CapE: NegC);
1464 if (Nen) {
1465 FSet.removeLock(FM&: FactMan, CapE: NegC);
1466 }
1467 else {
1468 if (inCurrentScope(CapE: *Entry) && !Entry->asserted() && !Entry->reentrant())
1469 Handler.handleNegativeNotHeld(Kind: Entry->getKind(), LockName: Entry->toString(),
1470 Neg: NegC.toString(), Loc: Entry->loc());
1471 }
1472 }
1473
1474 // Check before/after constraints
1475 if (!Entry->asserted() && !Entry->declared()) {
1476 GlobalBeforeSet->checkBeforeAfter(StartVd: Entry->valueDecl(), FSet, Analyzer&: *this,
1477 Loc: Entry->loc(), CapKind: Entry->getKind());
1478 }
1479
1480 if (const FactEntry *Cp = FSet.findLock(FM&: FactMan, CapE: *Entry)) {
1481 if (!Entry->asserted())
1482 Cp->handleLock(FSet, FactMan, entry: *Entry, Handler);
1483 } else {
1484 FSet.addLock(FM&: FactMan, Entry);
1485 }
1486}
1487
1488/// Remove a lock from the lockset, warning if the lock is not there.
1489/// \param UnlockLoc The source location of the unlock (only used in error msg)
1490void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
1491 SourceLocation UnlockLoc,
1492 bool FullyRemove, LockKind ReceivedKind) {
1493 if (Cp.shouldIgnore())
1494 return;
1495
1496 const FactEntry *LDat = FSet.findLock(FM&: FactMan, CapE: Cp);
1497 if (!LDat) {
1498 SourceLocation PrevLoc;
1499 if (const FactEntry *Neg = FSet.findLock(FM&: FactMan, CapE: !Cp))
1500 PrevLoc = Neg->loc();
1501 Handler.handleUnmatchedUnlock(Kind: Cp.getKind(), LockName: Cp.toString(), Loc: UnlockLoc,
1502 LocPreviousUnlock: PrevLoc);
1503 return;
1504 }
1505
1506 // Generic lock removal doesn't care about lock kind mismatches, but
1507 // otherwise diagnose when the lock kinds are mismatched.
1508 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
1509 Handler.handleIncorrectUnlockKind(Kind: Cp.getKind(), LockName: Cp.toString(), Expected: LDat->kind(),
1510 Received: ReceivedKind, LocLocked: LDat->loc(), LocUnlock: UnlockLoc);
1511 }
1512
1513 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
1514}
1515
1516/// Extract the list of mutexIDs from the attribute on an expression,
1517/// and push them onto Mtxs, discarding any duplicates.
1518template <typename AttrType>
1519void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1520 const Expr *Exp, const NamedDecl *D,
1521 til::SExpr *Self) {
1522 if (Attr->args_size() == 0) {
1523 // The mutex held is the "this" object.
1524 CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: nullptr, D, DeclExp: Exp, Self);
1525 if (Cp.isInvalid()) {
1526 warnInvalidLock(Handler, MutexExp: nullptr, D, DeclExp: Exp, Kind: Cp.getKind());
1527 return;
1528 }
1529 //else
1530 if (!Cp.shouldIgnore())
1531 Mtxs.push_back_nodup(CapE: Cp);
1532 return;
1533 }
1534
1535 for (const auto *Arg : Attr->args()) {
1536 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
1537 if (Cp.isInvalid()) {
1538 warnInvalidLock(Handler, MutexExp: nullptr, D, DeclExp: Exp, Kind: Cp.getKind());
1539 continue;
1540 }
1541 //else
1542 if (!Cp.shouldIgnore())
1543 Mtxs.push_back_nodup(CapE: Cp);
1544 }
1545}
1546
/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  // BrE is the attribute's success value; default to false when it is absent
  // or not a bool/integer literal.
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(Val: BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(Val: BrE))
    branch = ILE->getValue().getBoolValue();

  // Successor 0 is the "true" edge of the terminator, successor 1 the
  // "false" edge; pick the edge on which the trylock succeeded, flipping it
  // when the condition was negated.
  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}
1575
1576static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1577 if (isa<CXXNullPtrLiteralExpr>(Val: E) || isa<GNUNullExpr>(Val: E)) {
1578 TCond = false;
1579 return true;
1580 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(Val: E)) {
1581 TCond = BLE->getValue();
1582 return true;
1583 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(Val: E)) {
1584 TCond = ILE->getValue().getBoolValue();
1585 return true;
1586 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(Val: E))
1587 return getStaticBooleanValue(E: CE->getSubExpr(), TCond);
1588 return false;
1589}
1590
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Val: Cond)) {
    // __builtin_expect is transparent: look at its first argument.
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(Cond: CallExp->getArg(Arg: 0), C, Negate);
    return CallExp;
  }
  // Look through parentheses, implicit casts, and full-expression wrappers.
  else if (const auto *PE = dyn_cast<ParenExpr>(Val: Cond))
    return getTrylockCallExpr(Cond: PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: Cond))
    return getTrylockCallExpr(Cond: CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Val: Cond))
    return getTrylockCallExpr(Cond: FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Cond)) {
    // A local variable: follow its definition in the local variable map.
    const Expr *E = LocalVarMap.lookupExpr(D: DRE->getDecl(), Ctx&: C);
    return getTrylockCallExpr(Cond: E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Val: Cond)) {
    // Logical not flips the polarity of the trylock condition.
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(Cond: UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Val: Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      // Comparison against a boolean constant: fold the constant into the
      // polarity and recurse on the other operand.
      bool TCond = false;
      if (getStaticBooleanValue(E: BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(Cond: BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(E: BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Val: Cond)) {
    // cond ? true : false (or cond ? false : true) reduces to the condition
    // itself, possibly negated.
    bool TCond, FCond;
    if (getStaticBooleanValue(E: COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(E: COP->getFalseExpr(), TCond&: FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(Cond: COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(Cond: COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}
1660
/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when its result is used.
  if (!Cond || isa<ConditionalOperator>(Val: PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  if (Handler.issueBetaWarnings()) {
    // Temporarily set the lookup context for SExprBuilder.
    SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return LocalVarMap.lookupExpr(D, Ctx);
        });
  }
  // Always reset the lookup context on exit, on every return path.
  llvm::scope_exit Cleanup(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  // Trace the branch condition back to a trylock call, if any.
  const auto *Exp = getTrylockCallExpr(Cond, C: LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Val: Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Mtxs&: Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, D: FunDecl, PredBlock, CurrBlock, BrE: Attr->getSuccessValue(),
                Neg: Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(FSet&: Result, Entry: FactMan.createFact<LockableFactEntry>(Args: ExclusiveLockToAdd,
                                                         Args: LK_Exclusive, Args&: Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(FSet&: Result, Entry: FactMan.createFact<LockableFactEntry>(Args: SharedLockToAdd,
                                                         Args: LK_Shared, Args&: Loc));
}
1715
1716namespace {
1717
/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  // The analyzer that owns the fact manager, handler, and variable maps.
  ThreadSafetyAnalyzer *Analyzer;
  // The lockset being built up as statements are visited.
  FactSet FSet;
  // The fact set for the function on exit.
  const FactSet &FunctionExitFSet;
  // Local variable context at the current program point.
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // To update the context used in attr-expr translation. If `S` is non-null,
  // the context is updated to the program point right after 'S'.
  void updateLocalVarMapCtx(const Stmt *S) {
    if (S)
      LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, C: LVarCtx);
    if (!Analyzer->Handler.issueBetaWarnings())
      return;
    // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
    Analyzer->SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
        });
  }

  // helper functions

  // Check that the current lockset permits access to Exp.
  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkAccess(FSet, Exp, AK, POK);
  }
  // Check access through a pointer/reference dereference.
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkPtAccess(FSet, Exp, AK, POK);
  }

  // Process the capability attributes on a called function or constructor.
  void handleCall(const Expr *Exp, const NamedDecl *D,
                  til::SExpr *Self = nullptr,
                  SourceLocation Loc = SourceLocation());
  // Check the arguments of a call for guarded accesses.
  void examineArguments(const FunctionDecl *FD,
                        CallExpr::const_arg_iterator ArgBegin,
                        CallExpr::const_arg_iterator ArgEnd,
                        bool SkipFirstParam = false);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
               const FactSet &FunctionExitFSet)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
        CtxIndex(Info.EntryIndex) {
    updateLocalVarMapCtx(S: nullptr);
  }

  // Clear the lookup closure so it cannot dangle past this visitor.
  ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
  BuildLockset(const BuildLockset &) = delete;
  BuildLockset &operator=(const BuildLockset &) = delete;

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
  void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
  void VisitReturnStmt(const ReturnStmt *S);
};
1788
1789} // namespace
1790
/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
    const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
    Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
    SourceLocation Loc) {
  LockKind LK = getLockKindFromAccessKind(AK);
  // Translate the attribute argument into a capability expression, relative
  // to the declaration, the access expression, and the optional self object.
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: MutexExp, D, DeclExp: Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, DeclExp: Exp, Kind: Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like locks excluded
    const FactEntry *LDat = FSet.findLock(FM&: FactMan, CapE: !Cp);
    if (LDat) {
      // The positive capability is held although '!mu' was required.
      Handler.handleFunExcludesLock(Kind: Cp.getKind(), FunName: D->getNameAsString(),
                                    LockName: (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!inCurrentScope(CapE: Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(FM&: FactMan, CapE: Cp);
    if (!LDat) {
      Handler.handleNegativeNotHeld(D, LockName: Cp.toString(), Loc);
    }
    return;
  }

  // Positive capability: look for an exact (or universal) match first.
  const FactEntry *LDat = FSet.findLockUniv(FM&: FactMan, CapE: Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found. Look for a partial match.
    LDat = FSet.findPartialMatch(FM&: FactMan, CapE: Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK, LockName: Cp.toString(), LK, Loc,
                                 PossibleMatch: &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK, LockName: Cp.toString(), LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK, LockName: Cp.toString(), LK, Loc);
  }
}
1850
1851/// Warn if the LSet contains the given lock.
1852void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1853 const NamedDecl *D, const Expr *Exp,
1854 Expr *MutexExp, til::SExpr *Self,
1855 SourceLocation Loc) {
1856 CapabilityExpr Cp = SxBuilder.translateAttrExpr(AttrExp: MutexExp, D, DeclExp: Exp, Self);
1857 if (Cp.isInvalid()) {
1858 warnInvalidLock(Handler, MutexExp, D, DeclExp: Exp, Kind: Cp.getKind());
1859 return;
1860 } else if (Cp.shouldIgnore()) {
1861 return;
1862 }
1863
1864 const FactEntry *LDat = FSet.findLock(FM&: FactMan, CapE: Cp);
1865 if (LDat) {
1866 Handler.handleFunExcludesLock(Kind: Cp.getKind(), FunName: D->getNameAsString(),
1867 LockName: Cp.toString(), Loc);
1868 }
1869}
1870
/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
                                       AccessKind AK,
                                       ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Exp)) {
    const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization. e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E->IgnoreImplicit()->IgnoreParenCasts();
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Val: Exp)) {
    // For dereferences
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(FSet, Exp: UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(Val: Exp)) {
    switch (BO->getOpcode()) {
    case BO_PtrMemD: // .*
      return checkAccess(FSet, Exp: BO->getLHS(), AK, POK);
    case BO_PtrMemI: // ->*
      return checkPtAccess(FSet, Exp: BO->getLHS(), AK, POK);
    default:
      return;
    }
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Val: Exp)) {
    // Array subscripting accesses through the (decayed) base pointer.
    checkPtAccess(FSet, Exp: AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Val: Exp)) {
    // Note: deliberately no 'return' here -- after checking the base object,
    // control falls through so that attributes on the member declaration
    // itself are also checked below.
    if (ME->isArrow())
      checkPtAccess(FSet, Exp: ME->getBase(), AK, POK);
    else
      checkAccess(FSet, Exp: ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  // guarded_var: some lock (any lock) must be held for the access.
  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
    Handler.handleNoMutexHeld(D, POK, AK, Loc);
  }

  // guarded_by(mu): the named mutex must be held for the access.
  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, MutexExp: I->getArg(), POK, Self: nullptr, Loc);
}
1940
/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
                                         AccessKind AK,
                                         ProtectedOperationKind POK) {
  // Strip off paren- and cast-expressions, checking if we encounter any other
  // operator that should be delegated to checkAccess() instead.
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Val: Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Val: Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then it's elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY;
        checkAccess(FSet, Exp: CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Val: Exp)) {
    if (UO->getOpcode() == UO_AddrOf) {
      // Pointer access via pointer taken of variable, so the dereferenced
      // variable is not actually a pointer.
      checkAccess(FSet, Exp: UO->getSubExpr(), AK, POK);
      return;
    }
  }

  // Pass by reference/pointer warnings are under a different flag.
  // Map the by-reference/pointer operation kinds to their pointer-access
  // counterparts; all other kinds keep the generic POK_VarDereference.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  switch (POK) {
  case POK_PassByRef:
    PtPOK = POK_PtPassByRef;
    break;
  case POK_ReturnByRef:
    PtPOK = POK_PtReturnByRef;
    break;
  case POK_PassPointer:
    PtPOK = POK_PtPassPointer;
    break;
  case POK_ReturnPointer:
    PtPOK = POK_PtReturnPointer;
    break;
  default:
    break;
  }

  // Only accesses through an attributed declaration can be checked here.
  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  // pt_guarded_var: some lock must be held to dereference the pointer.
  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
    Handler.handleNoMutexHeld(D, POK: PtPOK, AK, Loc: Exp->getExprLoc());

  // pt_guarded_by(mu): the named mutex must be held to dereference.
  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, MutexExp: I->getArg(), POK: PtPOK, Self: nullptr,
                       Loc: Exp->getExprLoc());
}
2005
/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// \param Exp The call expression.
/// \param D The callee declaration.
/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
/// of an implicitly called cleanup function.
/// \param Loc If \p Exp = nullptr, the location.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              til::SExpr *Self, SourceLocation Loc) {
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;

  // Figure out if we're constructing an object of scoped lockable class
  CapabilityExpr Scp;
  if (Exp) {
    assert(!Self);
    const auto *TagT = Exp->getType()->getAs<TagType>();
    if (D->hasAttrs() && TagT && Exp->isPRValue()) {
      // Track the constructed object via a placeholder; VisitDeclStmt /
      // VisitMaterializeTemporaryExpr later bind it to a declaration.
      til::LiteralPtr *Placeholder =
          Analyzer->SxBuilder.createThisPlaceholder();
      [[maybe_unused]] auto inserted =
          Analyzer->ConstructedObjects.insert(KV: {Exp, Placeholder});
      assert(inserted.second && "Are we visiting the same expression again?");
      if (isa<CXXConstructExpr>(Val: Exp))
        Self = Placeholder;
      if (TagT->getDecl()->getMostRecentDecl()->hasAttr<ScopedLockableAttr>())
        Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
    }

    assert(Loc.isInvalid());
    Loc = Exp->getExprLoc();
  }

  // Walk the thread-safety attributes on the callee and collect the locks to
  // add/remove; requires/excludes clauses are checked immediately.
  for(const Attr *At : D->attrs()) {
    switch (At->getKind()) {
    // When we encounter a lock function, we need to add the lock to our
    // lockset.
    case attr::AcquireCapability: {
      const auto *A = cast<AcquireCapabilityAttr>(Val: At);
      Analyzer->getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd
                                               : ExclusiveLocksToAdd,
                            Attr: A, Exp, D, Self);
      break;
    }

    // An assert will add a lock to the lockset, but will not generate
    // a warning if it is already there, and will not generate a warning
    // if it is not removed.
    case attr::AssertCapability: {
      const auto *A = cast<AssertCapabilityAttr>(Val: At);
      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(Mtxs&: AssertLocks, Attr: A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(
            FSet, Entry: Analyzer->FactMan.createFact<LockableFactEntry>(
                      Args: AssertLock, Args: A->isShared() ? LK_Shared : LK_Exclusive,
                      Args&: Loc, Args: FactEntry::Asserted));
      break;
    }

    // When we encounter an unlock function, we need to remove unlocked
    // mutexes from the lockset, and flag a warning if they are not there.
    case attr::ReleaseCapability: {
      const auto *A = cast<ReleaseCapabilityAttr>(Val: At);
      if (A->isGeneric())
        Analyzer->getMutexIDs(Mtxs&: GenericLocksToRemove, Attr: A, Exp, D, Self);
      else if (A->isShared())
        Analyzer->getMutexIDs(Mtxs&: SharedLocksToRemove, Attr: A, Exp, D, Self);
      else
        Analyzer->getMutexIDs(Mtxs&: ExclusiveLocksToRemove, Attr: A, Exp, D, Self);
      break;
    }

    case attr::RequiresCapability: {
      const auto *A = cast<RequiresCapabilityAttr>(Val: At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                     AK: A->isShared() ? AK_Read : AK_Written,
                                     MutexExp: Arg, POK: POK_FunctionCall, Self, Loc);
        // use for adopting a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(Mtxs&: ScopedReqsAndExcludes, Attr: A, Exp, D, Self);
      }
      break;
    }

    case attr::LocksExcluded: {
      const auto *A = cast<LocksExcludedAttr>(Val: At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexHeld(FSet, D, Exp, MutexExp: Arg, Self, Loc);
        // use for deferring a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(Mtxs&: ScopedReqsAndExcludes, Attr: A, Exp, D, Self);
      }
      break;
    }

    // Ignore attributes unrelated to thread-safety
    default:
      break;
    }
  }

  // Gather the call arguments so they can be matched against attributes on
  // the callee's parameters below.
  std::optional<CallExpr::const_arg_range> Args;
  if (Exp) {
    if (const auto *CE = dyn_cast<CallExpr>(Val: Exp))
      Args = CE->arguments();
    else if (const auto *CE = dyn_cast<CXXConstructExpr>(Val: Exp))
      Args = CE->arguments();
    else
      llvm_unreachable("Unknown call kind");
  }
  const auto *CalledFunction = dyn_cast<FunctionDecl>(Val: D);
  if (CalledFunction && Args.has_value()) {
    // Check each parameter annotated with acquire/release/requires/excludes:
    // the corresponding argument must be a held scoped-lockable object whose
    // underlying mutexes match the locks declared on the parameter.
    for (auto [Param, Arg] : zip(t: CalledFunction->parameters(), u&: *Args)) {
      CapExprSet DeclaredLocks;
      for (const Attr *At : Param->attrs()) {
        switch (At->getKind()) {
        case attr::AcquireCapability: {
          const auto *A = cast<AcquireCapabilityAttr>(Val: At);
          Analyzer->getMutexIDs(Mtxs&: A->isShared() ? SharedLocksToAdd
                                                   : ExclusiveLocksToAdd,
                                Attr: A, Exp, D, Self);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        case attr::ReleaseCapability: {
          const auto *A = cast<ReleaseCapabilityAttr>(Val: At);
          if (A->isGeneric())
            Analyzer->getMutexIDs(Mtxs&: GenericLocksToRemove, Attr: A, Exp, D, Self);
          else if (A->isShared())
            Analyzer->getMutexIDs(Mtxs&: SharedLocksToRemove, Attr: A, Exp, D, Self);
          else
            Analyzer->getMutexIDs(Mtxs&: ExclusiveLocksToRemove, Attr: A, Exp, D, Self);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        case attr::RequiresCapability: {
          const auto *A = cast<RequiresCapabilityAttr>(Val: At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                         AK: A->isShared() ? AK_Read : AK_Written,
                                         MutexExp: Arg, POK: POK_FunctionCall, Self, Loc);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        case attr::LocksExcluded: {
          const auto *A = cast<LocksExcludedAttr>(Val: At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexHeld(FSet, D, Exp, MutexExp: Arg, Self, Loc);
          Analyzer->getMutexIDs(Mtxs&: DeclaredLocks, Attr: A, Exp, D, Self);
          break;
        }

        default:
          break;
        }
      }
      // No thread-safety attributes on this parameter; nothing to match.
      if (DeclaredLocks.empty())
        continue;
      // Translate the argument itself as a capability so we can look it up
      // in the lockset.
      CapabilityExpr Cp(Analyzer->SxBuilder.translate(S: Arg, Ctx: nullptr),
                        StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
      if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Val: Arg->IgnoreCasts());
          Cp.isInvalid() && CBTE) {
        // The argument is a temporary we tracked at construction time; use
        // its placeholder instead.
        if (auto Object = Analyzer->ConstructedObjects.find(Val: CBTE->getSubExpr());
            Object != Analyzer->ConstructedObjects.end())
          Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
                              /*Reentrant=*/false);
      }
      const FactEntry *Fact = FSet.findLock(FM&: Analyzer->FactMan, CapE: Cp);
      if (!Fact) {
        Analyzer->Handler.handleMutexNotHeld(Kind: Cp.getKind(), D, POK: POK_FunctionCall,
                                             LockName: Cp.toString(), LK: LK_Exclusive,
                                             Loc: Exp->getExprLoc());
        continue;
      }
      // Compare the declared locks against the scope's underlying mutexes,
      // pairwise and in order; warn on any length or element mismatch.
      const auto *Scope = cast<ScopedLockableFactEntry>(Val: Fact);
      for (const auto &[a, b] :
           zip_longest(t&: DeclaredLocks, u: Scope->getUnderlyingMutexes())) {
        if (!a.has_value()) {
          Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
              Loc: Exp->getExprLoc(), DLoc: D->getLocation(), ScopeName: Scope->toString(),
              Kind: b.value().getKind(), Actual: b.value().toString());
        } else if (!b.has_value()) {
          Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
              Loc: Exp->getExprLoc(), DLoc: D->getLocation(), ScopeName: Scope->toString(),
              Kind: a.value().getKind(), Expected: a.value().toString());
        } else if (!a.value().equals(other: b.value())) {
          Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
              Loc: Exp->getExprLoc(), DLoc: D->getLocation(), ScopeName: Scope->toString(),
              Kind: a.value().getKind(), Expected: a.value().toString(), Actual: b.value().toString());
          break;
        }
      }
    }
  }
  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(Val: D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, Cp: M, UnlockLoc: Loc, FullyRemove: Dtor, ReceivedKind: LK_Exclusive);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, Cp: M, UnlockLoc: Loc, FullyRemove: Dtor, ReceivedKind: LK_Shared);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, Cp: M, UnlockLoc: Loc, FullyRemove: Dtor, ReceivedKind: LK_Generic);

  // Add locks.
  // Locks acquired through a scoped object are Managed (released by its
  // destructor); otherwise they were acquired directly.
  FactEntry::SourceKind Source =
      !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, Entry: Analyzer->FactMan.createFact<LockableFactEntry>(
                                Args: M, Args: LK_Exclusive, Args&: Loc, Args&: Source));
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, Entry: Analyzer->FactMan.createFact<LockableFactEntry>(
                                Args: M, Args: LK_Shared, Args&: Loc, Args&: Source));

  if (!Scp.shouldIgnore()) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
        Args&: Scp, Args&: Loc, Args: FactEntry::Acquired,
        Args: ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
            ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
            SharedLocksToRemove.size());
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, Entry: ScopedEntry);
  }
}
2254
2255/// For unary operations which read and write a variable, we need to
2256/// check whether we hold any required mutexes. Reads are checked in
2257/// VisitCastExpr.
2258void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2259 switch (UO->getOpcode()) {
2260 case UO_PostDec:
2261 case UO_PostInc:
2262 case UO_PreDec:
2263 case UO_PreInc:
2264 checkAccess(Exp: UO->getSubExpr(), AK: AK_Written);
2265 break;
2266 default:
2267 break;
2268 }
2269}
2270
2271/// For binary operations which assign to a variable (writes), we need to check
2272/// whether we hold any required mutexes.
2273/// FIXME: Deal with non-primitive types.
2274void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2275 if (!BO->isAssignmentOp())
2276 return;
2277 checkAccess(Exp: BO->getLHS(), AK: AK_Written);
2278 updateLocalVarMapCtx(S: BO);
2279}
2280
2281/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2282/// need to ensure we hold any required mutexes.
2283/// FIXME: Deal with non-primitive types.
2284void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2285 if (CE->getCastKind() != CK_LValueToRValue)
2286 return;
2287 checkAccess(Exp: CE->getSubExpr(), AK: AK_Read);
2288}
2289
2290void BuildLockset::examineArguments(const FunctionDecl *FD,
2291 CallExpr::const_arg_iterator ArgBegin,
2292 CallExpr::const_arg_iterator ArgEnd,
2293 bool SkipFirstParam) {
2294 // Currently we can't do anything if we don't know the function declaration.
2295 if (!FD)
2296 return;
2297
2298 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2299 // only turns off checking within the body of a function, but we also
2300 // use it to turn off checking in arguments to the function. This
2301 // could result in some false negatives, but the alternative is to
2302 // create yet another attribute.
2303 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2304 return;
2305
2306 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2307 auto Param = Params.begin();
2308 if (SkipFirstParam)
2309 ++Param;
2310
2311 // There can be default arguments, so we stop when one iterator is at end().
2312 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2313 ++Param, ++Arg) {
2314 QualType Qt = (*Param)->getType();
2315 if (Qt->isReferenceType())
2316 checkAccess(Exp: *Arg, AK: AK_Read, POK: POK_PassByRef);
2317 else if (Qt->isPointerType())
2318 checkPtAccess(Exp: *Arg, AK: AK_Read, POK: POK_PassPointer);
2319 }
2320}
2321
/// Dispatch on the kind of call: check the implicit object argument of
/// member calls, model the read/write semantics of overloaded operators,
/// examine by-ref/pointer arguments, and finally process the callee's
/// thread-safety attributes via handleCall().
void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Val: Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(Val: CE->getCallee());
    // ME can be null when calling a method pointer
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        // Should perhaps be AK_Written if !MD->isConst().
        checkPtAccess(Exp: CE->getImplicitObjectArgument(), AK: AK_Read);
      } else {
        // Should perhaps be AK_Written if !MD->isConst().
        checkAccess(Exp: CE->getImplicitObjectArgument(), AK: AK_Read);
      }
    }

    examineArguments(FD: CE->getDirectCallee(), ArgBegin: CE->arg_begin(), ArgEnd: CE->arg_end());
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Val: Exp)) {
    OverloadedOperatorKind OEop = OE->getOperator();
    switch (OEop) {
    // Compound assignments read their right-hand side...
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      checkAccess(Exp: OE->getArg(Arg: 1), AK: AK_Read);
      // ...and, like ++/--, write their left-hand side.
      [[fallthrough]];
    case OO_PlusPlus:
    case OO_MinusMinus:
      checkAccess(Exp: OE->getArg(Arg: 0), AK: AK_Written);
      break;
    case OO_Star:
    case OO_ArrowStar:
    case OO_Arrow:
    case OO_Subscript:
      if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
        // Grrr. operator* can be multiplication...
        checkPtAccess(Exp: OE->getArg(Arg: 0), AK: AK_Read);
      }
      [[fallthrough]];
    default: {
      // TODO: get rid of this, and rely on pass-by-ref instead.
      const Expr *Obj = OE->getArg(Arg: 0);
      checkAccess(Exp: Obj, AK: AK_Read);
      // Check the remaining arguments. For method operators, the first
      // argument is the implicit self argument, and doesn't appear in the
      // FunctionDecl, but for non-methods it does.
      const FunctionDecl *FD = OE->getDirectCallee();
      // NOTE(review): isa<> requires a non-null pointer; presumably
      // getDirectCallee() never returns null for an overloaded operator
      // call -- confirm, since examineArguments tolerates null but this
      // isa<> does not.
      examineArguments(FD, ArgBegin: std::next(x: OE->arg_begin()), ArgEnd: OE->arg_end(),
                       /*SkipFirstParam*/ !isa<CXXMethodDecl>(Val: FD));
      break;
    }
    }
  } else {
    examineArguments(FD: Exp->getDirectCallee(), ArgBegin: Exp->arg_begin(), ArgEnd: Exp->arg_end());
  }

  // Process acquire/release/requires/excludes attributes on the callee.
  auto *D = dyn_cast_or_null<NamedDecl>(Val: Exp->getCalleeDecl());
  if (D)
    handleCall(Exp, D);
  updateLocalVarMapCtx(S: Exp);
}
2390
2391void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2392 const CXXConstructorDecl *D = Exp->getConstructor();
2393 if (D && D->isCopyConstructor()) {
2394 const Expr* Source = Exp->getArg(Arg: 0);
2395 checkAccess(Exp: Source, AK: AK_Read);
2396 } else {
2397 examineArguments(FD: D, ArgBegin: Exp->arg_begin(), ArgEnd: Exp->arg_end());
2398 }
2399 if (D && D->hasAttrs())
2400 handleCall(Exp, D);
2401}
2402
2403static const Expr *UnpackConstruction(const Expr *E) {
2404 if (auto *CE = dyn_cast<CastExpr>(Val: E))
2405 if (CE->getCastKind() == CK_NoOp)
2406 E = CE->getSubExpr()->IgnoreParens();
2407 if (auto *CE = dyn_cast<CastExpr>(Val: E))
2408 if (CE->getCastKind() == CK_ConstructorConversion ||
2409 CE->getCastKind() == CK_UserDefinedConversion)
2410 E = CE->getSubExpr();
2411 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(Val: E))
2412 E = BTE->getSubExpr();
2413 return E;
2414}
2415
/// For variable declarations: if the initializer constructed an object we
/// tracked in handleCall(), bind the declared variable to that object's
/// placeholder so the capability can later be named via the variable.
void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(Val: D)) {
      const Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // handle constructors that involve temporaries
      if (auto *EWC = dyn_cast<ExprWithCleanups>(Val: E))
        E = EWC->getSubExpr()->IgnoreParens();
      E = UnpackConstruction(E);

      // Attach the variable to the construction's placeholder, then drop the
      // map entry -- each constructed object is resolved at most once.
      if (auto Object = Analyzer->ConstructedObjects.find(Val: E);
          Object != Analyzer->ConstructedObjects.end()) {
        Object->second->setClangDecl(VD);
        Analyzer->ConstructedObjects.erase(I: Object);
      }
    }
  }
  // Declarations define new locals; advance the local-variable context.
  updateLocalVarMapCtx(S);
}
2438
/// A lifetime-extended temporary behaves like a declared variable: if the
/// materialized sub-expression was a construction we tracked in handleCall(),
/// bind the extending declaration to the construction's placeholder.
void BuildLockset::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *Exp) {
  if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
    if (auto Object = Analyzer->ConstructedObjects.find(
            Val: UnpackConstruction(E: Exp->getSubExpr()));
        Object != Analyzer->ConstructedObjects.end()) {
      // Resolve the placeholder to the extending declaration, then drop the
      // map entry -- each constructed object is resolved at most once.
      Object->second->setClangDecl(ExtD);
      Analyzer->ConstructedObjects.erase(I: Object);
    }
  }
}
2450
/// When returning by reference or pointer, the caller can access guarded
/// data after any scoped locks are released, so the access is checked against
/// the lockset expected at function exit rather than the current one.
void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
  if (Analyzer->CurrentFunction == nullptr)
    return;
  const Expr *RetVal = S->getRetValue();
  if (!RetVal)
    return;

  // If returning by reference or pointer, check that the function requires the
  // appropriate capabilities.
  const QualType ReturnType =
      Analyzer->CurrentFunction->getReturnType().getCanonicalType();
  if (ReturnType->isLValueReferenceType()) {
    // A non-const reference lets the caller write through it.
    Analyzer->checkAccess(
        FSet: FunctionExitFSet, Exp: RetVal,
        AK: ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK: POK_ReturnByRef);
  } else if (ReturnType->isPointerType()) {
    // Likewise for a pointer to non-const data.
    Analyzer->checkPtAccess(
        FSet: FunctionExitFSet, Exp: RetVal,
        AK: ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK: POK_ReturnPointer);
  }
}
2474
/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
/// \return false if we should keep \p A, true if we should take \p B.
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
                                SourceLocation JoinLoc,
                                LockErrorKind EntryLEK) {
  // Whether we can replace \p A by \p B.
  const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
  unsigned int ReentrancyDepthA = 0;
  unsigned int ReentrancyDepthB = 0;

  // Only lockable facts carry a reentrancy depth; scoped facts stay at 0.
  if (const auto *LFE = dyn_cast<LockableFactEntry>(Val: &A))
    ReentrancyDepthA = LFE->getReentrancyDepth();
  if (const auto *LFE = dyn_cast<LockableFactEntry>(Val: &B))
    ReentrancyDepthB = LFE->getReentrancyDepth();

  if (ReentrancyDepthA != ReentrancyDepthB) {
    // A reentrant capability was acquired a different number of times on the
    // two merging paths.
    Handler.handleMutexHeldEndOfScope(Kind: B.getKind(), LockName: B.toString(), LocLocked: B.loc(),
                                      LocEndOfScope: JoinLoc, LEK: EntryLEK,
                                      /*ReentrancyMismatch=*/true);
    // Pick the FactEntry with the greater reentrancy depth as the "good"
    // fact to reduce potential later warnings.
    return CanModify && ReentrancyDepthA < ReentrancyDepthB;
  } else if (A.kind() != B.kind()) {
    // Exclusive on one path, shared on the other.
    // For managed capabilities, the destructor should unlock in the right mode
    // anyway. For asserted capabilities no unlocking is needed.
    if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
      // The shared capability subsumes the exclusive capability, if possible.
      bool ShouldTakeB = B.kind() == LK_Shared;
      if (CanModify || !ShouldTakeB)
        return ShouldTakeB;
    }
    Handler.handleExclusiveAndShared(Kind: B.getKind(), LockName: B.toString(), Loc1: B.loc(),
                                     Loc2: A.loc());
    // Take the exclusive capability to reduce further warnings.
    return CanModify && B.kind() == LK_Exclusive;
  } else {
    // The non-asserted capability is the one we want to track.
    return CanModify && A.asserted() && !B.asserted();
  }
}
2517
/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
/// \param EntrySet A lockset for entry into a (possibly new) block.
/// \param ExitSet The lockset on exiting a preceding block.
/// \param JoinLoc The location of the join point for error reporting
/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
                                            const FactSet &ExitSet,
                                            SourceLocation JoinLoc,
                                            LockErrorKind EntryLEK,
                                            LockErrorKind ExitLEK) {
  // Snapshot EntrySet, since the loops below may modify it in place.
  FactSet EntrySetOrig = EntrySet;

  // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
  for (const auto &Fact : ExitSet) {
    const FactEntry &ExitFact = FactMan[Fact];

    FactSet::iterator EntryIt = EntrySet.findLockIter(FM&: FactMan, CapE: ExitFact);
    if (EntryIt != EntrySet.end()) {
      // Held on both paths: join() decides which entry to keep and warns on
      // kind/reentrancy conflicts.
      if (join(A: FactMan[*EntryIt], B: ExitFact, JoinLoc, EntryLEK))
        *EntryIt = Fact;
    } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
      ExitFact.handleRemovalFromIntersection(FSet: ExitSet, FactMan, JoinLoc,
                                             LEK: EntryLEK, Handler);
    }
  }

  // Find locks in EntrySet that are not in ExitSet, and remove them.
  for (const auto &Fact : EntrySetOrig) {
    const FactEntry *EntryFact = &FactMan[Fact];
    const FactEntry *ExitFact = ExitSet.findLock(FM&: FactMan, CapE: *EntryFact);

    if (!ExitFact) {
      if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
          ExitLEK == LEK_NotLockedAtEndOfFunction)
        EntryFact->handleRemovalFromIntersection(FSet: EntrySetOrig, FactMan, JoinLoc,
                                                 LEK: ExitLEK, Handler);
      // Intersect: drop the lock unless the error kind says to keep it.
      if (ExitLEK == LEK_LockedSomePredecessors)
        EntrySet.removeLock(FM&: FactMan, CapE: *EntryFact);
    }
  }
}
2568
2569// Return true if block B never continues to its successors.
2570static bool neverReturns(const CFGBlock *B) {
2571 if (B->hasNoReturnElement())
2572 return true;
2573 if (B->empty())
2574 return false;
2575
2576 CFGElement Last = B->back();
2577 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2578 if (isa<CXXThrowExpr>(Val: S->getStmt()))
2579 return true;
2580 }
2581 return false;
2582}
2583
/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  // Null when the declaration is not a FunctionDecl (e.g. an ObjC method;
  // see the ObjCMethodDecl branch when collecting parameters below).
  CurrentFunction = dyn_cast<FunctionDecl>(D);

  // Functions explicitly opted out of the analysis are skipped entirely.
  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code. Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access is disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return; // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return; // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  // One CFGBlockInfo (entry/exit locksets, reachability, locations) per
  // CFG block, indexed by block ID.
  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];

  // Mark entry block as reachable
  Initial.Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty()) {
    assert(*SortedGraph->begin() == &CFGraph->getEntry());
    FactSet &InitialLockset = Initial.EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        // As with UNLOCK_FUNCTION above, an acquire attribute with no
        // arguments hides the underlying lock; skip checking the function.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }
    ArrayRef<ParmVarDecl *> Params;
    if (CurrentFunction)
      Params = CurrentFunction->getCanonicalDecl()->parameters();
    else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
      Params = CurrentMethod->getCanonicalDecl()->parameters();
    else
      llvm_unreachable("Unknown function kind");
    // Parameters carrying capability attributes are modeled as scoped
    // lockables that manage the attributed (underlying) locks for the
    // duration of the function.
    for (const ParmVarDecl *Param : Params) {
      CapExprSet UnderlyingLocks;
      for (const auto *Attr : Param->attrs()) {
        Loc = Attr->getLocation();
        if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                      nullptr, Param);
          getMutexIDs(LocksReleased, A, nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                      nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksAcquired
                                    : ExclusiveLocksAcquired,
                      A, nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        }
      }
      if (UnderlyingLocks.empty())
        continue;
      CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
                        StringRef(),
                        /*Neg=*/false, /*Reentrant=*/false);
      auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
          Cp, Param->getLocation(), FactEntry::Declared,
          UnderlyingLocks.size());
      for (const CapabilityExpr &M : UnderlyingLocks)
        ScopedEntry->addLock(M);
      addLock(InitialLockset, ScopedEntry, true);
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      const auto *Entry = FactMan.createFact<LockableFactEntry>(
          Mu, LK_Exclusive, Loc, FactEntry::Declared);
      addLock(InitialLockset, Entry, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      const auto *Entry = FactMan.createFact<LockableFactEntry>(
          Mu, LK_Shared, Loc, FactEntry::Declared);
      addLock(InitialLockset, Entry, true);
    }
  }

  // Compute the expected exit set.
  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedFunctionExitSet = Initial.EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
                                                       D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
                                                       D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedFunctionExitSet.removeLock(FactMan, Lock);

  // Main dataflow loop: visit blocks in topological order, computing each
  // block's entry lockset from its visited predecessors and its exit
  // lockset by walking its statements.
  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        // First visited predecessor seeds the entry lockset directly.
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        // Surprisingly 'continue' doesn't always produce back edges, because
        // the CFG has empty "transition" blocks where they meet with the end
        // of the regular loop body. We still want to diagnose them as loop.
        intersectAndWarn(
            CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
            isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
                ? LEK_LockedSomeLoopIterations
                : LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        LocksetBuilder.Visit(CS.getStmt());
        break;
      }
      // Ignore BaseDtor and MemberDtor for now.
      case CFGElement::AutomaticObjectDtor: {
        // Implicit destructor call at end of scope: treat it like an
        // explicit call so scoped lockables release their locks.
        CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
        const auto *DD = AD.getDestructorDecl(AC.getASTContext());
        // Function parameters as they are constructed in caller's context and
        // the CFG does not contain the ctors. Ignore them as their
        // capabilities cannot be analysed because of this missing
        // information.
        if (isa_and_nonnull<ParmVarDecl>(AD.getVarDecl()))
          break;
        if (!DD || !DD->hasAttrs())
          break;

        LocksetBuilder.handleCall(
            nullptr, DD,
            SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
            AD.getTriggerStmt()->getEndLoc());
        break;
      }

      case CFGElement::CleanupFunction: {
        // __attribute__((cleanup(f))): model the cleanup call on the
        // variable just like a destructor call.
        const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
        LocksetBuilder.handleCall(
            /*Exp=*/nullptr, CF.getFunctionDecl(),
            SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
            CF.getVarDecl()->getLocation());
        break;
      }

      case CFGElement::TemporaryDtor: {
        auto TD = BI.castAs<CFGTemporaryDtor>();

        // Clean up constructed object even if there are no attributes to
        // keep the number of objects in limbo as small as possible.
        if (auto Object = ConstructedObjects.find(
                TD.getBindTemporaryExpr()->getSubExpr());
            Object != ConstructedObjects.end()) {
          const auto *DD = TD.getDestructorDecl(AC.getASTContext());
          if (DD->hasAttrs())
            // TODO: the location here isn't quite correct.
            LocksetBuilder.handleCall(nullptr, DD, Object->second,
                                      TD.getBindTemporaryExpr()->getEndLoc());
          ConstructedObjects.erase(Object);
        }
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations);
    }
  }

  // Skip the final check if the exit block is unreachable.
  if (!Final.Reachable)
    return;

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
                   LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);

  Handler.leaveFunction(CurrentFunction);
}
2900
2901/// Check a function's CFG for thread-safety violations.
2902///
2903/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2904/// at the end of each block, and issue warnings for thread safety violations.
2905/// Each block in the CFG is traversed exactly once.
2906void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2907 ThreadSafetyHandler &Handler,
2908 BeforeSet **BSet) {
2909 if (!*BSet)
2910 *BSet = new BeforeSet;
2911 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2912 Analyzer.runAnalysis(AC);
2913}
2914
2915void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2916
2917/// Helper function that returns a LockKind required for the given level
2918/// of access.
2919LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2920 switch (AK) {
2921 case AK_Read :
2922 return LK_Shared;
2923 case AK_Written :
2924 return LK_Exclusive;
2925 }
2926 llvm_unreachable("Unknown AccessKind");
2927}
2928