//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
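//
// For example (an illustrative sketch; see the documentation above for the
// full set of annotations), the analysis warns when a guarded field is
// accessed without holding the required capability:
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//   void Bad() { Data = 42; }  // warning: writing 'Data' requires 'Mu'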
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push M onto list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        }))
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
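  // For example, a held exclusive lock satisfies a shared requirement, but a
  // held shared lock does not satisfy an exclusive requirement.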
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  llvm::BumpPtrAllocator &Alloc;
  std::vector<const FactEntry *> Facts;

public:
  FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  template <typename T, typename... ArgTypes>
  T *createFact(ArgTypes &&...Args) {
    static_assert(std::is_trivially_destructible_v<T>);
    return T::create(Alloc, std::forward<ArgTypes>(Args)...);
  }

  FactID newFact(const FactEntry *Entry) {
    Facts.push_back(Entry);
    assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
           "FactID space exhausted");
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
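/// For example (illustrative), a query for capability `mu` may need to match
/// a held fact such as `other->mu`, which an exact-match hash lookup would
/// miss.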
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, const FactEntry *Entry) {
    FactID F = FM.newFact(Entry);
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n - 1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n - 1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n - 1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  std::optional<FactID> replaceLock(FactManager &FM, iterator It,
                                    const FactEntry *Entry) {
    if (It == end())
      return std::nullopt;
    FactID F = FM.newFact(Entry);
    *It = F;
    return F;
  }

  std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
                                    const FactEntry *Entry) {
    return replaceLock(FM, findLockIter(FM, CapE), Entry);
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return llvm::find_if(*this,
                         [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I =
        llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl *Vd) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
    return I != end();
  }
};

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo *insertAttrExprs(const ValueDecl *Vd,
                              ThreadSafetyAnalyzer &Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl *Vd,
                        const FactSet &FSet,
                        ThreadSafetyAnalyzer &Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable
// DeclRefs with their SSA-names. Instead, we compute a Context for each point
// in the code, which maps local variables to the appropriate SSA-name. This
// map changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
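//
// For example (illustrative), after the statements
//   int x = 1;    // Context: { x -> x1 },  VarDefinitions[x1] = "1"
//   x = 2;        // Context: { x -> x2 },  VarDefinitions[x2] = "2"
// looking up "x" in the second Context yields the expression "2".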
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Direct reference to another VarDefinition
    unsigned DirectRef = 0;

    // Reference to underlying canonical non-reference VarDefinition.
    unsigned CanonicalRef = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

    void invalidateRef() { DirectRef = CanonicalRef = 0; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef,
                  Context C)
        : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0, 0, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition *lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr *lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].DirectRef;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex + 1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void *)Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].DirectRef;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp)
        Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Resolve any definition ID down to its non-reference base ID.
  unsigned getCanonicalDefinitionID(unsigned ID) const {
    while (ID > 0 && VarDefinitions[ID].isReference())
      ID = VarDefinitions[ID].CanonicalRef;
    return ID;
  }

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size() - 1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(
        VarDefinition(D, Ref, getCanonicalDefinitionID(Ref), Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap *VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Invalidates local variable definitions if variable escaped.
void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
  const FunctionDecl *FD = CE->getDirectCallee();
  if (!FD)
    return;

  // Heuristic for likely-benign functions that pass by mutable reference. This
  // is needed to avoid a slew of false positives due to mutable reference
  // passing where the captured reference is usually passed on by-value.
  if (const IdentifierInfo *II = FD->getIdentifier()) {
    // Any kind of std::bind-like functions.
    if (II->isStr("bind") || II->isStr("bind_front"))
      return;
  }

  // Invalidate local variable definitions that are passed by non-const
  // reference or non-const pointer.
  for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
    if (Idx >= FD->getNumParams())
      break;

    const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
    const ParmVarDecl *PVD = FD->getParamDecl(Idx);
    QualType ParamType = PVD->getType();

    // Potential reassignment if passed by non-const reference / pointer.
    const ValueDecl *VDec = nullptr;
    if (ParamType->isReferenceType() &&
        !ParamType->getPointeeType().isConstQualified()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
        VDec = DRE->getDecl();
    } else if (ParamType->isPointerType() &&
               !ParamType->getPointeeType().isConstQualified()) {
      Arg = Arg->IgnoreParenCasts();
      if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
        if (UO->getOpcode() == UO_AddrOf) {
          const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
          if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
            VDec = DRE->getDecl();
        }
      }
    }

    if (VDec && Ctx.lookup(VDec)) {
      Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(CE, Ctx);
    }
  }
}

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts.
// Variables missing from the second context are removed; variables with
// differing definitions are invalidated (mapped to the placeholder
// definition 0).
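// For example (illustrative), intersecting { x -> x1, y -> y1 } with
// { x -> x1, y -> y2 } keeps x -> x1 and invalidates y, whose definitions
// differ on the two paths.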
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *I2 = C2.lookup(Dec);
    if (!I2) {
      // The variable doesn't exist on second path.
      Result = removeDefinition(Dec, Result);
    } else if (getCanonicalDefinitionID(P.second) !=
               getCanonicalDefinitionID(*I2)) {
      // If canonical definitions mismatch the underlying definitions are
      // different, invalidate.
      Result = clearDefinition(Dec, Result);
    }
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    const unsigned I1 = P.second;
    VarDefinition *VDef = &VarDefinitions[I1];
    assert(VDef->isReference());

    const unsigned *I2 = C2.lookup(P.first);
    if (!I2) {
      // Variable does not exist at the end of the loop, invalidate.
      VDef->invalidateRef();
      continue;
    }

    // Compare the canonical IDs. This correctly handles chains of references
    // and determines if the variable is truly loop-invariant.
    if (VDef->CanonicalRef != getCanonicalDefinitionID(*I2))
      VDef->invalidateRef(); // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context          | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1 | x3 = x2 + 1, ... }
//   ...                 { y -> y1          | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
                                       PE = CurrBlock->pred_end();
         PI != PE; ++PI) {
      // If the edge *PI -> CurrBlock is a back edge, skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
            intersectContexts(CurrBlockInfo->EntryContext,
                              PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
          createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
                                       SE = CurrBlock->succ_end();
         SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
                                            BE = CurrBlock->rend();
           BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}

namespace {

class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;
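
  // For example (illustrative), for a reentrant capability "mu": the first
  // acquisition creates the fact at depth 0, a second acquisition replaces it
  // with a copy at depth 1 (tryReenter), and each release decrements the depth
  // (leaveReentrant) until the final release removes the fact entirely.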

  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FactMan, entry, RFact);
    } else {
      Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                               entry.loc());
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FactMan, RFact);
    } else if (!Cp.negative()) {
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability
  // reentrantly, or nullptr otherwise.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrantly, or nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};

enum UnderlyingCapabilityKind {
  UCK_Acquired,          ///< Any kind of acquired capability.
  UCK_ReleasedShared,    ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};
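
/// For example (an illustrative sketch), a scope such as
/// "MutexLocker Lock(&Mu);" manages Mu as UCK_Acquired, while a scope that
/// temporarily releases an already-held mutex records it as
/// UCK_ReleasedExclusive or UCK_ReleasedShared and re-acquires it when the
/// scope is destroyed.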

struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};

class ScopedLockableFactEntry final
    : public FactEntry,
      private llvm::TrailingObjects<ScopedLockableFactEntry,
                                    UnderlyingCapability> {
  friend TrailingObjects;

private:
  const unsigned ManagedCapacity;
  unsigned ManagedSize = 0;

  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src, unsigned ManagedCapacity)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
        ManagedCapacity(ManagedCapacity) {}

  void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
    assert(ManagedSize < ManagedCapacity);
    new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
    ++ManagedSize;
  }

  ArrayRef<UnderlyingCapability> getManaged() const {
    return getTrailingObjects(ManagedSize);
  }

public:
  static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                         const CapabilityExpr &CE,
                                         SourceLocation Loc, SourceKind Src,
                                         unsigned ManagedCapacity) {
    void *Storage =
        Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
                       alignof(ScopedLockableFactEntry));
    return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
  }

  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : getManaged())
      UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  /// \name Adding managed locks
  /// Capacity for managed locks must have been allocated via \ref create.
  /// There is no reallocation in case the capacity is exceeded!
  /// \{
  void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedShared);
  }
  /// \}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : getManaged()) {
      const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
                                          UnderlyingMutex.Cap.toString(), loc(),
                                          JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : getManaged()) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
             &Handler);
      else
        unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : getManaged()) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
      } else if (Handler) {
        Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
      }
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
                                                                  Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
        return;
      }

      FSet.replaceLock(
          FactMan, It,
          FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
    }
  }
};

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
        GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr *getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock *PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::SExpr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo *
BeforeSet::insertAttrExprs(const ValueDecl *Vd,
                           ThreadSafetyAnalyzer &Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
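/// For example (illustrative), given "Mutex Mu1 ACQUIRED_BEFORE(Mu2);",
/// acquiring Mu1 while Mu2 is already held triggers a lock-ordering warning.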
void BeforeSet::checkBeforeAfter(const ValueDecl *StartVd,
                                 const FactSet &FSet,
                                 ThreadSafetyAnalyzer &Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo *, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool(const ValueDecl *)> traverse = [&](const ValueDecl *Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.try_emplace(Vd, true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentFunction->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted() && !Entry->reentrant())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (!Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, Entry);
  }
}

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       til::SExpr *Self) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
                                     SE = PredBlock->succ_end();
       SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller; it is set to true if
// the function call is negated, e.g. if (!mu.tryLock(...))
1593const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
1594 LocalVarContext C,
1595 bool &Negate) {
1596 if (!Cond)
1597 return nullptr;
1598
1599 if (const auto *CallExp = dyn_cast<CallExpr>(Val: Cond)) {
1600 if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
1601 return getTrylockCallExpr(Cond: CallExp->getArg(Arg: 0), C, Negate);
1602 return CallExp;
1603 }
1604 else if (const auto *PE = dyn_cast<ParenExpr>(Val: Cond))
1605 return getTrylockCallExpr(Cond: PE->getSubExpr(), C, Negate);
1606 else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: Cond))
1607 return getTrylockCallExpr(Cond: CE->getSubExpr(), C, Negate);
1608 else if (const auto *FE = dyn_cast<FullExpr>(Val: Cond))
1609 return getTrylockCallExpr(Cond: FE->getSubExpr(), C, Negate);
1610 else if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Cond)) {
1611 const Expr *E = LocalVarMap.lookupExpr(D: DRE->getDecl(), Ctx&: C);
1612 return getTrylockCallExpr(Cond: E, C, Negate);
1613 }
1614 else if (const auto *UOP = dyn_cast<UnaryOperator>(Val: Cond)) {
1615 if (UOP->getOpcode() == UO_LNot) {
1616 Negate = !Negate;
1617 return getTrylockCallExpr(Cond: UOP->getSubExpr(), C, Negate);
1618 }
1619 return nullptr;
1620 }
1621 else if (const auto *BOP = dyn_cast<BinaryOperator>(Val: Cond)) {
1622 if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
1623 if (BOP->getOpcode() == BO_NE)
1624 Negate = !Negate;
1625
1626 bool TCond = false;
1627 if (getStaticBooleanValue(E: BOP->getRHS(), TCond)) {
1628 if (!TCond) Negate = !Negate;
1629 return getTrylockCallExpr(Cond: BOP->getLHS(), C, Negate);
1630 }
1631 TCond = false;
1632 if (getStaticBooleanValue(E: BOP->getLHS(), TCond)) {
1633 if (!TCond) Negate = !Negate;
1634 return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
1635 }
1636 return nullptr;
1637 }
1638 if (BOP->getOpcode() == BO_LAnd) {
1639 // LHS must have been evaluated in a different block.
1640 return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
1641 }
1642 if (BOP->getOpcode() == BO_LOr)
1643 return getTrylockCallExpr(Cond: BOP->getRHS(), C, Negate);
1644 return nullptr;
1645 } else if (const auto *COP = dyn_cast<ConditionalOperator>(Val: Cond)) {
1646 bool TCond, FCond;
1647 if (getStaticBooleanValue(E: COP->getTrueExpr(), TCond) &&
1648 getStaticBooleanValue(E: COP->getFalseExpr(), TCond&: FCond)) {
1649 if (TCond && !FCond)
1650 return getTrylockCallExpr(Cond: COP->getCond(), C, Negate);
1651 if (!TCond && FCond) {
1652 Negate = !Negate;
1653 return getTrylockCallExpr(Cond: COP->getCond(), C, Negate);
1654 }
1655 }
1656 }
1657 return nullptr;
1658}
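
// Some condition shapes the walk above traces back to the same trylock call
// (hypothetical user code):
//
//   if (mu.TryLock()) ...          // CallExpr, Negate stays false
//   if (!mu.TryLock()) ...         // UnaryOperator UO_LNot, Negate = true
//   if (mu.TryLock() == false) ... // BO_EQ against 'false', Negate = true
//   bool Ok = mu.TryLock();        // DeclRefExpr, resolved through the
//   if (Ok) ...                    //   local variable map, Negate = false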

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when their result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  if (Handler.issueBetaWarnings()) {
    // Temporarily set the lookup context for SExprBuilder.
    SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return LocalVarMap.lookupExpr(D, Ctx);
        });
  }
  llvm::scope_exit Cleanup(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a trylock function, then grab the attributes.
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
                                                          LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
                                                          LK_Shared, Loc));
}
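
// The ?: exclusion above means a trylock used as a conditional-operator
// condition does not split the lockset at the ?: itself; the split happens
// where the stored result is branched on. A sketch (hypothetical user code):
//
//   bool Ok = mu.TryLock() ? true : false;  // no facts added on ?: edges
//   if (Ok) {   // here the edge into the "then" block acquires 'mu', via
//     ...       //   the local variable map and getTrylockCallExpr()
//   }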

namespace {

/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  // The fact set for the function on exit.
  const FactSet &FunctionExitFSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // To update and adjust the context.
  void updateLocalVarMapCtx(const Stmt *S) {
    if (S)
      LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
    if (!Analyzer->Handler.issueBetaWarnings())
      return;
    // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
    Analyzer->SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
        });
  }

  // Helper functions.

  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkAccess(FSet, Exp, AK, POK);
  }
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkPtAccess(FSet, Exp, AK, POK);
  }

  void handleCall(const Expr *Exp, const NamedDecl *D,
                  til::SExpr *Self = nullptr,
                  SourceLocation Loc = SourceLocation());
  void examineArguments(const FunctionDecl *FD,
                        CallExpr::const_arg_iterator ArgBegin,
                        CallExpr::const_arg_iterator ArgEnd,
                        bool SkipFirstParam = false);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
               const FactSet &FunctionExitFSet)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
        CtxIndex(Info.EntryIndex) {
    updateLocalVarMapCtx(nullptr);
  }

  ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
  BuildLockset(const BuildLockset &) = delete;
  BuildLockset &operator=(const BuildLockset &) = delete;

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
  void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
  void VisitReturnStmt(const ReturnStmt *S);
};

} // namespace

/// Warn if the LSet does not contain a lock sufficient to protect the access
/// at the level required by the passed-in AccessKind.
void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
    const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
    Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
    SourceLocation Loc) {
  LockKind LK = getLockKindFromAccessKind(AK);
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like excluded locks.
    const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
    if (LDat) {
      Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
                                    (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!inCurrentScope(Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(FactMan, Cp);
    if (!LDat) {
      Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
    }
    return;
  }

  const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found. Look for a partial match.
    LDat = FSet.findPartialMatch(FactMan, Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
                                 &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
  }
}
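
// A sketch of the negative-capability path above (hypothetical user code;
// the "must hold '!Mu'" diagnostics are enforced under
// -Wthread-safety-negative):
//
//   Mutex Mu;
//   void Unlocked() __attribute__((requires_capability(!Mu)));
//
//   void F() __attribute__((requires_capability(!Mu))) {
//     Unlocked();   // OK: the negative capability '!Mu' is held.
//     Mu.Lock();
//     Unlocked();   // warning: cannot call 'Unlocked' while holding 'Mu'.
//     Mu.Unlock();
//   }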

/// Warn if the LSet contains the given lock.
void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
                                           const NamedDecl *D, const Expr *Exp,
                                           Expr *MutexExp, til::SExpr *Self,
                                           SourceLocation Loc) {
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (LDat) {
    Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
                                  Cp.toString(), Loc);
  }
}

/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
                                       AccessKind AK,
                                       ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization. e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E->IgnoreImplicit()->IgnoreParenCasts();
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences.
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
    switch (BO->getOpcode()) {
    case BO_PtrMemD: // .*
      return checkAccess(FSet, BO->getLHS(), AK, POK);
    case BO_PtrMemI: // ->*
      return checkPtAccess(FSet, BO->getLHS(), AK, POK);
    default:
      return;
    }
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    checkPtAccess(FSet, AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(FSet, ME->getBase(), AK, POK);
    else
      checkAccess(FSet, ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
    Handler.handleNoMutexHeld(D, POK, AK, Loc);
  }

  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
}
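
// A sketch of what checkAccess() diagnoses, given the annotated Mutex type
// sketched earlier (hypothetical user code):
//
//   Mutex Mu;
//   int Data __attribute__((guarded_by(Mu)));
//
//   void Bad() {
//     Data = 42;   // warning: writing variable 'Data' requires holding
//   }              //   mutex 'Mu' exclusively
//
//   void Good() {
//     Mu.Lock();
//     Data = 42;   // OK: 'Mu' is in the lockset.
//     Mu.Unlock();
//   }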

/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
                                         AccessKind AK,
                                         ProtectedOperationKind POK) {
  // Strip off paren- and cast-expressions, checking if we encounter any other
  // operator that should be delegated to checkAccess() instead.
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
        checkAccess(FSet, CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    if (UO->getOpcode() == UO_AddrOf) {
      // Pointer access via the address of a variable, so the dereferenced
      // entity is not actually a pointer.
      checkAccess(FSet, UO->getSubExpr(), AK, POK);
      return;
    }
  }

  // Pass by reference/pointer warnings are under a different flag.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  switch (POK) {
  case POK_PassByRef:
    PtPOK = POK_PtPassByRef;
    break;
  case POK_ReturnByRef:
    PtPOK = POK_PtReturnByRef;
    break;
  case POK_PassPointer:
    PtPOK = POK_PtPassPointer;
    break;
  case POK_ReturnPointer:
    PtPOK = POK_PtReturnPointer;
    break;
  default:
    break;
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
    Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());

  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
                       Exp->getExprLoc());
}
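
// The guarded_by / pt_guarded_by distinction handled above, sketched as
// hypothetical user code:
//
//   Mutex Mu;
//   int *Ptr __attribute__((pt_guarded_by(Mu)));
//
//   void F() {
//     Ptr = nullptr;   // OK: the pointer itself is not guarded.
//     *Ptr = 42;       // warning: accessing the pointee requires
//   }                  //   holding mutex 'Mu' exclusively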

/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// \param Exp The call expression.
/// \param D The callee declaration.
/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
/// of an implicitly called cleanup function.
/// \param Loc If \p Exp = nullptr, the location.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              til::SExpr *Self, SourceLocation Loc) {
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;

  // Figure out if we're constructing an object of a scoped lockable class.
  CapabilityExpr Scp;
  if (Exp) {
    assert(!Self);
    const auto *TagT = Exp->getType()->getAs<TagType>();
    if (D->hasAttrs() && TagT && Exp->isPRValue()) {
      til::LiteralPtr *Placeholder =
          Analyzer->SxBuilder.createThisPlaceholder();
      [[maybe_unused]] auto inserted =
          Analyzer->ConstructedObjects.insert({Exp, Placeholder});
      assert(inserted.second && "Are we visiting the same expression again?");
      if (isa<CXXConstructExpr>(Exp))
        Self = Placeholder;
      if (TagT->getDecl()->getMostRecentDecl()->hasAttr<ScopedLockableAttr>())
        Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
    }

    assert(Loc.isInvalid());
    Loc = Exp->getExprLoc();
  }

  for (const Attr *At : D->attrs()) {
    switch (At->getKind()) {
    // When we encounter a lock function, we need to add the lock to our
    // lockset.
    case attr::AcquireCapability: {
      const auto *A = cast<AcquireCapabilityAttr>(At);
      Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                          : ExclusiveLocksToAdd,
                            A, Exp, D, Self);
      break;
    }

    // An assert will add a lock to the lockset, but will not generate
    // a warning if it is already there, and will not generate a warning
    // if it is not removed.
    case attr::AssertCapability: {
      const auto *A = cast<AssertCapabilityAttr>(At);
      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(
            FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
                      AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
                      Loc, FactEntry::Asserted));
      break;
    }

    // When we encounter an unlock function, we need to remove unlocked
    // mutexes from the lockset, and flag a warning if they are not there.
    case attr::ReleaseCapability: {
      const auto *A = cast<ReleaseCapabilityAttr>(At);
      if (A->isGeneric())
        Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
      else if (A->isShared())
        Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
      else
        Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
      break;
    }

    case attr::RequiresCapability: {
      const auto *A = cast<RequiresCapabilityAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                     A->isShared() ? AK_Read : AK_Written,
                                     Arg, POK_FunctionCall, Self, Loc);
        // Used when adopting a lock.
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    case attr::LocksExcluded: {
      const auto *A = cast<LocksExcludedAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
        // Used when deferring a lock.
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    // Ignore attributes unrelated to thread-safety.
    default:
      break;
    }
  }

  std::optional<CallExpr::const_arg_range> Args;
  if (Exp) {
    if (const auto *CE = dyn_cast<CallExpr>(Exp))
      Args = CE->arguments();
    else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
      Args = CE->arguments();
    else
      llvm_unreachable("Unknown call kind");
  }
  const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
  if (CalledFunction && Args.has_value()) {
    for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
      CapExprSet DeclaredLocks;
      for (const Attr *At : Param->attrs()) {
        switch (At->getKind()) {
        case attr::AcquireCapability: {
          const auto *A = cast<AcquireCapabilityAttr>(At);
          Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                              : ExclusiveLocksToAdd,
                                A, Exp, D, Self);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::ReleaseCapability: {
          const auto *A = cast<ReleaseCapabilityAttr>(At);
          if (A->isGeneric())
            Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
          else if (A->isShared())
            Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
          else
            Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::RequiresCapability: {
          const auto *A = cast<RequiresCapabilityAttr>(At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                         A->isShared() ? AK_Read : AK_Written,
                                         Arg, POK_FunctionCall, Self, Loc);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::LocksExcluded: {
          const auto *A = cast<LocksExcludedAttr>(At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        default:
          break;
        }
      }
      if (DeclaredLocks.empty())
        continue;
      CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
                        StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
      if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
          Cp.isInvalid() && CBTE) {
        if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
            Object != Analyzer->ConstructedObjects.end())
          Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
                              /*Reentrant=*/false);
      }
      const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
      if (!Fact) {
        Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
                                             Cp.toString(), LK_Exclusive,
                                             Exp->getExprLoc());
        continue;
      }
      const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
      for (const auto &[a, b] :
           zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
        if (!a.has_value()) {
          Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              b.value().getKind(), b.value().toString());
        } else if (!b.has_value()) {
          Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              a.value().getKind(), a.value().toString());
        } else if (!a.value().equals(b.value())) {
          Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              a.value().getKind(), a.value().toString(), b.value().toString());
          break;
        }
      }
    }
  }
  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);

  // Add locks.
  FactEntry::SourceKind Source =
      !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
                                M, LK_Exclusive, Loc, Source));
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
                                M, LK_Shared, Loc, Source));

  if (!Scp.shouldIgnore()) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
        Scp, Loc, FactEntry::Acquired,
        ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
            ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
            SharedLocksToRemove.size());
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, ScopedEntry);
  }
}
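
// A sketch of the scoped-lockable handling above (hypothetical user code,
// assuming the annotated Mutex type sketched earlier):
//
//   struct __attribute__((scoped_lockable)) MutexLocker {
//     MutexLocker(Mutex *M) __attribute__((acquire_capability(M)));
//     ~MutexLocker() __attribute__((release_capability()));
//   };
//
//   void F() {
//     MutexLocker Guard(&Mu);  // Adds 'Mu' (Managed) plus a scoped fact
//     Data = 42;               //   for 'Guard' mapping it to 'Mu'.
//   }                          // The destructor releases via that fact.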

/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
  switch (UO->getOpcode()) {
  case UO_PostDec:
  case UO_PostInc:
  case UO_PreDec:
  case UO_PreInc:
    checkAccess(UO->getSubExpr(), AK_Written);
    break;
  default:
    break;
  }
}
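
// A sketch of the split between this visitor and VisitCastExpr below
// (hypothetical user code):
//
//   int Data __attribute__((guarded_by(Mu)));
//   void F() {
//     ++Data;         // checked here as a write (AK_Written)
//     int X = Data;   // checked in VisitCastExpr as a read (AK_Read)
//   }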

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  updateLocalVarMapCtx(BO);
  checkAccess(BO->getLHS(), AK_Written);
}

/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}

void BuildLockset::examineArguments(const FunctionDecl *FD,
                                    CallExpr::const_arg_iterator ArgBegin,
                                    CallExpr::const_arg_iterator ArgEnd,
                                    bool SkipFirstParam) {
  // Currently we can't do anything if we don't know the function declaration.
  if (!FD)
    return;

  // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
  // only turns off checking within the body of a function, but we also
  // use it to turn off checking in arguments to the function. This
  // could result in some false negatives, but the alternative is to
  // create yet another attribute.
  if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  const ArrayRef<ParmVarDecl *> Params = FD->parameters();
  auto Param = Params.begin();
  if (SkipFirstParam)
    ++Param;

  // There can be default arguments, so we stop when one iterator is at end().
  for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
       ++Param, ++Arg) {
    QualType Qt = (*Param)->getType();
    if (Qt->isReferenceType())
      checkAccess(*Arg, AK_Read, POK_PassByRef);
    else if (Qt->isPointerType())
      checkPtAccess(*Arg, AK_Read, POK_PassPointer);
  }
}
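
// A sketch of the pass-by-reference/pointer checks above (hypothetical user
// code; these warnings are gated behind -Wthread-safety-reference and
// related flags):
//
//   int Data __attribute__((guarded_by(Mu)));
//   void TakesRef(int &X);
//   void TakesPtr(int *X);
//
//   void F() {
//     TakesRef(Data);    // warning: passing guarded variable by reference
//     TakesPtr(&Data);   // warning: passing pointer to guarded variable
//   }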

void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  updateLocalVarMapCtx(Exp);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer.
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        // Should perhaps be AK_Written if !MD->isConst().
        checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        // Should perhaps be AK_Written if !MD->isConst().
        checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }

    examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OverloadedOperatorKind OEop = OE->getOperator();
    switch (OEop) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      checkAccess(OE->getArg(1), AK_Read);
      [[fallthrough]];
    case OO_PlusPlus:
    case OO_MinusMinus:
      checkAccess(OE->getArg(0), AK_Written);
      break;
    case OO_Star:
    case OO_ArrowStar:
    case OO_Arrow:
    case OO_Subscript:
      if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
        // Grrr. operator* can be multiplication...
        checkPtAccess(OE->getArg(0), AK_Read);
      }
      [[fallthrough]];
    default: {
      // TODO: get rid of this, and rely on pass-by-ref instead.
      const Expr *Obj = OE->getArg(0);
      checkAccess(Obj, AK_Read);
      // Check the remaining arguments. For method operators, the first
      // argument is the implicit self argument, and doesn't appear in the
      // FunctionDecl, but for non-methods it does.
      const FunctionDecl *FD = OE->getDirectCallee();
      examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
                       /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
      break;
    }
    }
  } else {
    examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
  }

  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D)
    return;
  handleCall(Exp, D);
}

void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr *Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  } else {
    examineArguments(D, Exp->arg_begin(), Exp->arg_end());
  }
  if (D && D->hasAttrs())
    handleCall(Exp, D);
}

static const Expr *UnpackConstruction(const Expr *E) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_NoOp)
      E = CE->getSubExpr()->IgnoreParens();
  if (auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_ConstructorConversion ||
        CE->getCastKind() == CK_UserDefinedConversion)
      E = CE->getSubExpr();
  if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
    E = BTE->getSubExpr();
  return E;
}

void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  updateLocalVarMapCtx(S);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // Handle constructors that involve temporaries.
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr()->IgnoreParens();
      E = UnpackConstruction(E);

      if (auto Object = Analyzer->ConstructedObjects.find(E);
          Object != Analyzer->ConstructedObjects.end()) {
        Object->second->setClangDecl(VD);
        Analyzer->ConstructedObjects.erase(Object);
      }
    }
  }
}

void BuildLockset::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *Exp) {
  if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
    if (auto Object = Analyzer->ConstructedObjects.find(
            UnpackConstruction(Exp->getSubExpr()));
        Object != Analyzer->ConstructedObjects.end()) {
      Object->second->setClangDecl(ExtD);
      Analyzer->ConstructedObjects.erase(Object);
    }
  }
}

void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
  if (Analyzer->CurrentFunction == nullptr)
    return;
  const Expr *RetVal = S->getRetValue();
  if (!RetVal)
    return;

  // If returning by reference or pointer, check that the function requires the
  // appropriate capabilities.
  const QualType ReturnType =
      Analyzer->CurrentFunction->getReturnType().getCanonicalType();
  if (ReturnType->isLValueReferenceType()) {
    Analyzer->checkAccess(
        FunctionExitFSet, RetVal,
        ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK_ReturnByRef);
  } else if (ReturnType->isPointerType()) {
    Analyzer->checkPtAccess(
        FunctionExitFSet, RetVal,
        ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK_ReturnPointer);
  }
}

/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
/// \return false if we should keep \p A, true if we should take \p B.
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
                                SourceLocation JoinLoc,
                                LockErrorKind EntryLEK) {
  // Whether we can replace \p A by \p B.
  const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
  unsigned int ReentrancyDepthA = 0;
  unsigned int ReentrancyDepthB = 0;

  if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
    ReentrancyDepthA = LFE->getReentrancyDepth();
  if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
    ReentrancyDepthB = LFE->getReentrancyDepth();

  if (ReentrancyDepthA != ReentrancyDepthB) {
    Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
                                      JoinLoc, EntryLEK,
                                      /*ReentrancyMismatch=*/true);
    // Pick the FactEntry with the greater reentrancy depth as the "good"
    // fact to reduce potential later warnings.
    return CanModify && ReentrancyDepthA < ReentrancyDepthB;
  } else if (A.kind() != B.kind()) {
    // For managed capabilities, the destructor should unlock in the right mode
    // anyway. For asserted capabilities no unlocking is needed.
    if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
      // The shared capability subsumes the exclusive capability, if possible.
      bool ShouldTakeB = B.kind() == LK_Shared;
      if (CanModify || !ShouldTakeB)
        return ShouldTakeB;
    }
    Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
                                     A.loc());
    // Take the exclusive capability to reduce further warnings.
    return CanModify && B.kind() == LK_Exclusive;
  } else {
    // The non-asserted capability is the one we want to track.
    return CanModify && A.asserted() && !B.asserted();
  }
}
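
// A sketch of a join point that reaches the mismatched-kind branch above
// (hypothetical user code; 'ReaderLock' is assumed to be annotated with
// acquire_shared_capability()):
//
//   Mutex Mu;
//   void F(bool C) {
//     if (C)
//       Mu.Lock();        // exclusive on one edge
//     else
//       Mu.ReaderLock();  // shared on the other
//     // Join: 'Mu' is held exclusively on one path and shared on the
//     // other, so handleExclusiveAndShared() fires here.
//     Mu.Unlock();
//   }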

/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
/// \param EntrySet A lockset for entry into a (possibly new) block.
/// \param ExitSet The lockset on exiting a preceding block.
/// \param JoinLoc The location of the join point for error reporting.
/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
                                            const FactSet &ExitSet,
                                            SourceLocation JoinLoc,
                                            LockErrorKind EntryLEK,
                                            LockErrorKind ExitLEK) {
  FactSet EntrySetOrig = EntrySet;

  // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
  for (const auto &Fact : ExitSet) {
    const FactEntry &ExitFact = FactMan[Fact];

    FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
    if (EntryIt != EntrySet.end()) {
      if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
        *EntryIt = Fact;
    } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
      ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
                                             EntryLEK, Handler);
    }
  }

  // Find locks in EntrySet that are not in ExitSet, and remove them.
  for (const auto &Fact : EntrySetOrig) {
    const FactEntry *EntryFact = &FactMan[Fact];
    const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);

    if (!ExitFact) {
      if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
          ExitLEK == LEK_NotLockedAtEndOfFunction)
        EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
                                                 ExitLEK, Handler);
      if (ExitLEK == LEK_LockedSomePredecessors)
        EntrySet.removeLock(FactMan, *EntryFact);
    }
  }
}
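
// A sketch of the symmetric-difference warnings above (hypothetical user
// code):
//
//   Mutex Mu;
//   void F(bool C) {
//     if (C)
//       Mu.Lock();
//     // Join: 'Mu' is only in the lockset of the then-branch, so the
//     // analysis warns that it is held on some but not all predecessors
//     // and keeps the intersection (without 'Mu') going forward.
//     DoSomething();
//     if (C)
//       Mu.Unlock();  // also warns: releasing a lock that isn't held
//   }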

// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  CurrentFunction = dyn_cast<FunctionDecl>(D);

  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code. Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access are disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return; // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return; // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];

  // Mark the entry block as reachable.
  Initial.Reachable = true;

  // Compute SSA names for local variables.
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to the initial lockset. Also turn off checking for lock and unlock
  // functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty()) {
    assert(*SortedGraph->begin() == &CFGraph->getEntry());
    FactSet &InitialLockset = Initial.EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }
    ArrayRef<ParmVarDecl *> Params;
    if (CurrentFunction)
      Params = CurrentFunction->getCanonicalDecl()->parameters();
    else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
      Params = CurrentMethod->getCanonicalDecl()->parameters();
    else
      llvm_unreachable("Unknown function kind");
    for (const ParmVarDecl *Param : Params) {
      CapExprSet UnderlyingLocks;
      for (const auto *Attr : Param->attrs()) {
        Loc = Attr->getLocation();
        if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                      nullptr, Param);
          getMutexIDs(LocksReleased, A, nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                      nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksAcquired
                                    : ExclusiveLocksAcquired,
                      A, nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        }
      }
      if (UnderlyingLocks.empty())
        continue;
      CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
                        StringRef(),
                        /*Neg=*/false, /*Reentrant=*/false);
      auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
          Cp, Param->getLocation(), FactEntry::Declared,
          UnderlyingLocks.size());
      for (const CapabilityExpr &M : UnderlyingLocks)
        ScopedEntry->addLock(M);
      addLock(InitialLockset, ScopedEntry, true);
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      const auto *Entry = FactMan.createFact<LockableFactEntry>(
          Mu, LK_Exclusive, Loc, FactEntry::Declared);
      addLock(InitialLockset, Entry, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      const auto *Entry = FactMan.createFact<LockableFactEntry>(
          Mu, LK_Shared, Loc, FactEntry::Declared);
      addLock(InitialLockset, Entry, true);
    }
  }

  // Compute the expected exit set.
  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedFunctionExitSet = Initial.EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
                                                       D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
                                                       D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedFunctionExitSet.removeLock(FactMan, Lock);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        // Surprisingly, 'continue' doesn't always produce back edges, because
        // the CFG has empty "transition" blocks where they meet with the end
        // of the regular loop body. We still want to diagnose them as loops.
        intersectAndWarn(
            CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
            isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
                ? LEK_LockedSomeLoopIterations
                : LEK_LockedSomePredecessors);
      }
    }

    // Skip the rest of the block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        LocksetBuilder.Visit(CS.getStmt());
        break;
      }
      // Ignore BaseDtor and MemberDtor for now.
      case CFGElement::AutomaticObjectDtor: {
        CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
        const auto *DD = AD.getDestructorDecl(AC.getASTContext());
        // Function parameters are constructed in the caller's context, and
        // the CFG does not contain their ctors. Ignore them, as their
        // capabilities cannot be analysed because of this missing
        // information.
        if (isa_and_nonnull<ParmVarDecl>(AD.getVarDecl()))
          break;
        if (!DD || !DD->hasAttrs())
          break;

        LocksetBuilder.handleCall(
            nullptr, DD,
            SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
            AD.getTriggerStmt()->getEndLoc());
        break;
      }

      case CFGElement::CleanupFunction: {
        const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
        LocksetBuilder.handleCall(
            /*Exp=*/nullptr, CF.getFunctionDecl(),
            SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
            CF.getVarDecl()->getLocation());
        break;
      }

      case CFGElement::TemporaryDtor: {
        auto TD = BI.castAs<CFGTemporaryDtor>();

        // Clean up the constructed object even if there are no attributes,
        // to keep the number of objects in limbo as small as possible.
        if (auto Object = ConstructedObjects.find(
                TD.getBindTemporaryExpr()->getSubExpr());
            Object != ConstructedObjects.end()) {
          const auto *DD = TD.getDestructorDecl(AC.getASTContext());
          if (DD->hasAttrs())
            // TODO: the location here isn't quite correct.
            LocksetBuilder.handleCall(nullptr, DD, Object->second,
                                      TD.getBindTemporaryExpr()->getEndLoc());
          ConstructedObjects.erase(Object);
        }
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock), we need to check that the lockset of CurrBlock
    // is equal to the one held at the beginning of FirstLoopBlock. We can
    // look up the lockset held at the beginning of FirstLoopBlock in the
    // EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations);
    }
  }

  // Skip the final check if the exit block is unreachable.
  if (!Final.Reachable)
    return;

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
                   LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);

  Handler.leaveFunction(CurrentFunction);
}
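
// A sketch of the loop back-edge check performed above (hypothetical user
// code, assuming the annotated Mutex type sketched earlier):
//
//   Mutex Mu;
//   void F(int N) {
//     for (int I = 0; I < N; ++I) {
//       Mu.Lock();
//       if (I == 0)
//         continue;   // back edge with 'Mu' held
//       Mu.Unlock();
//     }               // warning: 'Mu' is held on some loop iterations
//   }                 //   but not others (LEK_LockedSomeLoopIterations)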

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}

void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }

/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
  case AK_Read:
    return LK_Shared;
  case AK_Written:
    return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}