clang 22.0.0git
ThreadSafety.cpp
//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
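//
// For example (an illustrative sketch, assuming the GUARDED_BY macro
// described in the documentation above):
//
//   Mutex Mu;
//   int Count GUARDED_BY(Mu);
//   void incr() { Count++; }  // warning: writing variable 'Count' requires
//                             // holding mutex 'Mu' exclusively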
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push M onto list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        }))
      push_back(CapE);
  }
};
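
// Illustrative example: if a function were annotated with
//   void f() EXCLUSIVE_LOCKS_REQUIRED(mu, mu);
// compiling its attribute arguments through push_back_nodup would yield a
// CapExprSet containing a single entry for 'mu'.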

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
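
// For example, a fact acquired with LK_Exclusive satisfies both
// isAtLeast(LK_Shared) and isAtLeast(LK_Exclusive), while a fact acquired
// with LK_Shared satisfies only isAtLeast(LK_Shared).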

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  llvm::BumpPtrAllocator &Alloc;
  std::vector<const FactEntry *> Facts;

public:
  FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  template <typename T, typename... ArgTypes>
  T *createFact(ArgTypes &&...Args) {
    static_assert(std::is_trivially_destructible_v<T>);
    return T::create(Alloc, std::forward<ArgTypes>(Args)...);
  }

  FactID newFact(const FactEntry *Entry) {
    Facts.push_back(Entry);
    assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
           "FactID space exhausted");
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, const FactEntry *Entry) {
    FactID F = FM.newFact(Entry);
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }
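
  // Note: removeLock above swaps the matched entry with the last one before
  // popping, so a FactSet preserves set membership but not insertion order.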

  std::optional<FactID> replaceLock(FactManager &FM, iterator It,
                                    const FactEntry *Entry) {
    if (It == end())
      return std::nullopt;
    FactID F = FM.newFact(Entry);
    *It = F;
    return F;
  }

  std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
                                    const FactEntry *Entry) {
    return replaceLock(FM, findLockIter(FM, CapE), Entry);
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return llvm::find_if(*this,
                         [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I =
        llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl *Vd) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
    return I != end();
  }
};
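
// Illustrative example: a typical fact set holds only one or two facts, e.g.
// {mu, !other.mu}, so the linear lookups above are cheap. findLock does an
// exact match, whereas findPartialMatch can relate a query such as 'a->mu'
// to a held fact 'b->mu' (the same member on a different object), which is
// roughly what the "found near match" diagnostics rely on.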

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo *insertAttrExprs(const ValueDecl *Vd,
                              ThreadSafetyAnalyzer &Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl *Vd,
                        const FactSet &FSet,
                        ThreadSafetyAnalyzer &Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
390
391// A LocalVariableMap maintains a map from local variables to their currently
392// valid definitions. It provides SSA-like functionality when traversing the
393// CFG. Like SSA, each definition or assignment to a variable is assigned a
394// unique name (an integer), which acts as the SSA name for that definition.
395// The total set of names is shared among all CFG basic blocks.
396// Unlike SSA, we do not rewrite expressions to replace local variables declrefs
397// with their SSA-names. Instead, we compute a Context for each point in the
398// code, which maps local variables to the appropriate SSA-name. This map
399// changes with each assignment.
400//
401// The map is computed in a single pass over the CFG. Subsequent analyses can
402// then query the map to find the appropriate Context for a statement, and use
403// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition *lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the returned Expr.
  const Expr *lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }
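
  // For example, with VarDefinitions [x1 = 0, x2 -> ref(x1)], looking up 'x'
  // in a context that maps x to x2 follows the reference chain and returns
  // the expression '0', rewriting Ctx to the context recorded for x1.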

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Resolve any definition ID down to its non-reference base ID.
  unsigned getCanonicalDefinitionID(unsigned ID) {
    while (ID > 0 && VarDefinitions[ID].isReference())
      ID = VarDefinitions[ID].Ref;
    return ID;
  }

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap *VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}
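
// For example, 'x = y + 1' installs a new definition for x, whereas the
// compound assignment 'x += y' merely clears x's definition, since its new
// value is not yet representable as a simple expression.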

// Invalidates local variable definitions if the variable escaped.
void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
  const FunctionDecl *FD = CE->getDirectCallee();
  if (!FD)
    return;

  // Heuristic for likely-benign functions that pass by mutable reference. This
  // is needed to avoid a slew of false positives due to mutable reference
  // passing where the captured reference is usually passed on by value.
  if (const IdentifierInfo *II = FD->getIdentifier()) {
    // Any kind of std::bind-like functions.
    if (II->isStr("bind") || II->isStr("bind_front"))
      return;
  }

  // Invalidate local variable definitions that are passed by non-const
  // reference or non-const pointer.
  for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
    if (Idx >= FD->getNumParams())
      break;

    const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
    const ParmVarDecl *PVD = FD->getParamDecl(Idx);
    QualType ParamType = PVD->getType();

    // Potential reassignment if passed by non-const reference / pointer.
    const ValueDecl *VDec = nullptr;
    if (ParamType->isReferenceType() &&
        !ParamType->getPointeeType().isConstQualified()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
        VDec = DRE->getDecl();
    } else if (ParamType->isPointerType() &&
               !ParamType->getPointeeType().isConstQualified()) {
      Arg = Arg->IgnoreParenCasts();
      if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
        if (UO->getOpcode() == UO_AddrOf) {
          const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
          if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
            VDec = DRE->getDecl();
        }
      }
    }

    if (VDec && Ctx.lookup(VDec)) {
      Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(CE, Ctx);
    }
  }
}
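
// For example, given 'void update(int &v);', the call 'update(x)' (or a call
// taking '&x' through a non-const pointer parameter) clears any known
// definition of x, because the callee may have reassigned it.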

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *I2 = C2.lookup(Dec);
    if (!I2) {
      // The variable doesn't exist on the second path.
      Result = removeDefinition(Dec, Result);
    } else if (getCanonicalDefinitionID(P.second) !=
               getCanonicalDefinitionID(*I2)) {
      // If the canonical definitions mismatch, the underlying definitions are
      // different; invalidate.
      Result = clearDefinition(Dec, Result);
    }
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    const unsigned I1 = P.second;
    VarDefinition *VDef = &VarDefinitions[I1];
    assert(VDef->isReference());

    const unsigned *I2 = C2.lookup(P.first);
    if (!I2) {
      // Variable does not exist at the end of the loop, invalidate.
      VDef->Ref = 0;
      continue;
    }

    // Compare the canonical IDs. This correctly handles chains of references
    // and determines if the variable is truly loop-invariant.
    if (getCanonicalDefinitionID(VDef->Ref) != getCanonicalDefinitionID(*I2)) {
      VDef->Ref = 0; // Mark this variable as undefined
    }
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, e.g.
//
//                       { Context                 | VarDefinitions }
// int x = 0;            { x -> x1                 | x1 = 0 }
// int y = 0;            { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
// if (b) x = 1;         { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
// else   x = 2;         { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
// ...                   { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (e.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context           | VarDefinitions }
// int x = 0, y = 0;     { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
// while (b)             { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//   x = x+1;            { x -> x3, y -> y1  | x3 = x2 + 1, ... }
// ...                   { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block.
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array.
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}

namespace {

class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;

  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FactMan, entry, RFact);
    } else {
      Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                               entry.loc());
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FactMan, RFact);
    } else if (!Cp.negative()) {
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability
  // reentrantly, nullptr otherwise.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrantly, nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};
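
// Illustrative example: for a capability marked reentrant, a second
// acquisition replaces the fact for 'mu' (depth 0) with one at depth 1 via
// tryReenter; each unlock steps the depth back down via leaveReentrant, and
// only the final unlock removes the fact and introduces the negative
// capability '!mu'.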

enum UnderlyingCapabilityKind {
  UCK_Acquired,          ///< Any kind of acquired capability.
  UCK_ReleasedShared,    ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};

struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};

class ScopedLockableFactEntry final
    : public FactEntry,
      private llvm::TrailingObjects<ScopedLockableFactEntry,
                                    UnderlyingCapability> {
  friend TrailingObjects;

private:
  const unsigned ManagedCapacity;
  unsigned ManagedSize = 0;

  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src, unsigned ManagedCapacity)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
        ManagedCapacity(ManagedCapacity) {}

  void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
    assert(ManagedSize < ManagedCapacity);
    new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
    ++ManagedSize;
  }

  ArrayRef<UnderlyingCapability> getManaged() const {
    return getTrailingObjects(ManagedSize);
  }

public:
  static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                         const CapabilityExpr &CE,
                                         SourceLocation Loc, SourceKind Src,
                                         unsigned ManagedCapacity) {
    void *Storage =
        Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
                       alignof(ScopedLockableFactEntry));
    return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
  }

  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : getManaged())
      UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  /// \name Adding managed locks
  /// Capacity for managed locks must have been allocated via \ref create.
  /// There is no reallocation in case the capacity is exceeded!
  /// \{
  void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedShared);
  }
  /// \}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : getManaged()) {
      const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
                                          UnderlyingMutex.Cap.toString(), loc(),
                                          JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : getManaged()) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
             &Handler);
      else
        unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : getManaged()) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
      } else if (Handler) {
        Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
      }
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
                                                                  Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
        return;
      }

      FSet.replaceLock(
          FactMan, It,
          FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
    }
  }
};
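
// Illustrative example (assuming an RAII type annotated with the
// scoped_lockable attribute whose constructor acquires 'mu'):
//
//   {
//     MutexLock Guard(&mu);  // adds a scoped fact managing 'mu'
//     ...                    // 'mu' is held here
//   }                        // destructor: handleUnlock with FullyRemove set,
//                            // releasing 'mu' and the scoped fact itself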

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
        GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock *PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::SExpr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.try_emplace(Vd, true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentFunction->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted() && !Entry->reentrant())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (!Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, Entry);
  }
}
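
// Illustrative example: acquiring 'mu' when neither 'mu' nor '!mu' is in the
// fact set may trigger handleNegativeNotHeld, since without holding '!mu' the
// analysis cannot prove 'mu' is not already held; acquiring 'mu' when it is
// already held routes through handleLock, which reports a double lock unless
// the capability is reentrant.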

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       til::SExpr *Self) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// The Negate parameter should be initialized to false, and will be set to
// true if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}
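
// For example, in
//   if (!mu.TryLock())
//     return;
// the condition traces back to the TryLock call with Negate == true, so the
// caller adds 'mu' only on the fall-through edge rather than the early-return
// edge.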

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when the result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  // Temporarily set the lookup context for SExprBuilder.
  SxBuilder.setLookupLocalVarExpr([&](const NamedDecl *D) -> const Expr * {
    if (!Handler.issueBetaWarnings())
      return nullptr;
    auto Ctx = LVarCtx;
    return LocalVarMap.lookupExpr(D, Ctx);
  });
  auto Cleanup = llvm::make_scope_exit(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
                                                          LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
                                                          LK_Shared, Loc));
}
1707
1708namespace {
1709
1710/// We use this class to visit different types of expressions in
1711/// CFGBlocks, and build up the lockset.
1712/// An expression may cause us to add or remove locks from the lockset, or else
1713/// output error messages related to missing locks.
1714/// FIXME: In future, we may be able to not inherit from a visitor.
1715class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1716 friend class ThreadSafetyAnalyzer;
1717
1718 ThreadSafetyAnalyzer *Analyzer;
1719 FactSet FSet;
1720 // The fact set for the function on exit.
1721 const FactSet &FunctionExitFSet;
1722 LocalVariableMap::Context LVarCtx;
1723 unsigned CtxIndex;
1724
1725 // helper functions
1726
1727 void checkAccess(const Expr *Exp, AccessKind AK,
1728 ProtectedOperationKind POK = POK_VarAccess) {
1729 Analyzer->checkAccess(FSet, Exp, AK, POK);
1730 }
1731 void checkPtAccess(const Expr *Exp, AccessKind AK,
1732 ProtectedOperationKind POK = POK_VarAccess) {
1733 Analyzer->checkPtAccess(FSet, Exp, AK, POK);
1734 }
1735
1736 void handleCall(const Expr *Exp, const NamedDecl *D,
1737 til::SExpr *Self = nullptr,
1738 SourceLocation Loc = SourceLocation());
1739 void examineArguments(const FunctionDecl *FD,
1740 CallExpr::const_arg_iterator ArgBegin,
1741 CallExpr::const_arg_iterator ArgEnd,
1742 bool SkipFirstParam = false);
1743
1744public:
1745 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
1746 const FactSet &FunctionExitFSet)
1747 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1748 FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
1749 CtxIndex(Info.EntryIndex) {
1750 Analyzer->SxBuilder.setLookupLocalVarExpr(
1751 [this](const NamedDecl *D) -> const Expr * {
1752 if (!Analyzer->Handler.issueBetaWarnings())
1753 return nullptr;
1754 auto Ctx = LVarCtx;
1755 return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
1756 });
1757 }
1758
1759 ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
1760
1761 void VisitUnaryOperator(const UnaryOperator *UO);
1762 void VisitBinaryOperator(const BinaryOperator *BO);
1763 void VisitCastExpr(const CastExpr *CE);
1764 void VisitCallExpr(const CallExpr *Exp);
1765 void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1766 void VisitDeclStmt(const DeclStmt *S);
1767 void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
1768 void VisitReturnStmt(const ReturnStmt *S);
1769};
1770
1771} // namespace
1772
1773 /// Warn if the LSet does not contain a lock sufficient to protect access
1774 /// of at least the passed-in AccessKind.
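///
/// A minimal sketch of what this diagnoses (hypothetical annotations):
///
///   Mutex mu;
///   int counter GUARDED_BY(mu);
///
///   void bump() { counter++; }  // warning: writing variable 'counter'
///                               // requires holding mutex 'mu' exclusively
///
/// Reads need only a shared lock (LK_Shared); writes need LK_Exclusive, as
/// computed by getLockKindFromAccessKind(AK).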
1775void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
1776 const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
1777 Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
1778 SourceLocation Loc) {
1779 LockKind LK = getLockKindFromAccessKind(AK);
1780 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1781 if (Cp.isInvalid()) {
1782 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1783 return;
1784 } else if (Cp.shouldIgnore()) {
1785 return;
1786 }
1787
1788 if (Cp.negative()) {
1789 // Negative capabilities act like excluded locks
1790 const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
1791 if (LDat) {
1792 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1793 (!Cp).toString(), Loc);
1794 return;
1795 }
1796
1797 // If this does not refer to a negative capability in the same class,
1798 // then stop here.
1799 if (!inCurrentScope(Cp))
1800 return;
1801
1802 // Otherwise the negative requirement must be propagated to the caller.
1803 LDat = FSet.findLock(FactMan, Cp);
1804 if (!LDat) {
1805 Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1806 }
1807 return;
1808 }
1809
1810 const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
1811 bool NoError = true;
1812 if (!LDat) {
1813 // No exact match found. Look for a partial match.
1814 LDat = FSet.findPartialMatch(FactMan, Cp);
1815 if (LDat) {
1816 // Warn that there's no precise match.
1817 std::string PartMatchStr = LDat->toString();
1818 StringRef PartMatchName(PartMatchStr);
1819 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
1820 &PartMatchName);
1821 } else {
1822 // Warn that there's no match at all.
1823 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1824 }
1825 NoError = false;
1826 }
1827 // Make sure the mutex we found is the right kind.
1828 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1829 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1830 }
1831}
1832
1833/// Warn if the LSet contains the given lock.
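///
/// This backs LOCKS_EXCLUDED. Sketch (hypothetical annotations, using the
/// macros from the ThreadSafetyAnalysis docs):
///
///   Mutex mu;
///   void clean_up() LOCKS_EXCLUDED(mu);
///
///   void f() {
///     mu.Lock();
///     clean_up();  // warning: cannot call function 'clean_up' while mutex
///     mu.Unlock(); //          'mu' is held
///   }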
1834void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1835 const NamedDecl *D, const Expr *Exp,
1836 Expr *MutexExp, til::SExpr *Self,
1837 SourceLocation Loc) {
1838 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1839 if (Cp.isInvalid()) {
1840 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1841 return;
1842 } else if (Cp.shouldIgnore()) {
1843 return;
1844 }
1845
1846 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1847 if (LDat) {
1848 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1849 Cp.toString(), Loc);
1850 }
1851}
1852
1853/// Checks guarded_by and pt_guarded_by attributes.
1854/// Whenever we identify an access (read or write) to a DeclRefExpr that is
1855/// marked with guarded_by, we must ensure the appropriate mutexes are held.
1856/// Similarly, we check if the access is to an expression that dereferences
1857/// a pointer marked with pt_guarded_by.
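///
/// Sketch of the two attribute forms (hypothetical annotations):
///
///   Mutex mu;
///   int x GUARDED_BY(mu);      // accesses to 'x' itself require 'mu'
///   int *p PT_GUARDED_BY(mu);  // accesses to '*p' require 'mu'; reading or
///                              // writing the pointer 'p' itself does not
///
/// Dereferences ('*p', 'p->field', 'p[i]') are routed to checkPtAccess,
/// while plain variable and member accesses are checked here.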
1858void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
1859 AccessKind AK,
1860 ProtectedOperationKind POK) {
1861 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1862
1863 SourceLocation Loc = Exp->getExprLoc();
1864
1865 // Local variables of reference type cannot be re-assigned;
1866 // map them to their initializer.
1867 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1868 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1869 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1870 if (const auto *E = VD->getInit()) {
1871 // Guard against self-initialization. e.g., int &i = i;
1872 if (E == Exp)
1873 break;
1874 Exp = E->IgnoreImplicit()->IgnoreParenCasts();
1875 continue;
1876 }
1877 }
1878 break;
1879 }
1880
1881 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1882 // For dereferences
1883 if (UO->getOpcode() == UO_Deref)
1884 checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
1885 return;
1886 }
1887
1888 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
1889 switch (BO->getOpcode()) {
1890 case BO_PtrMemD: // .*
1891 return checkAccess(FSet, BO->getLHS(), AK, POK);
1892 case BO_PtrMemI: // ->*
1893 return checkPtAccess(FSet, BO->getLHS(), AK, POK);
1894 default:
1895 return;
1896 }
1897 }
1898
1899 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1900 checkPtAccess(FSet, AE->getLHS(), AK, POK);
1901 return;
1902 }
1903
1904 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1905 if (ME->isArrow())
1906 checkPtAccess(FSet, ME->getBase(), AK, POK);
1907 else
1908 checkAccess(FSet, ME->getBase(), AK, POK);
1909 }
1910
1911 const ValueDecl *D = getValueDecl(Exp);
1912 if (!D || !D->hasAttrs())
1913 return;
1914
1915 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
1916 Handler.handleNoMutexHeld(D, POK, AK, Loc);
1917 }
1918
1919 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1920 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
1921}
1922
1923/// Checks pt_guarded_by and pt_guarded_var attributes.
1924 /// POK is the same operation kind that was passed to checkAccess.
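///
/// For instance, with the hypothetical 'int *p PT_GUARDED_BY(mu)' above,
/// '*p = 42' must hold 'mu'. Conversely, dereferencing the address of a plain
/// variable is delegated back to checkAccess, and an array decaying to a
/// pointer is treated as a GUARDED_BY access to the array's elements.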
1925void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
1926 AccessKind AK,
1927 ProtectedOperationKind POK) {
1928 // Strip off paren- and cast-expressions, checking if we encounter any other
1929 // operator that should be delegated to checkAccess() instead.
1930 while (true) {
1931 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1932 Exp = PE->getSubExpr();
1933 continue;
1934 }
1935 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1936 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1937 // If it's an actual array, and not a pointer, then its elements
1938 // are protected by GUARDED_BY, not PT_GUARDED_BY.
1939 checkAccess(FSet, CE->getSubExpr(), AK, POK);
1940 return;
1941 }
1942 Exp = CE->getSubExpr();
1943 continue;
1944 }
1945 break;
1946 }
1947
1948 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1949 if (UO->getOpcode() == UO_AddrOf) {
1950 // Pointer access through the address of a variable: the dereferenced
1951 // object is the variable itself, not actually a pointer.
1952 checkAccess(FSet, UO->getSubExpr(), AK, POK);
1953 return;
1954 }
1955 }
1956
1957 // Pass by reference/pointer warnings are under a different flag.
1958 ProtectedOperationKind PtPOK = POK_VarDereference;
1959 switch (POK) {
1960 case POK_PassByRef:
1961 PtPOK = POK_PtPassByRef;
1962 break;
1963 case POK_ReturnByRef:
1964 PtPOK = POK_PtReturnByRef;
1965 break;
1966 case POK_PassPointer:
1967 PtPOK = POK_PtPassPointer;
1968 break;
1969 case POK_ReturnPointer:
1970 PtPOK = POK_PtReturnPointer;
1971 break;
1972 default:
1973 break;
1974 }
1975
1976 const ValueDecl *D = getValueDecl(Exp);
1977 if (!D || !D->hasAttrs())
1978 return;
1979
1980 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
1981 Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
1982
1983 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1984 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
1985 Exp->getExprLoc());
1986}
1987
1988/// Process a function call, method call, constructor call,
1989/// or destructor call. This involves looking at the attributes on the
1990/// corresponding function/method/constructor/destructor, issuing warnings,
1991/// and updating the locksets accordingly.
1992///
1993/// FIXME: For classes annotated with one of the guarded annotations, we need
1994/// to treat const method calls as reads and non-const method calls as writes,
1995/// and check that the appropriate locks are held. Non-const method calls with
1996/// the same signature as const method calls can be also treated as reads.
1997///
1998/// \param Exp The call expression.
1999/// \param D The callee declaration.
2000/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
2001/// of an implicitly called cleanup function.
2002/// \param Loc If \p Exp = nullptr, the location.
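///
/// A sketch of the scoped-capability case handled here (hypothetical
/// MutexLock guard class marked SCOPED_LOCKABLE):
///
///   void f() {
///     MutexLock guard(&mu);  // ctor acquires 'mu'; 'guard' is added as a
///     data = 1;              // managing fact for the underlying mutex
///   }                        // implicit dtor call releases 'mu'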
2003void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
2004 til::SExpr *Self, SourceLocation Loc) {
2005 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
2006 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
2007 CapExprSet ScopedReqsAndExcludes;
2008
2009 // Figure out if we're constructing an object of a scoped lockable class.
2010 CapabilityExpr Scp;
2011 if (Exp) {
2012 assert(!Self);
2013 const auto *TagT = Exp->getType()->getAs<TagType>();
2014 if (D->hasAttrs() && TagT && Exp->isPRValue()) {
2015 til::LiteralPtr *Placeholder =
2016 Analyzer->SxBuilder.createThisPlaceholder();
2017 [[maybe_unused]] auto inserted =
2018 Analyzer->ConstructedObjects.insert({Exp, Placeholder});
2019 assert(inserted.second && "Are we visiting the same expression again?");
2020 if (isa<CXXConstructExpr>(Exp))
2021 Self = Placeholder;
2022 if (TagT->getOriginalDecl()
2023 ->getMostRecentDecl()
2024 ->hasAttr<ScopedLockableAttr>())
2025 Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
2026 }
2027
2028 assert(Loc.isInvalid());
2029 Loc = Exp->getExprLoc();
2030 }
2031
2032 for (const Attr *At : D->attrs()) {
2033 switch (At->getKind()) {
2034 // When we encounter a lock function, we need to add the lock to our
2035 // lockset.
2036 case attr::AcquireCapability: {
2037 const auto *A = cast<AcquireCapabilityAttr>(At);
2038 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2039 : ExclusiveLocksToAdd,
2040 A, Exp, D, Self);
2041 break;
2042 }
2043
2044 // An assert will add a lock to the lockset, but will not generate
2045 // a warning if it is already there, and will not generate a warning
2046 // if it is not removed.
2047 case attr::AssertCapability: {
2048 const auto *A = cast<AssertCapabilityAttr>(At);
2049 CapExprSet AssertLocks;
2050 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
2051 for (const auto &AssertLock : AssertLocks)
2052 Analyzer->addLock(
2053 FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2054 AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
2055 Loc, FactEntry::Asserted));
2056 break;
2057 }
2058
2059 // When we encounter an unlock function, we need to remove unlocked
2060 // mutexes from the lockset, and flag a warning if they are not there.
2061 case attr::ReleaseCapability: {
2062 const auto *A = cast<ReleaseCapabilityAttr>(At);
2063 if (A->isGeneric())
2064 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2065 else if (A->isShared())
2066 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2067 else
2068 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2069 break;
2070 }
2071
2072 case attr::RequiresCapability: {
2073 const auto *A = cast<RequiresCapabilityAttr>(At);
2074 for (auto *Arg : A->args()) {
2075 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2076 A->isShared() ? AK_Read : AK_Written,
2077 Arg, POK_FunctionCall, Self, Loc);
2078 // used for adopting a lock
2079 if (!Scp.shouldIgnore())
2080 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2081 }
2082 break;
2083 }
2084
2085 case attr::LocksExcluded: {
2086 const auto *A = cast<LocksExcludedAttr>(At);
2087 for (auto *Arg : A->args()) {
2088 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2089 // used for deferring a lock
2090 if (!Scp.shouldIgnore())
2091 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2092 }
2093 break;
2094 }
2095
2096 // Ignore attributes unrelated to thread-safety
2097 default:
2098 break;
2099 }
2100 }
2101
2102 std::optional<CallExpr::const_arg_range> Args;
2103 if (Exp) {
2104 if (const auto *CE = dyn_cast<CallExpr>(Exp))
2105 Args = CE->arguments();
2106 else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
2107 Args = CE->arguments();
2108 else
2109 llvm_unreachable("Unknown call kind");
2110 }
2111 const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
2112 if (CalledFunction && Args.has_value()) {
2113 for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
2114 CapExprSet DeclaredLocks;
2115 for (const Attr *At : Param->attrs()) {
2116 switch (At->getKind()) {
2117 case attr::AcquireCapability: {
2118 const auto *A = cast<AcquireCapabilityAttr>(At);
2119 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2120 : ExclusiveLocksToAdd,
2121 A, Exp, D, Self);
2122 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2123 break;
2124 }
2125
2126 case attr::ReleaseCapability: {
2127 const auto *A = cast<ReleaseCapabilityAttr>(At);
2128 if (A->isGeneric())
2129 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2130 else if (A->isShared())
2131 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2132 else
2133 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2134 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2135 break;
2136 }
2137
2138 case attr::RequiresCapability: {
2139 const auto *A = cast<RequiresCapabilityAttr>(At);
2140 for (auto *Arg : A->args())
2141 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2142 A->isShared() ? AK_Read : AK_Written,
2143 Arg, POK_FunctionCall, Self, Loc);
2144 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2145 break;
2146 }
2147
2148 case attr::LocksExcluded: {
2149 const auto *A = cast<LocksExcludedAttr>(At);
2150 for (auto *Arg : A->args())
2151 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2152 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2153 break;
2154 }
2155
2156 default:
2157 break;
2158 }
2159 }
2160 if (DeclaredLocks.empty())
2161 continue;
2162 CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
2163 StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
2164 if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
2165 Cp.isInvalid() && CBTE) {
2166 if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
2167 Object != Analyzer->ConstructedObjects.end())
2168 Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
2169 /*Reentrant=*/false);
2170 }
2171 const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
2172 if (!Fact) {
2173 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
2174 Cp.toString(), LK_Exclusive,
2175 Exp->getExprLoc());
2176 continue;
2177 }
2178 const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
2179 for (const auto &[a, b] :
2180 zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
2181 if (!a.has_value()) {
2182 Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
2183 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2184 b.value().getKind(), b.value().toString());
2185 } else if (!b.has_value()) {
2186 Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
2187 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2188 a.value().getKind(), a.value().toString());
2189 } else if (!a.value().equals(b.value())) {
2190 Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
2191 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2192 a.value().getKind(), a.value().toString(), b.value().toString());
2193 break;
2194 }
2195 }
2196 }
2197 }
2198 // Remove locks first to allow lock upgrading/downgrading.
2199 // FIXME -- should only fully remove if the attribute refers to 'this'.
2200 bool Dtor = isa<CXXDestructorDecl>(D);
2201 for (const auto &M : ExclusiveLocksToRemove)
2202 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
2203 for (const auto &M : SharedLocksToRemove)
2204 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
2205 for (const auto &M : GenericLocksToRemove)
2206 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
2207
2208 // Add locks.
2209 FactEntry::SourceKind Source =
2210 !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
2211 for (const auto &M : ExclusiveLocksToAdd)
2212 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2213 M, LK_Exclusive, Loc, Source));
2214 for (const auto &M : SharedLocksToAdd)
2215 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2216 M, LK_Shared, Loc, Source));
2217
2218 if (!Scp.shouldIgnore()) {
2219 // Add the managing object as a dummy mutex, mapped to the underlying mutex.
2220 auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
2221 Scp, Loc, FactEntry::Acquired,
2222 ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
2223 ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
2224 SharedLocksToRemove.size());
2225 for (const auto &M : ExclusiveLocksToAdd)
2226 ScopedEntry->addLock(M);
2227 for (const auto &M : SharedLocksToAdd)
2228 ScopedEntry->addLock(M);
2229 for (const auto &M : ScopedReqsAndExcludes)
2230 ScopedEntry->addLock(M);
2231 for (const auto &M : ExclusiveLocksToRemove)
2232 ScopedEntry->addExclusiveUnlock(M);
2233 for (const auto &M : SharedLocksToRemove)
2234 ScopedEntry->addSharedUnlock(M);
2235 Analyzer->addLock(FSet, ScopedEntry);
2236 }
2237}
2238
2239/// For unary operations which read and write a variable, we need to
2240/// check whether we hold any required mutexes. Reads are checked in
2241/// VisitCastExpr.
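///
/// For example, with the hypothetical 'int counter GUARDED_BY(mu)' above,
/// 'counter++' is checked here as a write (AK_Written); a plain read such as
/// 'int y = counter;' is instead caught by the LValueToRValue cast in
/// VisitCastExpr.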
2242void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2243 switch (UO->getOpcode()) {
2244 case UO_PostDec:
2245 case UO_PostInc:
2246 case UO_PreDec:
2247 case UO_PreInc:
2248 checkAccess(UO->getSubExpr(), AK_Written);
2249 break;
2250 default:
2251 break;
2252 }
2253}
2254
2255/// For binary operations which assign to a variable (writes), we need to check
2256/// whether we hold any required mutexes.
2257/// FIXME: Deal with non-primitive types.
2258void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2259 if (!BO->isAssignmentOp())
2260 return;
2261
2262 // adjust the context
2263 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
2264
2265 checkAccess(BO->getLHS(), AK_Written);
2266}
2267
2268/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2269/// need to ensure we hold any required mutexes.
2270/// FIXME: Deal with non-primitive types.
2271void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2272 if (CE->getCastKind() != CK_LValueToRValue)
2273 return;
2274 checkAccess(CE->getSubExpr(), AK_Read);
2275}
2276
2277void BuildLockset::examineArguments(const FunctionDecl *FD,
2278 CallExpr::const_arg_iterator ArgBegin,
2279 CallExpr::const_arg_iterator ArgEnd,
2280 bool SkipFirstParam) {
2281 // Currently we can't do anything if we don't know the function declaration.
2282 if (!FD)
2283 return;
2284
2285 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2286 // only turns off checking within the body of a function, but we also
2287 // use it to turn off checking in arguments to the function. This
2288 // could result in some false negatives, but the alternative is to
2289 // create yet another attribute.
2290 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2291 return;
2292
2293 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2294 auto Param = Params.begin();
2295 if (SkipFirstParam)
2296 ++Param;
2297
2298 // There can be default arguments, so we stop when one iterator is at end().
2299 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2300 ++Param, ++Arg) {
2301 QualType Qt = (*Param)->getType();
2302 if (Qt->isReferenceType())
2303 checkAccess(*Arg, AK_Read, POK_PassByRef);
2304 else if (Qt->isPointerType())
2305 checkPtAccess(*Arg, AK_Read, POK_PassPointer);
2306 }
2307}
2308
2309void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2310 // adjust the context
2311 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, Exp, LVarCtx);
2312
2313 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
2314 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
2315 // ME can be null when calling through a method pointer
2316 const CXXMethodDecl *MD = CE->getMethodDecl();
2317
2318 if (ME && MD) {
2319 if (ME->isArrow()) {
2320 // Should perhaps be AK_Written if !MD->isConst().
2321 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
2322 } else {
2323 // Should perhaps be AK_Written if !MD->isConst().
2324 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
2325 }
2326 }
2327
2328 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
2329 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
2330 OverloadedOperatorKind OEop = OE->getOperator();
2331 switch (OEop) {
2332 case OO_Equal:
2333 case OO_PlusEqual:
2334 case OO_MinusEqual:
2335 case OO_StarEqual:
2336 case OO_SlashEqual:
2337 case OO_PercentEqual:
2338 case OO_CaretEqual:
2339 case OO_AmpEqual:
2340 case OO_PipeEqual:
2341 case OO_LessLessEqual:
2342 case OO_GreaterGreaterEqual:
2343 checkAccess(OE->getArg(1), AK_Read);
2344 [[fallthrough]];
2345 case OO_PlusPlus:
2346 case OO_MinusMinus:
2347 checkAccess(OE->getArg(0), AK_Written);
2348 break;
2349 case OO_Star:
2350 case OO_ArrowStar:
2351 case OO_Arrow:
2352 case OO_Subscript:
2353 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
2354 // Grrr. operator* can be multiplication...
2355 checkPtAccess(OE->getArg(0), AK_Read);
2356 }
2357 [[fallthrough]];
2358 default: {
2359 // TODO: get rid of this, and rely on pass-by-ref instead.
2360 const Expr *Obj = OE->getArg(0);
2361 checkAccess(Obj, AK_Read);
2362 // Check the remaining arguments. For method operators, the first
2363 // argument is the implicit self argument, and doesn't appear in the
2364 // FunctionDecl, but for non-methods it does.
2365 const FunctionDecl *FD = OE->getDirectCallee();
2366 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
2367 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
2368 break;
2369 }
2370 }
2371 } else {
2372 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
2373 }
2374
2375 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2376 if (!D)
2377 return;
2378 handleCall(Exp, D);
2379}
2380
2381void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2382 const CXXConstructorDecl *D = Exp->getConstructor();
2383 if (D && D->isCopyConstructor()) {
2384 const Expr* Source = Exp->getArg(0);
2385 checkAccess(Source, AK_Read);
2386 } else {
2387 examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2388 }
2389 if (D && D->hasAttrs())
2390 handleCall(Exp, D);
2391}
2392
2393static const Expr *UnpackConstruction(const Expr *E) {
2394 if (auto *CE = dyn_cast<CastExpr>(E))
2395 if (CE->getCastKind() == CK_NoOp)
2396 E = CE->getSubExpr()->IgnoreParens();
2397 if (auto *CE = dyn_cast<CastExpr>(E))
2398 if (CE->getCastKind() == CK_ConstructorConversion ||
2399 CE->getCastKind() == CK_UserDefinedConversion)
2400 E = CE->getSubExpr();
2401 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2402 E = BTE->getSubExpr();
2403 return E;
2404}
2405
2406void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2407 // adjust the context
2408 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
2409
2410 for (auto *D : S->getDeclGroup()) {
2411 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2412 const Expr *E = VD->getInit();
2413 if (!E)
2414 continue;
2415 E = E->IgnoreParens();
2416
2417 // handle constructors that involve temporaries
2418 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2419 E = EWC->getSubExpr()->IgnoreParens();
2420 E = UnpackConstruction(E);
2421
2422 if (auto Object = Analyzer->ConstructedObjects.find(E);
2423 Object != Analyzer->ConstructedObjects.end()) {
2424 Object->second->setClangDecl(VD);
2425 Analyzer->ConstructedObjects.erase(Object);
2426 }
2427 }
2428 }
2429}
2430
2431void BuildLockset::VisitMaterializeTemporaryExpr(
2432 const MaterializeTemporaryExpr *Exp) {
2433 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2434 if (auto Object = Analyzer->ConstructedObjects.find(
2435 UnpackConstruction(Exp->getSubExpr()));
2436 Object != Analyzer->ConstructedObjects.end()) {
2437 Object->second->setClangDecl(ExtD);
2438 Analyzer->ConstructedObjects.erase(Object);
2439 }
2440 }
2441}
2442
2443void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
2444 if (Analyzer->CurrentFunction == nullptr)
2445 return;
2446 const Expr *RetVal = S->getRetValue();
2447 if (!RetVal)
2448 return;
2449
2450 // If returning by reference or pointer, check that the function requires the
2451 // appropriate capabilities.
2452 const QualType ReturnType =
2453 Analyzer->CurrentFunction->getReturnType().getCanonicalType();
2454 if (ReturnType->isLValueReferenceType()) {
2455 Analyzer->checkAccess(
2456 FunctionExitFSet, RetVal,
2457 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2458 POK_ReturnByRef);
2459 } else if (ReturnType->isPointerType()) {
2460 Analyzer->checkPtAccess(
2461 FunctionExitFSet, RetVal,
2462 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2463 POK_ReturnPointer);
2464 }
2465}
2466
2467/// Given two facts merging on a join point, possibly warn and decide whether to
2468/// keep or replace.
2469///
2470/// \return false if we should keep \p A, true if we should take \p B.
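///
/// For example, if one predecessor holds 'mu' as shared and another holds it
/// as exclusive, handleExclusiveAndShared fires and the exclusive fact is
/// kept (for managed or asserted facts the shared fact subsumes instead); if
/// reentrancy depths differ, the deeper fact is kept to limit follow-on
/// warnings.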
2471bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
2472 SourceLocation JoinLoc,
2473 LockErrorKind EntryLEK) {
2474 // Whether we can replace \p A by \p B.
2475 const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
2476 unsigned int ReentrancyDepthA = 0;
2477 unsigned int ReentrancyDepthB = 0;
2478
2479 if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
2480 ReentrancyDepthA = LFE->getReentrancyDepth();
2481 if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
2482 ReentrancyDepthB = LFE->getReentrancyDepth();
2483
2484 if (ReentrancyDepthA != ReentrancyDepthB) {
2485 Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
2486 JoinLoc, EntryLEK,
2487 /*ReentrancyMismatch=*/true);
2488 // Pick the FactEntry with the greater reentrancy depth as the "good"
2489 // fact to reduce potential later warnings.
2490 return CanModify && ReentrancyDepthA < ReentrancyDepthB;
2491 } else if (A.kind() != B.kind()) {
2492 // For managed capabilities, the destructor should unlock in the right mode
2493 // anyway. For asserted capabilities no unlocking is needed.
2494 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
2495 // The shared capability subsumes the exclusive capability, if possible.
2496 bool ShouldTakeB = B.kind() == LK_Shared;
2497 if (CanModify || !ShouldTakeB)
2498 return ShouldTakeB;
2499 }
2500 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
2501 A.loc());
2502 // Take the exclusive capability to reduce further warnings.
2503 return CanModify && B.kind() == LK_Exclusive;
2504 } else {
2505 // The non-asserted capability is the one we want to track.
2506 return CanModify && A.asserted() && !B.asserted();
2507 }
2508}
2509
2510/// Compute the intersection of two locksets and issue warnings for any
2511/// locks in the symmetric difference.
2512///
2513/// This function is used at a merge point in the CFG when comparing the lockset
2514/// of each branch being merged. For example, given the following sequence:
2515 /// A; if () then B; else C; D; we need to check that the locksets after B and C
2516 /// are the same. In the event of a difference, we use the intersection of these
2517/// two locksets at the start of D.
2518///
2519/// \param EntrySet A lockset for entry into a (possibly new) block.
2520/// \param ExitSet The lockset on exiting a preceding block.
2521/// \param JoinLoc The location of the join point for error reporting
2522/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2523/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
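///
/// A minimal sketch of a merge that warns (hypothetical annotations):
///
///   void f(bool b) {
///     if (b)
///       mu.Lock();
///     // join point: 'mu' is held on only one incoming edge, so a
///     // LEK_LockedSomePredecessors warning fires and 'mu' is dropped from
///     // the entry set of the block that follows.
///   }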
2524void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2525 const FactSet &ExitSet,
2526 SourceLocation JoinLoc,
2527 LockErrorKind EntryLEK,
2528 LockErrorKind ExitLEK) {
2529 FactSet EntrySetOrig = EntrySet;
2530
2531 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2532 for (const auto &Fact : ExitSet) {
2533 const FactEntry &ExitFact = FactMan[Fact];
2534
2535 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
2536 if (EntryIt != EntrySet.end()) {
2537 if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
2538 *EntryIt = Fact;
2539 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
2540 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
2541 EntryLEK, Handler);
2542 }
2543 }
2544
2545 // Find locks in EntrySet that are not in ExitSet, and remove them.
2546 for (const auto &Fact : EntrySetOrig) {
2547 const FactEntry *EntryFact = &FactMan[Fact];
2548 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
2549
2550 if (!ExitFact) {
2551 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
2552 ExitLEK == LEK_NotLockedAtEndOfFunction)
2553 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
2554 ExitLEK, Handler);
2555 if (ExitLEK == LEK_LockedSomePredecessors)
2556 EntrySet.removeLock(FactMan, *EntryFact);
2557 }
2558 }
2559}
2560
2561// Return true if block B never continues to its successors.
2562static bool neverReturns(const CFGBlock *B) {
2563 if (B->hasNoReturnElement())
2564 return true;
2565 if (B->empty())
2566 return false;
2567
2568 CFGElement Last = B->back();
2569 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2570 if (isa<CXXThrowExpr>(S->getStmt()))
2571 return true;
2572 }
2573 return false;
2574}
2575
2576/// Check a function's CFG for thread-safety violations.
2577///
2578/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2579/// at the end of each block, and issue warnings for thread safety violations.
2580/// Each block in the CFG is traversed exactly once.
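///
/// For example, a function annotated REQUIRES(mu) starts with 'mu' in its
/// initial lockset, and one annotated ACQUIRE(mu) is expected to hold 'mu' in
/// its exit set; the final intersectAndWarn against the expected exit set
/// reports anything still held or missing when the function returns.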
2581void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2582 // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
2583 // For now, we just use the walker to set things up.
2584 threadSafety::CFGWalker walker;
2585 if (!walker.init(AC))
2586 return;
2587
2588 // AC.dumpCFG(true);
2589 // threadSafety::printSCFG(walker);
2590
2591 CFG *CFGraph = walker.getGraph();
2592 const NamedDecl *D = walker.getDecl();
2593 CurrentFunction = dyn_cast<FunctionDecl>(D);
2594
2595 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2596 return;
2597
2598 // FIXME: Do something a bit more intelligent inside constructor and
2599 // destructor code. Constructors and destructors must assume unique access
2600 // to 'this', so checks on member variable access are disabled, but we should
2601 // still enable checks on other objects.
2602 if (isa<CXXConstructorDecl>(D))
2603 return; // Don't check inside constructors.
2604 if (isa<CXXDestructorDecl>(D))
2605 return; // Don't check inside destructors.
2606
2607 Handler.enterFunction(CurrentFunction);
2608
2609 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2610 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2611
2612 // We need to explore the CFG via a "topological" ordering.
2613 // That way, we will be guaranteed to have information about required
2614 // predecessor locksets when exploring a new block.
2615 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2616 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2617
2618 CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
2619 CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
2620
2621 // Mark entry block as reachable
2622 Initial.Reachable = true;
2623
2624 // Compute SSA names for local variables
2625 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2626
2627 // Fill in source locations for all CFGBlocks.
2628 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2629
2630 CapExprSet ExclusiveLocksAcquired;
2631 CapExprSet SharedLocksAcquired;
2632 CapExprSet LocksReleased;
2633
2634 // Add locks from exclusive_locks_required and shared_locks_required
2635 // to initial lockset. Also turn off checking for lock and unlock functions.
2636 // FIXME: is there a more intelligent way to check lock/unlock functions?
2637 if (!SortedGraph->empty()) {
2638 assert(*SortedGraph->begin() == &CFGraph->getEntry());
2639 FactSet &InitialLockset = Initial.EntrySet;
2640
2641 CapExprSet ExclusiveLocksToAdd;
2642 CapExprSet SharedLocksToAdd;
2643
2644 SourceLocation Loc = D->getLocation();
2645 for (const auto *Attr : D->attrs()) {
2646 Loc = Attr->getLocation();
2647 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2648 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2649 nullptr, D);
2650 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2651 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2652 // We must ignore such methods.
2653 if (A->args_size() == 0)
2654 return;
2655 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2656 nullptr, D);
2657 getMutexIDs(LocksReleased, A, nullptr, D);
2658 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2659 if (A->args_size() == 0)
2660 return;
2661 getMutexIDs(A->isShared() ? SharedLocksAcquired
2662 : ExclusiveLocksAcquired,
2663 A, nullptr, D);
2664 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2665 // Don't try to check trylock functions for now.
2666 return;
2667 }
2668 }
2669 ArrayRef<ParmVarDecl *> Params;
2670 if (CurrentFunction)
2671 Params = CurrentFunction->getCanonicalDecl()->parameters();
2672 else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
2673 Params = CurrentMethod->getCanonicalDecl()->parameters();
2674 else
2675 llvm_unreachable("Unknown function kind");
2676 for (const ParmVarDecl *Param : Params) {
2677 CapExprSet UnderlyingLocks;
2678 for (const auto *Attr : Param->attrs()) {
2679 Loc = Attr->getLocation();
2680 if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2681 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2682 nullptr, Param);
2683 getMutexIDs(LocksReleased, A, nullptr, Param);
2684 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2685 } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2686 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2687 nullptr, Param);
2688 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2689 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2690 getMutexIDs(A->isShared() ? SharedLocksAcquired
2691 : ExclusiveLocksAcquired,
2692 A, nullptr, Param);
2693 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2694 } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
2695 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2696 }
2697 }
2698 if (UnderlyingLocks.empty())
2699 continue;
2700 CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
2701 StringRef(),
2702 /*Neg=*/false, /*Reentrant=*/false);
2703 auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
2704 Cp, Param->getLocation(), FactEntry::Declared,
2705 UnderlyingLocks.size());
2706 for (const CapabilityExpr &M : UnderlyingLocks)
2707 ScopedEntry->addLock(M);
2708 addLock(InitialLockset, ScopedEntry, true);
2709 }
2710
2711 // FIXME -- Loc can be wrong here.
2712 for (const auto &Mu : ExclusiveLocksToAdd) {
2713 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2714 Mu, LK_Exclusive, Loc, FactEntry::Declared);
2715 addLock(InitialLockset, Entry, true);
2716 }
2717 for (const auto &Mu : SharedLocksToAdd) {
2718 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2719 Mu, LK_Shared, Loc, FactEntry::Declared);
2720 addLock(InitialLockset, Entry, true);
2721 }
2722 }
2723
2724 // Compute the expected exit set.
2725 // By default, we expect all locks held on entry to be held on exit.
2726 FactSet ExpectedFunctionExitSet = Initial.EntrySet;
2727
2728 // Adjust the expected exit set by adding or removing locks, as declared
2729 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2730 // issue the appropriate warning.
2731 // FIXME: the location here is not quite right.
2732 for (const auto &Lock : ExclusiveLocksAcquired)
2733 ExpectedFunctionExitSet.addLock(
2734 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
2735 D->getLocation()));
2736 for (const auto &Lock : SharedLocksAcquired)
2737 ExpectedFunctionExitSet.addLock(
2738 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
2739 D->getLocation()));
2740 for (const auto &Lock : LocksReleased)
2741 ExpectedFunctionExitSet.removeLock(FactMan, Lock);
2742
2743 for (const auto *CurrBlock : *SortedGraph) {
2744 unsigned CurrBlockID = CurrBlock->getBlockID();
2745 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2746
2747 // Use the default initial lockset in case there are no predecessors.
2748 VisitedBlocks.insert(CurrBlock);
2749
2750 // Iterate through the predecessor blocks and warn if the lockset for all
2751 // predecessors is not the same. We take the entry lockset of the current
2752 // block to be the intersection of all previous locksets.
2753 // FIXME: By keeping the intersection, we may output more errors in future
2754 // for a lock which is not in the intersection, but was in the union. We
2755 // may want to also keep the union in future. As an example, let's say
2756 // the intersection contains Mutex L, and the union contains L and M.
2757 // Later we unlock M. At this point, we would output an error because we
2758 // never locked M; although the real error is probably that we forgot to
2759 // lock M on all code paths. Conversely, let's say that later we lock M.
2760 // In this case, we should compare against the intersection instead of the
2761 // union because the real error is probably that we forgot to unlock M on
2762 // all code paths.
2763 bool LocksetInitialized = false;
2764 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2765 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2766 // if *PI -> CurrBlock is a back edge
2767 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2768 continue;
2769
2770 unsigned PrevBlockID = (*PI)->getBlockID();
2771 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2772
2773 // Ignore edges from blocks that can't return.
2774 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2775 continue;
2776
2777 // Okay, we can reach this block from the entry.
2778 CurrBlockInfo->Reachable = true;
2779
2780 FactSet PrevLockset;
2781 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2782
2783 if (!LocksetInitialized) {
2784 CurrBlockInfo->EntrySet = PrevLockset;
2785 LocksetInitialized = true;
2786 } else {
2787 // Surprisingly 'continue' doesn't always produce back edges, because
2788 // the CFG has empty "transition" blocks where they meet with the end
2789 // of the regular loop body. We still want to diagnose these as loops.
2790 intersectAndWarn(
2791 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
2792 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
2793 ? LEK_LockedSomeLoopIterations
2794 : LEK_LockedSomePredecessors);
2795 }
2796 }
2797
2798 // Skip rest of block if it's not reachable.
2799 if (!CurrBlockInfo->Reachable)
2800 continue;
2801
2802 BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);
2803
2804 // Visit all the statements in the basic block.
2805 for (const auto &BI : *CurrBlock) {
2806 switch (BI.getKind()) {
2807 case CFGElement::Statement: {
2808 CFGStmt CS = BI.castAs<CFGStmt>();
2809 LocksetBuilder.Visit(CS.getStmt());
2810 break;
2811 }
2812 // Ignore BaseDtor and MemberDtor for now.
2813 case CFGElement::AutomaticObjectDtor: {
2814 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2815 const auto *DD = AD.getDestructorDecl(AC.getASTContext());
2816 if (!DD->hasAttrs())
2817 break;
2818
2819 LocksetBuilder.handleCall(
2820 nullptr, DD,
2821 SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
2822 AD.getTriggerStmt()->getEndLoc());
2823 break;
2824 }
2825
2826 case CFGElement::CleanupFunction: {
2827 const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
2828 LocksetBuilder.handleCall(
2829 /*Exp=*/nullptr, CF.getFunctionDecl(),
2830 SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
2831 CF.getVarDecl()->getLocation());
2832 break;
2833 }
2834
2835 case CFGElement::TemporaryDtor: {
2836 auto TD = BI.castAs<CFGTemporaryDtor>();
2837
2838 // Clean up constructed object even if there are no attributes to
2839 // keep the number of objects in limbo as small as possible.
2840 if (auto Object = ConstructedObjects.find(
2841 TD.getBindTemporaryExpr()->getSubExpr());
2842 Object != ConstructedObjects.end()) {
2843 const auto *DD = TD.getDestructorDecl(AC.getASTContext());
2844 if (DD->hasAttrs())
2845 // TODO: the location here isn't quite correct.
2846 LocksetBuilder.handleCall(nullptr, DD, Object->second,
2847 TD.getBindTemporaryExpr()->getEndLoc());
2848 ConstructedObjects.erase(Object);
2849 }
2850 break;
2851 }
2852 default:
2853 break;
2854 }
2855 }
2856 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2857
2858 // For every back edge from CurrBlock (the end of the loop) to another block
2859 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
2860 // the one held at the beginning of FirstLoopBlock. We can look up the
2861 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
2862 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2863 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2864 // if CurrBlock -> *SI is *not* a back edge
2865 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2866 continue;
2867
2868 CFGBlock *FirstLoopBlock = *SI;
2869 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2870 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2871 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
2872 LEK_LockedSomeLoopIterations);
2873 }
2874 }
2875
2876 // Skip the final check if the exit block is unreachable.
2877 if (!Final.Reachable)
2878 return;
2879
2880 // FIXME: Should we call this function for all blocks which exit the function?
2881 intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
2882 LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);
2883
2884 Handler.leaveFunction(CurrentFunction);
2885}
2886
2887/// Check a function's CFG for thread-safety violations.
2888///
2889/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2890/// at the end of each block, and issue warnings for thread safety violations.
2891/// Each block in the CFG is traversed exactly once.
2892 void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2893 ThreadSafetyHandler &Handler,
2894 BeforeSet **BSet) {
2895 if (!*BSet)
2896 *BSet = new BeforeSet;
2897 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2898 Analyzer.runAnalysis(AC);
2899}
2900
2901 void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2902
2903/// Helper function that returns a LockKind required for the given level
2904/// of access.
2905 LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2906 switch (AK) {
2907 case AK_Read :
2908 return LK_Shared;
2909 case AK_Written :
2910 return LK_Exclusive;
2911 }
2912 llvm_unreachable("Unknown AccessKind");
2913}
This file defines AnalysisDeclContext, a class that manages the analysis context data for context sen...
Defines enum values for all the target-independent builtin functions.
static void dump(llvm::raw_ostream &OS, StringRef FunctionName, ArrayRef< CounterExpression > Expressions, ArrayRef< CounterMappingRegion > Regions)
static Decl::Kind getKind(const Decl *D)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
Forward-declares and imports various common LLVM datatypes that clang wants to use unqualified.
Defines an enumeration for C++ overloaded operators.
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
Defines the clang::SourceLocation class and associated facilities.
Defines various enumerations that describe declaration and type specifiers.
static void warnInvalidLock(ThreadSafetyHandler &Handler, const Expr *MutexExp, const NamedDecl *D, const Expr *DeclExp, StringRef Kind)
Issue a warning about an invalid lock expression.
static bool getStaticBooleanValue(Expr *E, bool &TCond)
static bool neverReturns(const CFGBlock *B)
static void findBlockLocations(CFG *CFGraph, const PostOrderCFGView *SortedGraph, std::vector< CFGBlockInfo > &BlockInfo)
Find the appropriate source locations to use when producing diagnostics for each block in the CFG.
static const ValueDecl * getValueDecl(const Expr *Exp)
Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const Expr * UnpackConstruction(const Expr *E)
C Language Family Type Representation.
__device__ __2f16 b
AnalysisDeclContext contains the context data for the function, method or block under analysis.
ASTContext & getASTContext() const
Expr * getLHS() const
Definition Expr.h:4024
Expr * getRHS() const
Definition Expr.h:4026
static bool isAssignmentOp(Opcode Opc)
Definition Expr.h:4110
Opcode getOpcode() const
Definition Expr.h:4019
const VarDecl * getVarDecl() const
Definition CFG.h:423
const Stmt * getTriggerStmt() const
Definition CFG.h:428
Represents a single basic block in a source-level CFG.
Definition CFG.h:605
pred_iterator pred_end()
Definition CFG.h:973
succ_iterator succ_end()
Definition CFG.h:991
bool hasNoReturnElement() const
Definition CFG.h:1109
CFGElement back() const
Definition CFG.h:908
ElementList::const_reverse_iterator const_reverse_iterator
Definition CFG.h:903
bool empty() const
Definition CFG.h:953
succ_iterator succ_begin()
Definition CFG.h:990
Stmt * getTerminatorStmt()
Definition CFG.h:1087
AdjacentBlocks::const_iterator const_pred_iterator
Definition CFG.h:959
pred_iterator pred_begin()
Definition CFG.h:972
unsigned getBlockID() const
Definition CFG.h:1111
Stmt * getTerminatorCondition(bool StripParens=true)
Definition CFG.cpp:6378
AdjacentBlocks::const_iterator const_succ_iterator
Definition CFG.h:966
Represents a top-level expression in a basic block.
Definition CFG.h:55
@ CleanupFunction
Definition CFG.h:79
@ AutomaticObjectDtor
Definition CFG.h:72
const CXXDestructorDecl * getDestructorDecl(ASTContext &astContext) const
Definition CFG.cpp:5398
const Stmt * getStmt() const
Definition CFG.h:139
Represents a source-level, intra-procedural CFG that represents the control-flow of a Stmt.
Definition CFG.h:1222
CFGBlock & getExit()
Definition CFG.h:1332
CFGBlock & getEntry()
Definition CFG.h:1330
unsigned getNumBlockIDs() const
Returns the total number of BlockIDs allocated (which start at 0).
Definition CFG.h:1409
arg_iterator arg_begin()
Definition ExprCXX.h:1678
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1692
arg_iterator arg_end()
Definition ExprCXX.h:1679
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1612
bool isCopyConstructor(unsigned &TypeQuals) const
Whether this constructor is a copy constructor (C++ [class.copy]p2, which can be used to copy the cla...
Definition DeclCXX.cpp:3008
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3083
ConstExprIterator const_arg_iterator
Definition Expr.h:3127
arg_iterator arg_begin()
Definition Expr.h:3136
arg_iterator arg_end()
Definition Expr.h:3139
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3062
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3070
Decl * getCalleeDecl()
Definition Expr.h:3056
CastKind getCastKind() const
Definition Expr.h:3656
Expr * getSubExpr()
Definition Expr.h:3662
const DeclGroupRef getDeclGroup() const
Definition Stmt.h:1629
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:1637
bool hasAttrs() const
Definition DeclBase.h:518
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition DeclBase.h:559
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isDefinedOutsideFunctionOrMethod() const
isDefinedOutsideFunctionOrMethod - This predicate returns true if this scoped decl is defined outside...
Definition DeclBase.h:949
DeclContext * getDeclContext()
Definition DeclBase.h:448
attr_range attrs() const
Definition DeclBase.h:535
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3078
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3073
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3061
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
bool isPRValue() const
Definition Expr.h:285
Expr * IgnoreCasts() LLVM_READONLY
Skip past any casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3057
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2794
QualType getReturnType() const
Definition Decl.h:2842
ArrayRef< ParmVarDecl * > parameters() const
Definition Decl.h:2771
FunctionDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition Decl.cpp:3688
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Definition Decl.cpp:3767
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4931
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4964
This represents a decl that may have a name.
Definition Decl.h:273
IdentifierInfo * getIdentifier() const
Get the identifier that names this declaration, if there is one.
Definition Decl.h:294
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:316
virtual void printName(raw_ostream &OS, const PrintingPolicy &Policy) const
Pretty-print the unqualified name of this declaration.
Definition Decl.cpp:1672
QualType getCanonicalType() const
Definition TypeBase.h:8337
bool isConstQualified() const
Determine whether this type is const-qualified.
Definition TypeBase.h:8358
Expr * getRetValue()
Definition Stmt.h:3187
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceLocation getEndLoc() const LLVM_READONLY
Definition Stmt.cpp:358
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
bool isPointerType() const
Definition TypeBase.h:8522
bool isReferenceType() const
Definition TypeBase.h:8546
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isLValueReferenceType() const
Definition TypeBase.h:8550
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9101
Expr * getSubExpr() const
Definition Expr.h:2287
Opcode getOpcode() const
Definition Expr.h:2282
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:711
QualType getType() const
Definition Decl.h:722
void checkBeforeAfter(const ValueDecl *Vd, const FactSet &FSet, ThreadSafetyAnalyzer &Analyzer, SourceLocation Loc, StringRef CapKind)
Return true if any mutexes in FSet are in the acquired_before set of Vd.
BeforeInfo * insertAttrExprs(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
Process acquired_before and acquired_after attributes on Vd.
BeforeInfo * getBeforeInfoForDecl(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
const PostOrderCFGView * getSortedGraph() const
const NamedDecl * getDecl() const
bool init(AnalysisDeclContext &AC)
bool equals(const CapabilityExpr &other) const
CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D, const Expr *DeclExp, til::SExpr *Self=nullptr)
Translate a clang expression in an attribute to a til::SExpr.
void setLookupLocalVarExpr(std::function< const Expr *(const NamedDecl *)> F)
til::SExpr * translate(const Stmt *S, CallingContext *Ctx)
til::SExpr * translateVariable(const VarDecl *VD, CallingContext *Ctx)
Handler class for thread safety warnings.
virtual void handleExpectMoreUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected)
Warn when we get fewer underlying mutexes than expected.
virtual void handleInvalidLockExp(SourceLocation Loc)
Warn about lock expressions which fail to resolve to lockable objects.
virtual void handleUnmatchedUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected, Name Actual)
Warn when an actual underlying mutex of a scoped lockable does not match the expected.
virtual void handleExpectFewerUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Actual)
Warn when we get more underlying mutexes than expected.
virtual void enterFunction(const FunctionDecl *FD)
Called by the analysis when starting analysis of a function.
virtual void handleIncorrectUnlockKind(StringRef Kind, Name LockName, LockKind Expected, LockKind Received, SourceLocation LocLocked, SourceLocation LocUnlock)
Warn about an unlock function call that attempts to unlock a lock with the incorrect lock kind.
virtual void handleMutexHeldEndOfScope(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocEndOfScope, LockErrorKind LEK, bool ReentrancyMismatch=false)
Warn about situations where a mutex is sometimes held and sometimes not.
virtual void leaveFunction(const FunctionDecl *FD)
Called by the analysis when finishing analysis of a function.
virtual void handleExclusiveAndShared(StringRef Kind, Name LockName, SourceLocation Loc1, SourceLocation Loc2)
Warn when a mutex is held exclusively and shared at the same point.
virtual void handleMutexNotHeld(StringRef Kind, const NamedDecl *D, ProtectedOperationKind POK, Name LockName, LockKind LK, SourceLocation Loc, Name *PossibleMatch=nullptr)
Warn when a protected operation occurs while the specific mutex protecting the operation is not locked.
virtual void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName, SourceLocation Loc)
Warn when a function is called while an excluded mutex is locked.
virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK, AccessKind AK, SourceLocation Loc)
Warn when a protected operation occurs while no locks are held.
virtual void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc, SourceLocation LocPreviousUnlock)
Warn about unlock function calls that do not have a prior matching lock expression.
virtual void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, SourceLocation Loc)
Warn when acquiring a lock whose negative capability is not held.
virtual void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocDoubleLock)
Warn about lock function calls for locks which are already held.
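Clients receive all of the above as virtual callbacks; a minimal sketch of a handler subclass overriding just two of them (PrintDiag is a hypothetical diagnostic sink, not part of this interface):
// Sketch: a client handler forwarding two callbacks to its own sink.
void PrintDiag(SourceLocation Loc, StringRef Msg); // hypothetical
class MyHandler : public threadSafety::ThreadSafetyHandler {
public:
  void handleInvalidLockExp(SourceLocation Loc) override {
    PrintDiag(Loc, "cannot resolve lock expression");
  }
  void handleDoubleLock(StringRef Kind, Name LockName,
                        SourceLocation LocLocked,
                        SourceLocation LocDoubleLock) override {
    PrintDiag(LocDoubleLock, "capability is already held");
  }
};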
bool matches(const til::SExpr *E1, const til::SExpr *E2)
LockKind getLockKindFromAccessKind(AccessKind AK)
Helper function that returns a LockKind required for the given level of access.
LockErrorKind
This enum distinguishes between different situations where we warn due to inconsistent locking.
@ LEK_NotLockedAtEndOfFunction
Expecting a capability to be held at the end of a function.
@ LEK_LockedSomePredecessors
A capability is locked in some but not all predecessors of a CFGBlock.
@ LEK_LockedAtEndOfFunction
A capability is still locked at the end of a function.
@ LEK_LockedSomeLoopIterations
A capability is locked for some but not all loop iterations.
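For illustration, a hedged sketch of code that trips two of these cases (reusing the annotated Mutex type from the earlier sketch):
Mutex mu;
void joinPoint(bool b) {
  if (b)
    mu.Lock();
  // LEK_LockedSomePredecessors: mu is held on only one path into here
  if (b)
    mu.Unlock();
}
void holdsAtExit() {
  mu.Lock();
} // LEK_LockedAtEndOfFunction: mu is still held when the function ends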
void threadSafetyCleanup(BeforeSet *Cache)
AccessKind
This enum distinguishes between different ways to access (read or write) a variable.
@ AK_Written
Writing a variable.
@ AK_Read
Reading a variable.
LockKind
This enum distinguishes between different kinds of lock actions.
@ LK_Shared
Shared/reader lock of a mutex.
@ LK_Exclusive
Exclusive/writer lock of a mutex.
@ LK_Generic
Can be either Shared or Exclusive.
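The relationship between AccessKind and LockKind is the natural reader/writer mapping; a sketch of what getLockKindFromAccessKind computes:
// Sketch: reads need at least a shared lock, writes an exclusive one.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
  case AK_Read:
    return LK_Shared;
  case AK_Written:
    return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}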
void runThreadSafetyAnalysis(AnalysisDeclContext &AC, ThreadSafetyHandler &Handler, BeforeSet **Bset)
Check a function's CFG for thread-safety violations.
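A hedged sketch of how a caller drives the analysis and manages the BeforeSet cache across multiple functions (AC and Handler are assumed to be set up elsewhere):
// Sketch: run the analysis per function, sharing one BeforeSet cache.
BeforeSet *Cache = nullptr;   // lazily allocated by the analysis
runThreadSafetyAnalysis(AC, Handler, &Cache);
// ... repeat for further functions, reusing Cache ...
threadSafetyCleanup(Cache);   // release the cache when finished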
ProtectedOperationKind
This enum distinguishes between different kinds of operations that may need to be protected by locks.
@ POK_PtPassByRef
Passing a pt-guarded variable by reference.
@ POK_PassPointer
Passing pointer to a guarded variable.
@ POK_VarDereference
Dereferencing a variable (e.g. p in *p = 5;)
@ POK_PassByRef
Passing a guarded variable by reference.
@ POK_ReturnByRef
Returning a guarded variable by reference.
@ POK_PtPassPointer
Passing a pt-guarded pointer.
@ POK_PtReturnPointer
Returning a pt-guarded pointer.
@ POK_VarAccess
Reading or writing a variable (e.g. x in x = 5;)
@ POK_FunctionCall
Making a function call (e.g. foo())
@ POK_ReturnPointer
Returning pointer to a guarded variable.
@ POK_PtReturnByRef
Returning a pt-guarded variable by reference.
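The guarded/pt-guarded split above is visible directly in annotated user code; a minimal sketch (attribute spellings as in the Thread Safety Analysis documentation, Mutex as in the earlier sketch):
Mutex mu;
int x __attribute__((guarded_by(mu)));      // accesses are POK_VarAccess
int *p __attribute__((pt_guarded_by(mu)));  // dereferences are POK_VarDereference
void touch() {
  x = 1;    // POK_VarAccess: writing x requires mu
  *p = 2;   // POK_VarDereference: writing *p requires mu
}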
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.