//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

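// A spin lock suitable for static/global storage: it has no constructor, and
// the all-zero state means "unlocked", so zero-initialized instances are ready
// to use; Init() simply resets the state back to unlocked.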
class SANITIZER_MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() SANITIZER_ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() SANITIZER_RELEASE() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};

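// A StaticSpinMutex with a constructor, for instances that do not live in
// zero-initialized (static) storage.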
class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
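// Illustrative pairing (hypothetical code):
//   // waiter thread:                     // waking thread:
//   sem.Wait();   // park until Post()    sem.Post();  // unpark one waiter
//   // If this was the last waiter to return from Wait(), it may now
//   // safely destroy `sem`.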
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};

typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};

// The Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables, they are not usable early on during process init
// when `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table specifies which mutexes can be locked while other mutexes are
  // held. If the entry for MutexTypeFoo contains MutexTypeBar,
  // then the Bar mutex can be locked while the Foo mutex is held.
  // Entries can also contain the special MutexLeaf/MutexMulti marks
  // (see the illustrative entry after the struct).
  MutexType can_lock[10];
};
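// Illustrative entry (hypothetical mutex types, for exposition only):
//   {MutexTypeFoo, "Foo", {MutexTypeBar}}
// declares that the Bar mutex may be acquired while the Foo mutex is held.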
#endif

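// Records lock/unlock events for runtime lock-order (deadlock) checking.
// All methods compile to no-ops unless SANITIZER_CHECK_DEADLOCKS is enabled.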
class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};

// Reader-writer mutex.
// Derives from CheckedMutex for the sake of the empty base optimization (EBO):
// we could make it a field marked with [[no_unique_address]],
// but that attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    u64 state = atomic_load_relaxed(&state_);
    for (;;) {
      if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
        return false;
      // The mutex is not read-/write-locked, try to lock.
      if (LIKELY(atomic_compare_exchange_weak(
              &state_, &state, state | kWriterLock, memory_order_acquire))) {
        CheckedMutex::Lock();
        return true;
      }
    }
  }

  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not verify that the calling thread is the thread that
  // owns the mutex. Such an ownership check, while more strictly correct,
  // causes problems in cases like StopTheWorld, where a parent thread owns the
  // mutex but a child thread checks that it is locked. Rather than maintaining
  // complex state to work around those situations, we only check that the
  // mutex is locked at all.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 3 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake-up, both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the presence
  // of other blocked threads.
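  //
  // Bit layout of state_ (with kCounterWidth == 20):
  //   bits [ 0, 20): number of readers holding the lock
  //   bits [20, 40): number of waiting readers
  //   bits [40, 60): number of waiting writers
  //   bit  60: kWriterLock
  //   bit  61: kWriterSpinWait
  //   bit  62: kReaderSpinWait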
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};

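// OS-specific park/unpark primitives with futex-like semantics:
// FutexWait blocks the calling thread if *p still equals cmp;
// FutexWake wakes up to `count` threads blocked on p.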
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
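// Usage sketch (illustrative; `mu` and the critical sections are hypothetical):
//   Mutex mu;
//   { Lock l(&mu); /* exclusive (write) critical section */ }
//   { ReadLock l(&mu); /* shared (read) critical section */ }
//   { RWLock l(&mu, /*write=*/false); /* mode chosen at runtime */ }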

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H