//===----------------------------------------------------------------------===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// To run this test, build the libcxx and cxx-benchmarks targets, then:
//   cd third-party/benchmark/tools
//   ./compare.py filters ../../../build/libcxx/benchmarks/atomic_wait_vs_mutex_lock.libcxx.out BM_atomic_wait BM_mutex

#include <atomic>
#include <cstdint>
#include <mutex>
#include <numeric>
#include <stop_token>
#include <thread>
#include <vector>

#include "benchmark/benchmark.h"
#include "make_test_thread.h"

using namespace std::chrono_literals;

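// A minimal RAII lock built on C++20 std::atomic<bool>::wait/notify_all: the
// constructor blocks until the flag can be claimed with a compare-exchange, and
// the destructor releases the flag and wakes all waiters, so it can be used the
// same way as std::unique_lock in the benchmark below.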
struct AtomicLock {
  std::atomic<bool>& locked_;

  AtomicLock(const AtomicLock&) = delete;
  AtomicLock& operator=(const AtomicLock&) = delete;

  explicit AtomicLock(std::atomic<bool>& l) : locked_(l) { lock(); }
  ~AtomicLock() { unlock(); }

  void lock() {
    while (true) {
      // Block until the flag reads false, then try to claim it with a CAS;
      // retry if another waiter won the race.
      locked_.wait(true, std::memory_order_relaxed);
      bool expected = false;
      if (locked_.compare_exchange_weak(expected, true, std::memory_order_acquire, std::memory_order_relaxed))
        break;
    }
  }

  void unlock() {
    // Release the flag and wake every thread blocked in lock().
    locked_.store(false, std::memory_order_release);
    locked_.notify_all();
  }
};
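
// Note: unlike std::mutex, AtomicLock makes no fairness guarantees; notify_all
// wakes every waiter but only one compare-exchange succeeds and the rest go back
// to waiting. That contended wake-and-retry path is what gets compared against
// std::mutex lock/unlock below.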
45 | |
46 | // using LockState = std::atomic<bool>; |
47 | // using Lock = AtomicLock; |
48 | |
49 | // using LockState = std::mutex; |
50 | // using Lock = std::unique_lock<std::mutex>; |
51 | |
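// Spawns num_threads workers that block on start_flag. Each benchmark iteration
// bumps start_flag to release them; every worker then performs state.range(0)
// lock/unlock cycles on the shared lock_state and bumps done_count when finished.
// The main thread yields until all workers have reported in, so one measured
// iteration covers num_threads * state.range(0) lock/unlock pairs under contention.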
template <class LockState, class Lock>
void test_multi_thread_lock_unlock(benchmark::State& state) {
  std::uint64_t total_loop_test_param = state.range(0);
  constexpr auto num_threads          = 15;
  std::vector<std::jthread> threads;
  threads.reserve(num_threads);

  std::atomic<std::uint64_t> start_flag = 0;
  std::atomic<std::uint64_t> done_count = 0;

  LockState lock_state{};

  auto func = [&start_flag, &done_count, &lock_state, total_loop_test_param](std::stop_token st) {
    std::uint64_t old_start = 0;
    while (!st.stop_requested()) {
      // Wait for the main thread to bump start_flag before starting a round.
      start_flag.wait(old_start);
      old_start = start_flag.load();

      // main things under test: locking and unlocking in the loop
      for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
        Lock l{lock_state};
      }

      done_count.fetch_add(1);
    }
  };

  for (size_t i = 0; i < num_threads; ++i) {
    threads.emplace_back(support::make_test_jthread(func));
  }

  for (auto _ : state) {
    done_count = 0;
    start_flag.fetch_add(1);
    start_flag.notify_all();
    while (done_count < num_threads) {
      std::this_thread::yield();
    }
  }

  // Ask the workers to stop, then bump start_flag one last time so any thread
  // still blocked in start_flag.wait() wakes up and observes the stop request.
  for (auto& t : threads) {
    t.request_stop();
  }
  start_flag.fetch_add(1);
  start_flag.notify_all();
  for (auto& t : threads) {
    t.join();
  }
}
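
// Both configurations sweep the per-thread lock/unlock count from 2^10 to 2^20,
// doubling it at each step.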
void BM_atomic_wait(benchmark::State& state) { test_multi_thread_lock_unlock<std::atomic<bool>, AtomicLock>(state); }
BENCHMARK(BM_atomic_wait)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);

void BM_mutex(benchmark::State& state) {
  test_multi_thread_lock_unlock<std::mutex, std::unique_lock<std::mutex>>(state);
}
BENCHMARK(BM_mutex)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);

BENCHMARK_MAIN();