19 #include "absl/base/config.h"
20 #include "absl/base/internal/cycleclock.h"
21 #include "absl/base/internal/spinlock.h"
22 #include "absl/synchronization/blocking_counter.h"
23 #include "absl/synchronization/internal/thread_pool.h"
24 #include "absl/synchronization/mutex.h"
25 #include "benchmark/benchmark.h"
BENCHMARK(BM_Mutex)->UseRealTime()->Threads(1)->ThreadPerCpu();
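
// DelayNs is referenced by BM_Contended below but its definition is not shown
// in this excerpt.  This is a sketch of a cycle-counter busy-wait consistent
// with that use (assumption: the real helper may differ in detail).
void DelayNs(int64_t ns, int* data) {
  const int64_t end =
      absl::base_internal::CycleClock::Now() +
      static_cast<int64_t>(ns * absl::base_internal::CycleClock::Frequency() /
                           1e9);
  while (absl::base_internal::CycleClock::Now() < end) {
    ++(*data);
    benchmark::DoNotOptimize(*data);
  }
}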

// RAII locker so the contended benchmark below can be templated over
// absl::Mutex, absl::base_internal::SpinLock, and std::mutex.
template <typename MutexType>
class RaiiLocker {
 public:
  explicit RaiiLocker(MutexType* mu) : mu_(mu) { mu_->Lock(); }
  ~RaiiLocker() { mu_->Unlock(); }

 private:
  MutexType* mu_;
};

// std::mutex spells its operations lock()/unlock(), so it needs a
// specialization.
template <>
class RaiiLocker<std::mutex> {
 public:
  explicit RaiiLocker(std::mutex* mu) : mu_(mu) { mu_->lock(); }
  ~RaiiLocker() { mu_->unlock(); }

 private:
  std::mutex* mu_;
};
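
// Note: BM_Contended below only ever constructs a RaiiLocker<MutexType> on a
// pointer to the shared mutex, so supporting another mutex type needs at most
// a new specialization when its lock/unlock methods are named differently.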

// Overrides the calling thread's Mutex priority for the lifetime of the
// object, letting the benchmarks exercise Mutex's priority-aware queueing.
class ScopedThreadMutexPriority {
 public:
  explicit ScopedThreadMutexPriority(int priority) {
    // ... records `priority` in the thread's per-thread synchronization state
    // (implementation elided in this excerpt) ...
  }
  ~ScopedThreadMutexPriority() {
    // ... restores the normal priority lookup (implementation elided) ...
  }
};

// Measures the cost of enqueueing a thread on a contended Mutex: the lock
// holder waits until every other looping thread is blocked on the mutex
// before releasing it.
void BM_MutexEnqueue(benchmark::State& state) {
  // In the "multiple priorities" variant, one thread runs at Mutex priority 1
  // while the rest run at priority 0, which sends the priority-0 threads down
  // the more expensive priority-aware enqueue path.
  const bool multiple_priorities = state.range(0);
  ScopedThreadMutexPriority priority_setter(
      (multiple_priorities && state.thread_index() != 0) ? 1 : 0);
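
  // All benchmark threads operate on one function-static Shared instance,
  // which is intentionally leaked so it outlives every thread.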
  struct Shared {
    absl::Mutex mu;
    std::atomic<int> looping_threads{0};
    std::atomic<int> blocked_threads{0};
    std::atomic<bool> thread_has_mutex{false};
  };
  static Shared* shared = new Shared;

  // Have Abseil's per-thread semaphore machinery keep a live count of the
  // threads currently blocked on a Mutex in shared->blocked_threads.
  absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
      &shared->blocked_threads);

  // The shared counters must have returned to zero before a new run starts.
  ABSL_RAW_CHECK(
      shared->looping_threads.load(std::memory_order_relaxed) == 0 &&
          shared->blocked_threads.load(std::memory_order_relaxed) == 0 &&
          !shared->thread_has_mutex.load(std::memory_order_relaxed),
      "Shared state isn't zeroed at start of benchmark iteration");
  static constexpr int kBatchSize = 1000;
  while (state.KeepRunningBatch(kBatchSize)) {
    shared->looping_threads.fetch_add(1);
    for (int i = 0; i < kBatchSize; i++) {
      {
        absl::MutexLock l(&shared->mu);
        shared->thread_has_mutex.store(true, std::memory_order_relaxed);
        // Hold the mutex until this is the only looping thread not blocked on
        // it; the other threads are then all paying the enqueue cost being
        // measured.
        while (shared->looping_threads.load(std::memory_order_relaxed) -
                   shared->blocked_threads.load(std::memory_order_relaxed) !=
               1) {
        }
        shared->thread_has_mutex.store(false);
      }
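
      // Before looping around and blocking on the mutex again, wait for some
      // other thread to pick it up (or for this thread to be the only one left
      // looping), so that each iteration really does enqueue behind a holder.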
      while (!shared->thread_has_mutex.load(std::memory_order_relaxed) &&
             shared->looping_threads.load(std::memory_order_relaxed) > 1) {
      }
    }
    shared->looping_threads.fetch_add(-1);
  }
}

BENCHMARK(BM_MutexEnqueue)
    ->ArgName("multiple_priorities")
    ->Arg(false)
    ->Arg(true);

// Benchmarks a contended lock, templated on the mutex type so the same body
// can exercise absl::Mutex, absl::base_internal::SpinLock, and std::mutex.
template <typename MutexType>
void BM_Contended(benchmark::State& state) {
  // state.range(1) is the number of distinct Mutex priorities in play; spread
  // the benchmark threads across them.
  const int priority = state.thread_index() % state.range(1);
  ScopedThreadMutexPriority priority_setter(priority);

  struct Shared {
    MutexType mu;
    int data = 0;
  };
  static auto* shared = new Shared;

  for (auto _ : state) {
    // Critical section: state.range(0) ns of work on the shared data.
    RaiiLocker<MutexType> locker(&shared->mu);
    DelayNs(state.range(0), &shared->data);
  }
}
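
// The two arguments used by BM_Contended are produced by SetupBenchmarkArgs:
// range(0) is the critical-section length in nanoseconds and range(1) is the
// number of distinct Mutex priorities.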
void SetupBenchmarkArgs(benchmark::internal::Benchmark* bm,
                        bool do_test_priorities) {
  const int max_num_priorities = do_test_priorities ? 2 : 1;
  // Empirically chosen critical-section lengths: 1 ns is low contention,
  // 2000 ns is high contention.
  for (int critical_section_ns : {1, 20, 50, 200, 2000}) {
    for (int num_priorities = 1; num_priorities <= max_num_priorities;
         num_priorities++) {
      bm->ArgPair(critical_section_ns, num_priorities);
    }
  }
}
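
// Each registration below applies the argument grid above to one
// instantiation of BM_Contended via benchmark's ->Apply() hook.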
BENCHMARK_TEMPLATE(BM_Contended, absl::Mutex)
    ->Apply([](benchmark::internal::Benchmark* bm) {
      SetupBenchmarkArgs(bm, /*do_test_priorities=*/true);
    });

BENCHMARK_TEMPLATE(BM_Contended, absl::base_internal::SpinLock)
    ->Apply([](benchmark::internal::Benchmark* bm) {
      SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
    });

BENCHMARK_TEMPLATE(BM_Contended, std::mutex)
    ->Apply([](benchmark::internal::Benchmark* bm) {
      SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
    });

// Measures the overhead of evaluating Conditions when a Mutex is released.
// Waiters that pass the same condition function and argument form one
// equivalence class, which Mutex can avoid evaluating more than once per
// release.
void BM_ConditionWaiters(benchmark::State& state) {
  int num_classes = state.range(0);
  int num_waiters = state.range(1);

  struct Helper {
    static void Waiter(absl::BlockingCounter* init, absl::Mutex* m, int* p) {
      init->DecrementCount();
      // Block until the waited-on value reaches zero.
      m->LockWhen(absl::Condition(
          static_cast<bool (*)(int*)>([](int* v) { return *v == 0; }), p));
      m->Unlock();
    }
  };

  // num_classes == 0 means "no equivalence classes": give every waiter its own
  // counter, and therefore its own Condition.
  if (num_classes == 0) {
    num_classes = num_waiters;
  }

  absl::BlockingCounter init(num_waiters);
  absl::Mutex mu;
  std::vector<int> equivalence_classes(num_classes, 1);

  // The pool is declared after the state it uses so that its destructor joins
  // the waiter threads before `mu` and the counters are destroyed.
  absl::synchronization_internal::ThreadPool pool(num_waiters);

  for (int i = 0; i < num_waiters; i++) {
    // Waiters whose index maps to the same class share a counter, and
    // therefore a Condition equivalence class.
    pool.Schedule([&, i] {
      Helper::Waiter(&init, &mu, &equivalence_classes[i % num_classes]);
    });
  }
  // Wait until every waiter has started before measuring.
  init.Wait();

  // The measured section: each Unlock() has to consider the blocked waiters'
  // Conditions, once per equivalence class.
  for (auto _ : state) {
    mu.Lock();
    mu.Unlock();
  }

  // Release the waiters by zeroing every class's counter.
  mu.Lock();
  for (int i = 0; i < num_classes; i++) {
    equivalence_classes[i] = 0;
  }
  mu.Unlock();
}

// Some platforms can support many more waiter threads than others.
#if defined(__linux__) && !defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr int kMaxConditionWaiters = 8192;
#else
constexpr int kMaxConditionWaiters = 1024;
#endif
BENCHMARK(BM_ConditionWaiters)->RangePair(0, 2, 1, kMaxConditionWaiters);
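
// RangePair sweeps both arguments: the first range [0, 2] covers the
// num_classes settings (0 meaning "every waiter in its own class") and the
// second range [1, kMaxConditionWaiters] sweeps the waiter count in the
// library's default multiplier steps.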