#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
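
// Usage sketch (reconstructed from the Prewait/CommitWait/CancelWait/Notify
// fragments below, not verbatim from this header; `ec`, `predicate`, `act()`,
// `waiters` and `my_index` are placeholders):
//
//   Waiting thread:
//     if (predicate)
//       return act();
//     EventCount::Waiter& w = waiters[my_index];
//     ec.Prewait(&w);
//     if (predicate) {   // re-check now that we are registered as a waiter
//       ec.CancelWait(&w);
//       return act();
//     }
//     ec.CommitWait(&w);
//
//   Notifying thread:
//     predicate = true;
//     ec.Notify(true);   // must happen after the predicate change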

    // In Prewait(): full fence so the prewait registration is visible to a
    // concurrent Notify() before this thread re-checks the predicate (pairs
    // with the matching fence in Notify()).
    std::atomic_thread_fence(std::memory_order_seq_cst);

    // In CommitWait(): wait until our epoch comes up, then move this thread
    // from the prewait counter onto the committed-waiter stack.
    uint64_t epoch =
        (w->epoch & kEpochMask) +
        (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
    uint64_t state = state_.load(std::memory_order_seq_cst);
    for (;;) {
      // Signed difference keeps the epoch comparison robust to wraparound.
      if (int64_t((state & kEpochMask) - epoch) < 0) {
        // The preceding waiter has not decided on its fate. Wait until it
        // calls either CancelWait or CommitWait, or is notified.
        EIGEN_THREAD_YIELD();
        state = state_.load(std::memory_order_seq_cst);
        continue;
      }
      // We've already been notified.
      if (int64_t((state & kEpochMask) - epoch) > 0) return;
      // Remove this thread from the prewait counter and push it onto the
      // waiter stack (the low bits hold the index of the stack top).
      uint64_t newstate = state - kWaiterInc + kEpochInc;
      newstate = (newstate & ~kStackMask) | (w - &waiters_[0]);
      w->next.store(nullptr, std::memory_order_relaxed);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_release))
        break;
    }
    Park(w);
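
    // A reading of the orderings above (not a comment from the original
    // source): the release CAS publishes w->next and the stack push, and
    // Notify()'s acquire CAS on state_ observes the fully initialized
    // waiter before handing it to Unpark().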

    // In CancelWait(): undo the preceding Prewait() without parking. The
    // epoch is derived from w->epoch exactly as in CommitWait() above.
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      if (int64_t((state & kEpochMask) - epoch) < 0) {
        // The preceding waiter has not decided on its fate. Wait until it
        // calls either CancelWait or CommitWait, or is notified.
        EIGEN_THREAD_YIELD();
        state = state_.load(std::memory_order_relaxed);
        continue;
      }
      // We've already been notified.
      if (int64_t((state & kEpochMask) - epoch) > 0) return;
      // Remove this thread from the prewait counter.
      if (state_.compare_exchange_weak(state, state - kWaiterInc + kEpochInc,
                                       std::memory_order_relaxed))
        return;
    }

    // In Notify(all): wake one or all waiters after the predicate changed.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    uint64_t state = state_.load(std::memory_order_acquire);
    for (;;) {
      if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
        return;
      uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
      uint64_t newstate;
      if (all) {
        // Reset the prewait counter and empty the wait list.
        newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
      } else if (waiters) {
        // There is a thread in pre-wait state, unblock it.
        newstate = state + kEpochInc - kWaiterInc;
      } else {
        // Pop a waiter from the stack and unpark it.
        Waiter* w = &waiters_[state & kStackMask];
        Waiter* wnext = w->next.load(std::memory_order_relaxed);
        uint64_t next = kStackMask;
        if (wnext != nullptr) next = wnext - &waiters_[0];
        newstate = (state & kEpochMask) + next;
      }
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acquire)) {
        if (!all && waiters) return;  // unblocked a pre-wait thread
        if ((state & kStackMask) == kStackMask) return;  // stack was empty
        Waiter* w = &waiters_[state & kStackMask];
        if (!all) w->next.store(nullptr, std::memory_order_relaxed);
        Unpark(w);
        return;
      }
    }
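
// The masks and increments used above come from the state_ bit layout; a
// sketch of the assumed layout (bit widths are reconstructed, not confirmed
// by this fragment): the low bits index the top of the committed-waiter
// stack, the middle bits count threads in prewait, and the high bits hold
// the modification epoch.
static const uint64_t kStackBits = 16;
static const uint64_t kStackMask = (1ull << kStackBits) - 1;
static const uint64_t kWaiterBits = 16;
static const uint64_t kWaiterShift = 16;
static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
                                    << kWaiterShift;
static const uint64_t kWaiterInc = 1ull << kWaiterShift;
static const uint64_t kEpochBits = 32;
static const uint64_t kEpochShift = 32;
static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
static const uint64_t kEpochInc = 1ull << kEpochShift;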

    // In Park(): block on the waiter's condition variable until signaled.
    std::unique_lock<std::mutex> lock(w->mu);

    // In Unpark(): walk the chain of popped waiters and signal each one.
    Waiter* next = nullptr;
    for (Waiter* w = waiters; w; w = next) {
      next = w->next.load(std::memory_order_relaxed);
      // Take the waiter's mutex so a concurrent Park() can't miss the signal.
      std::unique_lock<std::mutex> lock(w->mu);
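
// Sketch of the Waiter record that the fragments above rely on
// (reconstructed; the exact field set and any anti-false-sharing padding
// are assumptions):
class Waiter {
  friend class EventCount;
  std::atomic<Waiter*> next;   // intrusive link in the waiter stack
  std::mutex mu;               // guards state, paired with cv
  std::condition_variable cv;  // signaled by Unpark()
  uint64_t epoch;              // state_ snapshot taken by Prewait()
  unsigned state;              // kNotSignaled -> kWaiting -> kSignaled
  enum { kNotSignaled, kWaiting, kSignaled };
};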

#endif  // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_