abseil-cpp/absl/flags/internal/sequence_lock.h
//
// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>
#include <cassert>
#include <cstring>

#include "absl/base/optimization.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {

// Align 'x' up to the nearest 'align' bytes.
inline constexpr size_t AlignUp(size_t x, size_t align) {
  return align * ((x + align - 1) / align);
}
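
// Illustrative checks (an editor's sketch, not part of the upstream header):
// AlignUp rounds up to the next multiple of `align` and leaves already-aligned
// values unchanged.
static_assert(AlignUp(13, 8) == 16, "13 rounds up to the next multiple of 8");
static_assert(AlignUp(16, 8) == 16, "aligned values are unchanged");
static_assert(AlignUp(0, 8) == 0, "zero is already aligned");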

// A SequenceLock implements lock-free reads. A sequence counter is incremented
// before and after each write, and readers access the counter before and after
// accessing the protected data. If the counter is verified to not change during
// the access, and the sequence counter value was even, then the reader knows
// that the read was race-free and valid. Otherwise, the reader must fall back
// to a Mutex-based code path.
//
// This particular SequenceLock starts in an "uninitialized" state in which
// TryRead() returns false. It must be enabled by calling MarkInitialized().
// This serves as a marker that the associated flag value has not yet been
// initialized and a slow path needs to be taken.
//
// The memory reads and writes protected by this lock must use the provided
// `TryRead()` and `Write()` functions. These functions behave similarly to
// `memcpy()`, with one oddity: the protected data must be an array of
// `std::atomic<uint64_t>`. This is to comply with the C++ standard, which
// considers data races on non-atomic objects to be undefined behavior. See "Can
// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
// Boehm for more details. A usage sketch follows the class definition below.
//
// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
class SequenceLock {
 public:
  constexpr SequenceLock() : lock_(kUninitialized) {}

  // Mark that this lock is ready for use.
  void MarkInitialized() {
    assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
    lock_.store(0, std::memory_order_release);
  }

  // Copy "size" bytes of data from "src" to "dst", protected as a read-side
  // critical section of the sequence lock.
  //
  // Unlike traditional sequence lock implementations, which loop until they get
  // a clean read, this implementation returns false in the case of concurrent
  // calls to `Write`. In such a case, the caller should fall back to a
  // locking-based slow path.
  //
  // Returns false if the sequence lock was not yet marked as initialized.
  //
  // NOTE: If this returns false, "dst" may be overwritten with undefined
  // (potentially uninitialized) data.
  bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
    // Acquire barrier ensures that no loads done by RelaxedCopyFromAtomic()
    // are reordered above the first load of the sequence counter.
    int64_t seq_before = lock_.load(std::memory_order_acquire);
    if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
    RelaxedCopyFromAtomic(dst, src, size);
    // Another acquire fence ensures that the load of 'lock_' below is
    // strictly ordered after the RelaxedCopyFromAtomic call above.
    std::atomic_thread_fence(std::memory_order_acquire);
    int64_t seq_after = lock_.load(std::memory_order_relaxed);
    return ABSL_PREDICT_TRUE(seq_before == seq_after);
  }
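
  // Caller pattern (an editor's sketch; `seq_lock`, `data`, `buf`, `kWords`,
  // and `mu` are hypothetical names, not part of this header). Writers are
  // assumed to hold `mu`, so the mutex path cannot race with Write():
  //
  //   uint64_t buf[kWords];
  //   if (!seq_lock.TryRead(buf, data, sizeof(buf))) {
  //     absl::MutexLock l(&mu);  // slow path: excludes all writers
  //     seq_lock.TryRead(buf, data, sizeof(buf));  // clean unless uninitialized
  //   }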

  // Copy "size" bytes from "src" to "dst" as a write-side critical section
  // of the sequence lock. Any concurrent readers will be forced to retry
  // until they get a read that does not conflict with this write.
  //
  // This call must be externally synchronized against other calls to Write,
  // but may proceed concurrently with reads.
  void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
    // We can use relaxed instructions to increment the counter since we
    // are externally synchronized. The std::atomic_thread_fence below
    // ensures that the counter updates don't get interleaved with the
    // copy to the data.
    int64_t orig_seq = lock_.load(std::memory_order_relaxed);
    assert((orig_seq & 1) == 0);  // Must be initially unlocked.
    lock_.store(orig_seq + 1, std::memory_order_relaxed);

    // We put a release fence between the update to lock_ and the writes to
    // shared data. Thus all stores to shared data are effectively release
    // operations, and the update to lock_ above cannot be reordered past any of
    // them. Note that this barrier is not for the counter store above; a
    // release barrier for that store would go before it, not after.
    std::atomic_thread_fence(std::memory_order_release);
    RelaxedCopyToAtomic(dst, src, size);
    // "Release" semantics ensure that none of the writes done by
    // RelaxedCopyToAtomic() can be reordered after the following modification.
    lock_.store(orig_seq + 2, std::memory_order_release);
  }

  // Return the number of times that Write() has been called.
  //
  // REQUIRES: This must be externally synchronized against concurrent calls to
  // `Write()` or `IncrementModificationCount()`.
  // REQUIRES: `MarkInitialized()` must have been previously called.
  int64_t ModificationCount() const {
    int64_t val = lock_.load(std::memory_order_relaxed);
    assert(val != kUninitialized && (val & 1) == 0);
    return val / 2;
  }
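
  // Counter arithmetic (editor's note, not in the upstream header): each
  // completed Write() leaves lock_ two higher (odd while a write is in
  // progress, even when idle), so after three writes lock_ holds 6 and
  // ModificationCount() returns 6 / 2 == 3.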

  // REQUIRES: This must be externally synchronized against concurrent calls to
  // `Write()` or `ModificationCount()`.
  // REQUIRES: `MarkInitialized()` must have been previously called.
  void IncrementModificationCount() {
    int64_t val = lock_.load(std::memory_order_relaxed);
    assert(val != kUninitialized);
    lock_.store(val + 2, std::memory_order_relaxed);
  }

 private:
  // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
  // atomics.
  static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
                                    size_t size) {
    char* dst_byte = static_cast<char*>(dst);
    while (size >= sizeof(uint64_t)) {
      uint64_t word = src->load(std::memory_order_relaxed);
      std::memcpy(dst_byte, &word, sizeof(word));
      dst_byte += sizeof(word);
      src++;
      size -= sizeof(word);
    }
    if (size > 0) {
      uint64_t word = src->load(std::memory_order_relaxed);
      std::memcpy(dst_byte, &word, size);
    }
  }

  // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
  // atomics.
  static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
                                  size_t size) {
    const char* src_byte = static_cast<const char*>(src);
    while (size >= sizeof(uint64_t)) {
      uint64_t word;
      std::memcpy(&word, src_byte, sizeof(word));
      dst->store(word, std::memory_order_relaxed);
      src_byte += sizeof(word);
      dst++;
      size -= sizeof(word);
    }
    if (size > 0) {
      uint64_t word = 0;
      std::memcpy(&word, src_byte, size);
      dst->store(word, std::memory_order_relaxed);
    }
  }

  static constexpr int64_t kUninitialized = -1;
  std::atomic<int64_t> lock_;
};
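
// Usage sketch (an editor's illustration, not part of the upstream header).
// `Flag`, `kWords`, `StoreValue`, and `LoadValue` are hypothetical names, and
// absl::Mutex (from "absl/synchronization/mutex.h") is assumed as the writer
// lock; the real callers live elsewhere in flags_internal.
//
//   struct Flag {
//     absl::Mutex mu;                       // serializes all writers
//     SequenceLock seq_lock;
//     std::atomic<uint64_t> value[kWords];  // payload, as the lock requires
//   };
//
//   void StoreValue(Flag& f, const void* src, size_t size) {
//     absl::MutexLock l(&f.mu);  // Write() must be externally synchronized.
//     f.seq_lock.Write(f.value, src, size);
//   }
//
//   bool LoadValue(Flag& f, void* dst, size_t size) {
//     if (f.seq_lock.TryRead(dst, f.value, size)) return true;
//     absl::MutexLock l(&f.mu);  // Slow path: no writer can be active now, so
//                                // the retry fails only if the lock was never
//                                // marked initialized.
//     return f.seq_lock.TryRead(dst, f.value, size);
//   }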

}  // namespace flags_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_