atomicops.h
// ©2013-2016 Cameron Desrochers.
// Distributed under the simplified BSD license (see the license file that
// should have come with this header).
// Uses Jeff Preshing's semaphore implementation (under the terms of its
// separate zlib license, embedded below).

#pragma once

// Provides a portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant)
// implementation of low-level memory barriers, plus a few semi-portable utility macros
// (for inlining and alignment). Also has a basic atomic type (limited to hardware-supported
// atomics with no memory ordering guarantees). Uses the AE_* prefix for macros
// (historical reasons), and the "moodycamel" namespace for symbols.
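//
// As a quick orientation, a minimal usage sketch (illustrative only -- the
// variable names below are hypothetical and not part of this header):
//
//     moodycamel::weak_atomic<int> counter(0);              // basic atomic type
//     counter.fetch_add_release(1);                         // atomic add with release ordering
//     int snapshot = counter.load();                        // relaxed atomic load
//     moodycamel::fence(moodycamel::memory_order_seq_cst);  // full memory barrier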

#include <cassert>
#include <type_traits>
#include <cerrno>
#include <cstdint>
#include <ctime>

// Platform detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif

#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif


// AE_UNUSED
#define AE_UNUSED(x) ((void)x)

// AE_NO_TSAN
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
#else
#define AE_NO_TSAN
#endif
#else
#define AE_NO_TSAN
#endif


// AE_FORCEINLINE
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif


// AE_ALIGN
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC-compliant syntax...
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif


// Portable atomic fences implemented below:

namespace moodycamel {

enum memory_order {
    memory_order_relaxed,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst,

    // memory_order_sync: Forces a full sync:
    // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
    memory_order_sync = memory_order_seq_cst
};

} // end namespace moodycamel
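
// To illustrate how these orderings pair up in practice, here is a sketch of
// the classic publish/consume pattern (illustrative only; assumes `ready` is
// a weak_atomic<int> visible to both threads, and `data`/`consume` are
// hypothetical placeholders):
//
//     // Producer thread:                  // Consumer thread:
//     data = 42;                           while (ready.load() == 0) { }
//     fence(memory_order_release);         fence(memory_order_acquire);
//     ready = 1;                           consume(data);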

#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || (defined(AE_ICC) && __INTEL_COMPILER < 1600)
// VS2010 and ICC13 don't support std::atomic_*_fence, so implement our own fences

#include <intrin.h>

#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif


#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4365)    // Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' warning when using `assert`
#ifdef __cplusplus_cli
#pragma managed(push, off)
#endif
#endif

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: _ReadBarrier(); break;
        case memory_order_release: _WriteBarrier(); break;
        case memory_order_acq_rel: _ReadWriteBarrier(); break;
        case memory_order_seq_cst: _ReadWriteBarrier(); break;
        default: assert(false);
    }
}

// x86/x64 have a strong memory model -- all loads and stores have
// acquire and release semantics automatically (so only compiler
// barriers are needed for those).
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: _ReadBarrier(); break;
        case memory_order_release: _WriteBarrier(); break;
        case memory_order_acq_rel: _ReadWriteBarrier(); break;
        case memory_order_seq_cst:
            _ReadWriteBarrier();
            AeFullSync();
            _ReadWriteBarrier();
            break;
        default: assert(false);
    }
}
#else
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
    // Non-specialized arch, use heavier memory barriers everywhere just in case :-(
    switch (order) {
        case memory_order_relaxed:
            break;
        case memory_order_acquire:
            _ReadBarrier();
            AeLiteSync();
            _ReadBarrier();
            break;
        case memory_order_release:
            _WriteBarrier();
            AeLiteSync();
            _WriteBarrier();
            break;
        case memory_order_acq_rel:
            _ReadWriteBarrier();
            AeLiteSync();
            _ReadWriteBarrier();
            break;
        case memory_order_seq_cst:
            _ReadWriteBarrier();
            AeFullSync();
            _ReadWriteBarrier();
            break;
        default: assert(false);
    }
}
#endif
} // end namespace moodycamel
#else
// Use the standard library's atomics
#include <atomic>

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
        case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
        case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
        case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
        default: assert(false);
    }
}

AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
        case memory_order_release: std::atomic_thread_fence(std::memory_order_release); break;
        case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acq_rel); break;
        case memory_order_seq_cst: std::atomic_thread_fence(std::memory_order_seq_cst); break;
        default: assert(false);
    }
}

} // end namespace moodycamel

#endif


#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif

#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>

// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
// The guarantee of atomicity is only made for types that already have atomic load and store guarantees
// at the hardware level -- on most platforms this generally means aligned pointers and integers (only).
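//
// For example (a sketch; `flag` is a hypothetical variable):
//
//     weak_atomic<std::uint32_t> flag(0);   // OK: an aligned 32-bit integer is atomic in hardware
//     flag = 1;                             // atomic store, no ordering implied
//     std::uint32_t v = flag.load();        // atomic load, no ordering implied
//     flag.fetch_add_release(1);            // atomic add; ordering must still be paired manually
//
// Per the warning above, a large struct gets no atomicity guarantee from this
// class -- use std::atomic (or a lock) for anything beyond word-sized types.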
namespace moodycamel {
template<typename T>
class weak_atomic
{
public:
    AE_NO_TSAN weak_atomic() : value() { }
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4100)    // Get rid of (erroneous) 'unreferenced formal parameter' warning
#endif
    template<typename U> AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) { }
#ifdef __cplusplus_cli
    // Work around bug with universal reference/nullptr combination that only appears when /clr is on
    AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) { }
#endif
    AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) { }
    AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) { }
#ifdef AE_VCPP
#pragma warning(pop)
#endif

    AE_FORCEINLINE operator T() const AE_NO_TSAN { return load(); }

#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
    template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN { value = std::forward<U>(x); return *this; }
    AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN { value = other.value; return *this; }

    AE_FORCEINLINE T load() const AE_NO_TSAN { return value; }

    AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
    {
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
        if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
        else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
        assert(false && "T must be either a 32 or 64 bit type");
        return value;
    }

    AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
    {
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
        if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
        else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
        assert(false && "T must be either a 32 or 64 bit type");
        return value;
    }
#else
    template<typename U>
    AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN
    {
        value.store(std::forward<U>(x), std::memory_order_relaxed);
        return *this;
    }

    AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN
    {
        value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
        return *this;
    }

    AE_FORCEINLINE T load() const AE_NO_TSAN { return value.load(std::memory_order_relaxed); }

    AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
    {
        return value.fetch_add(increment, std::memory_order_acquire);
    }

    AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
    {
        return value.fetch_add(increment, std::memory_order_release);
    }
#endif


private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
    // No std::atomic support, but still need to circumvent compiler optimizations.
    // `volatile` will make memory access slow, but is guaranteed to be reliable.
    volatile T value;
#else
    std::atomic<T> value;
#endif
};

} // end namespace moodycamel



// Portable single-producer, single-consumer semaphore below:

#if defined(_WIN32)
// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
    struct _SECURITY_ATTRIBUTES;
    __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
    __declspec(dllimport) int __stdcall CloseHandle(void* hObject);
    __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
    __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
}
#elif defined(__MACH__)
#include <mach/mach.h>
#elif defined(__unix__)
#include <semaphore.h>
#endif

namespace moodycamel
{
    // Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
    // portable + lightweight semaphore implementations, originally from
    // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
    // LICENSE:
    // Copyright (c) 2015 Jeff Preshing
    //
    // This software is provided 'as-is', without any express or implied
    // warranty. In no event will the authors be held liable for any damages
    // arising from the use of this software.
    //
    // Permission is granted to anyone to use this software for any purpose,
    // including commercial applications, and to alter it and redistribute it
    // freely, subject to the following restrictions:
    //
    // 1. The origin of this software must not be misrepresented; you must not
    //    claim that you wrote the original software. If you use this software
    //    in a product, an acknowledgement in the product documentation would be
    //    appreciated but is not required.
    // 2. Altered source versions must be plainly marked as such, and must not be
    //    misrepresented as being the original software.
    // 3. This notice may not be removed or altered from any source distribution.
    namespace spsc_sema
    {
#if defined(_WIN32)
        class Semaphore
        {
        private:
            void* m_hSema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0)
            {
                assert(initialCount >= 0);
                const long maxLong = 0x7fffffff;
                m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
            }

            AE_NO_TSAN ~Semaphore()
            {
                CloseHandle(m_hSema);
            }

            void wait() AE_NO_TSAN
            {
                const unsigned long infinite = 0xffffffff;
                WaitForSingleObject(m_hSema, infinite);
            }

            bool try_wait() AE_NO_TSAN
            {
                const unsigned long RC_WAIT_TIMEOUT = 0x00000102;
                return WaitForSingleObject(m_hSema, 0) != RC_WAIT_TIMEOUT;
            }

            bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
            {
                const unsigned long RC_WAIT_TIMEOUT = 0x00000102;
                return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) != RC_WAIT_TIMEOUT;
            }

            void signal(int count = 1) AE_NO_TSAN
            {
                ReleaseSemaphore(m_hSema, count, nullptr);
            }
        };
#elif defined(__MACH__)
        //---------------------------------------------------------
        // Semaphore (Apple iOS and OSX)
        // Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
        //---------------------------------------------------------
        class Semaphore
        {
        private:
            semaphore_t m_sema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0)
            {
                assert(initialCount >= 0);
                semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
            }

            AE_NO_TSAN ~Semaphore()
            {
                semaphore_destroy(mach_task_self(), m_sema);
            }

            void wait() AE_NO_TSAN
            {
                semaphore_wait(m_sema);
            }

            bool try_wait() AE_NO_TSAN
            {
                return timed_wait(0);
            }

            bool timed_wait(std::int64_t timeout_usecs) AE_NO_TSAN
            {
                mach_timespec_t ts;
                ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
                ts.tv_nsec = (timeout_usecs % 1000000) * 1000;

                // added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
                kern_return_t rc = semaphore_timedwait(m_sema, ts);

                return rc != KERN_OPERATION_TIMED_OUT && rc != KERN_ABORTED;
            }

            void signal() AE_NO_TSAN
            {
                semaphore_signal(m_sema);
            }

            void signal(int count) AE_NO_TSAN
            {
                while (count-- > 0)
                {
                    semaphore_signal(m_sema);
                }
            }
        };
#elif defined(__unix__)
        //---------------------------------------------------------
        // Semaphore (POSIX, Linux)
        //---------------------------------------------------------
        class Semaphore
        {
        private:
            sem_t m_sema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0)
            {
                assert(initialCount >= 0);
                sem_init(&m_sema, 0, initialCount);
            }

            AE_NO_TSAN ~Semaphore()
            {
                sem_destroy(&m_sema);
            }

            void wait() AE_NO_TSAN
            {
                // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
                int rc;
                do
                {
                    rc = sem_wait(&m_sema);
                }
                while (rc == -1 && errno == EINTR);
            }

            bool try_wait() AE_NO_TSAN
            {
                int rc;
                do {
                    rc = sem_trywait(&m_sema);
                } while (rc == -1 && errno == EINTR);
                return !(rc == -1 && errno == EAGAIN);
            }

            bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
            {
                struct timespec ts;
                const int usecs_in_1_sec = 1000000;
                const int nsecs_in_1_sec = 1000000000;
                clock_gettime(CLOCK_REALTIME, &ts);
                ts.tv_sec += usecs / usecs_in_1_sec;
                ts.tv_nsec += (usecs % usecs_in_1_sec) * 1000;
                // sem_timedwait bombs if you have more than 1e9 in tv_nsec
                // so we have to clean things up before passing it in
                if (ts.tv_nsec >= nsecs_in_1_sec) {
                    ts.tv_nsec -= nsecs_in_1_sec;
                    ++ts.tv_sec;
                }

                int rc;
                do {
                    rc = sem_timedwait(&m_sema, &ts);
                } while (rc == -1 && errno == EINTR);
                return !(rc == -1 && errno == ETIMEDOUT);
            }

            void signal() AE_NO_TSAN
            {
                sem_post(&m_sema);
            }

            void signal(int count) AE_NO_TSAN
            {
                while (count-- > 0)
                {
                    sem_post(&m_sema);
                }
            }
        };
#else
#error Unsupported platform! (No semaphore wrapper available)
#endif

        //---------------------------------------------------------
        // LightweightSemaphore
        //---------------------------------------------------------
        class LightweightSemaphore
        {
        public:
            typedef std::make_signed<std::size_t>::type ssize_t;

        private:
            weak_atomic<ssize_t> m_count;
            Semaphore m_sema;

            bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN
            {
                ssize_t oldCount;
                // Is there a better way to set the initial spin count?
                // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
                // as threads start hitting the kernel semaphore.
                int spin = 10000;
                while (--spin >= 0)
                {
                    if (m_count.load() > 0)
                    {
                        m_count.fetch_add_acquire(-1);
                        return true;
                    }
                    compiler_fence(memory_order_acquire);    // Prevent the compiler from collapsing the loop.
                }
                oldCount = m_count.fetch_add_acquire(-1);
                if (oldCount > 0)
                    return true;
                if (timeout_usecs < 0)
                {
                    m_sema.wait();
                    return true;
                }
                if (m_sema.timed_wait(timeout_usecs))
                    return true;
                // At this point, we've timed out waiting for the semaphore, but the
                // count is still decremented indicating we may still be waiting on
                // it. So we have to re-adjust the count, but only if the semaphore
                // wasn't signaled enough times for us too since then. If it was, we
                // need to release the semaphore too.
                while (true)
                {
                    oldCount = m_count.fetch_add_release(1);
                    if (oldCount < 0)
                        return false;    // successfully restored things to the way they were
                    // Oh, the producer thread just signaled the semaphore after all. Try again:
                    oldCount = m_count.fetch_add_acquire(-1);
                    if (oldCount > 0 && m_sema.try_wait())
                        return true;
                }
            }

        public:
            AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)
            {
                assert(initialCount >= 0);
            }

            bool tryWait() AE_NO_TSAN
            {
                if (m_count.load() > 0)
                {
                    m_count.fetch_add_acquire(-1);
                    return true;
                }
                return false;
            }

            void wait() AE_NO_TSAN
            {
                if (!tryWait())
                    waitWithPartialSpinning();
            }

            bool wait(std::int64_t timeout_usecs) AE_NO_TSAN
            {
                return tryWait() || waitWithPartialSpinning(timeout_usecs);
            }

            void signal(ssize_t count = 1) AE_NO_TSAN
            {
                assert(count >= 0);
                ssize_t oldCount = m_count.fetch_add_release(count);
                assert(oldCount >= -1);
                if (oldCount < 0)
                {
                    m_sema.signal(1);
                }
            }

            ssize_t availableApprox() const AE_NO_TSAN
            {
                ssize_t count = m_count.load();
                return count > 0 ? count : 0;
            }
        };
    }    // end namespace spsc_sema
}    // end namespace moodycamel
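
// A minimal single-producer/single-consumer handoff sketch (illustrative only;
// produce_item()/consume_item() are hypothetical placeholders):
//
//     moodycamel::spsc_sema::LightweightSemaphore items;
//
//     // Producer thread:
//     produce_item();
//     items.signal();           // wakes the consumer (kernel call only if it is blocked)
//
//     // Consumer thread:
//     if (items.wait(1000)) {   // spin briefly, then block for up to ~1000 microseconds
//         consume_item();
//     }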

#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
#pragma warning(pop)
#ifdef __cplusplus_cli
#pragma managed(pop)
#endif
#endif