gcc-x86.hpp
#ifndef BOOST_DETAIL_ATOMIC_GCC_X86_HPP
#define BOOST_DETAIL_ATOMIC_GCC_X86_HPP

// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
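
/* Lock-free capability macros for this backend, following the usual
   ATOMIC_..._LOCK_FREE convention: 2 = always lock-free, 1 = lock-free
   for some instances only, 0 = never lock-free. */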
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2

#if defined(__x86_64__)
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 1
#endif

#define BOOST_ATOMIC_ADDRESS_LOCK_FREE 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2

namespace boost {

#if defined(__x86_64__)
# define BOOST_ATOMIC_X86_FENCE_INSTR "mfence\n"
#else
# define BOOST_ATOMIC_X86_FENCE_INSTR "lock ; addl $0, (%%esp)\n"
#endif
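
/* Full-barrier instruction: x86_64 is guaranteed to have mfence, while
   32-bit x86 may predate SSE2, so a lock-prefixed add of zero to the top
   of the stack is used instead; the add changes nothing, but the lock
   prefix forces a full memory barrier. */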

#define BOOST_ATOMIC_THREAD_FENCE 2
static inline void
atomic_thread_fence(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
            break;
        case memory_order_release:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_acquire:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_consume:
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory");
            break;
        default:;
    }
}
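
/* Usage sketch (hypothetical caller, not part of this header): only a
   seq_cst fence costs a real instruction on x86; the weaker orders
   reduce to a pure compiler barrier because x86 loads and stores are
   already acquire/release:

       data = 42;                                  // plain store
       atomic_thread_fence(memory_order_seq_cst);  // mfence / lock-add
       ready = 1;                                  // now visible in order
*/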

#define BOOST_ATOMIC_SIGNAL_FENCE 2
static inline void
atomic_signal_fence(memory_order)
{
    __asm__ __volatile__ ("" ::: "memory");
}

namespace detail {
namespace atomic {

static inline void
platform_fence_before(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
        case memory_order_acquire:
        case memory_order_consume:
            break;
        case memory_order_release:
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            /* release */
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ ("" ::: "memory");
            /* seq */
            break;
    }
}

static inline void
platform_fence_after(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
        case memory_order_release:
            break;
        case memory_order_acquire:
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            /* acquire */
            break;
        case memory_order_consume:
            /* consume */
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ ("" ::: "memory");
            /* seq */
            break;
        default:;
    }
}

static inline void
platform_fence_after_load(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
        case memory_order_release:
            break;
        case memory_order_acquire:
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_consume:
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory");
            break;
        default:;
    }
}
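
/* The atomic classes below all follow the same shape: call
   platform_fence_before(order), perform the access (lock-prefixed where
   it must be atomic), then call platform_fence_after(order) or, for
   loads, platform_fence_after_load(order). On x86 these fences are pure
   compiler barriers except for a seq_cst load, since lock-prefixed
   instructions already act as full hardware barriers. */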

template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddb %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgb %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgb %2, %1"
            : "+a" (previous), "+m" (v_)
            : "q" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
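
/* The 2-, 4- and 8-byte integral specializations below are structurally
   identical to the 1-byte one; only the instruction suffix (w/l/q) and
   the register constraint change. A minimal usage sketch, assuming the
   public boost::atomic<> wrapper that dispatches to these
   specializations:

       boost::atomic<int> counter(0);
       counter.fetch_add(1);                          // lock ; xaddl
       int expected = 1;
       counter.compare_exchange_strong(expected, 2);  // lock ; cmpxchgl
*/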

template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddw %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgw %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgw %2, %1"
            : "+a" (previous), "+m" (v_)
            : "q" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#if defined(__x86_64__)
template<typename T, bool Sign>
class base_atomic<T, int, 8, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#endif

/* pointers */

#if !defined(__x86_64__)

template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef void * value_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool compare_exchange_strong(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
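        /* Scale the element count into a byte offset: fetch_add on T*
           advances v elements, but xadd operates on raw bytes. */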
        v = v * sizeof(*v_);
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return reinterpret_cast<value_type>(v);
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#else

template<bool Sign>
class base_atomic<void *, void *, 8, Sign> {
    typedef base_atomic this_type;
    typedef void * value_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool compare_exchange_strong(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

template<typename T, bool Sign>
class base_atomic<T *, void *, 8, Sign> {
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        v = v * sizeof(*v_);
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return reinterpret_cast<value_type>(v);
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#endif
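
/* Generic fallback for trivially-copyable types of size 1, 2, 4 (and,
   on x86_64, 8) bytes that are neither integers nor pointers: the value
   is memcpy'd into an unsigned integer of the same width (storage_type)
   and that integer is operated on with the same xchg/cmpxchg
   instructions used above. */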
template<typename T, bool Sign>
class base_atomic<T, void, 1, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint8_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgb %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgb %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint16_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgw %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgw %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint32_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

#if defined(__x86_64__)
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint64_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
#endif

#if defined(__i686__)

template<typename T>
bool
platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr)
{
    int scratch;
    T prev = expected;
    /* Make sure ebx is saved and restored properly in case
    this object is compiled as "position independent". Since
    programmers on x86 tend to forget specifying -DPIC or
    similar, always assume PIC.

    To make this work uniformly even in the non-PIC case, set
    up the register constraints such that ebx cannot be used
    by accident, e.g. as the base address for the variable to
    be modified. Accessing "scratch" should always be okay, as
    it can only be placed on the stack (and therefore accessed
    through ebp or esp only).

    In theory, one could push/pop ebx onto/off the stack, but
    moves to a prepared stack slot turn out to be faster. */
    __asm__ __volatile__ (
        "movl %%ebx, %1\n"
        "movl %2, %%ebx\n"
        "lock; cmpxchg8b 0(%4)\n"
        "movl %1, %%ebx\n"
        : "=A" (prev), "=m" (scratch)
        : "D" ((int)desired), "c" ((int)(desired >> 32)), "S" (ptr), "0" (prev)
        : "memory");
    bool success = (prev == expected);
    expected = prev;
    return success;
}
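
/* Usage sketch: a strong 64-bit read-modify-write on 32-bit x86 can be
   built as a CAS loop on top of this primitive (illustrative only; the
   shipped wrapper is pulled in at the end of this header):

       volatile uint64_t value;
       uint64_t expected = value;  // seed with a plain read
       while (!platform_cmpxchg64_strong(expected, expected + 1, &value))
           ;                       // on failure, expected is refreshed
*/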

#endif

} /* namespace atomic */
} /* namespace detail */
} /* namespace boost */

/* pull in 64-bit atomic type using cmpxchg8b above */
#if defined(__i686__)
#include <boost/atomic/detail/cas64strong.hpp>
#endif

#endif /* BOOST_DETAIL_ATOMIC_GCC_X86_HPP */