gcc-ppc.hpp
Go to the documentation of this file.
1 #ifndef BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
2 #define BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
3 
4 // Copyright (c) 2009 Helge Bahmann
5 //
6 // Distributed under the Boost Software License, Version 1.0.
7 // See accompanying file LICENSE_1_0.txt or copy at
8 // http://www.boost.org/LICENSE_1_0.txt)
9 
11 
12 /*
13  Refer to: Motorola: "Programming Environments Manual for 32-Bit
14  Implementations of the PowerPC Architecture", Appendix E:
15  "Synchronization Programming Examples" for an explanation of what is
16  going on here (can be found on the web at various places by the
17  name "MPCFPE32B.pdf", Google is your friend...)
18 
19  Most of the atomic operations map to instructions in a relatively
20  straight-forward fashion, but "load"s may at first glance appear
21  a bit strange as they map to:
22 
23  lwz %rX, addr
24  cmpw %rX, %rX
25  bne- 1f
26  1:
27 
28  That is, the CPU is forced to perform a branch that "formally" depends
29  on the value retrieved from memory. This scheme has an overhead of
30  about 1-2 clock cycles per load, but it allows to map "acquire" to
31  the "isync" instruction instead of "sync" uniformly and for all type
32  of atomic operations. Since "isync" has a cost of about 15 clock
33  cycles, while "sync" hast a cost of about 50 clock cycles, the small
34  penalty to atomic loads more than compensates for this.
35 
36  Byte- and halfword-sized atomic values are realized by encoding the
37  value to be represented into a word, performing sign/zero extension
38  as appropriate. This means that after add/sub operations the value
39  needs fixing up to accurately preserve the wrap-around semantic of
40  the smaller type. (Nothing special needs to be done for the bit-wise
41  and the "exchange type" operators as the compiler already sees to
42  it that values carried in registers are extended appropriately and
43  everything falls into place naturally).
44 
45  The register constrant "b" instructs gcc to use any register
46  except r0; this is sometimes required because the encoding for
47  r0 is used to signify "constant zero" in a number of instructions,
48  making r0 unusable in this place. For simplicity this constraint
49  is used everywhere since I am to lazy to look this up on a
50  per-instruction basis, and ppc has enough registers for this not
51  to pose a problem.
52 */
53 
/* All fundamental types up to the native word size are always lock-free (2);
   "long long" is lock-free only on 64-bit PowerPC, where ldarx/stdcx. exist. */
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE 2
#if defined(__powerpc64__)
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2

/* Would like to move the slow-path of failed compare_exchange
(that clears the "success" bit) out-of-line. gcc can in
principle do that using ".subsection"/".previous", but Apple's
binutils seemingly does not understand that. Therefore wrap
the "clear" of the flag in a macro and let it remain
in-line for Apple
*/

#if !defined(__APPLE__)

/* Out-of-line slow path: label 2 (the CAS failure target) lives in
   ".subsection 2", clears the success flag (%1) and branches back. */
#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR \
	"1:\n" \
	".subsection 2\n" \
	"2: addi %1,0,0\n" \
	"b 1b\n" \
	".previous\n" \

#else

/* In-line variant for Apple's assembler, which lacks ".subsection". */
#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR \
	"b 1f\n" \
	"2: addi %1,0,0\n" \
	"1:\n" \

#endif
94 
95 namespace boost {
96 namespace detail {
97 namespace atomic {
98 
99 static inline void
101 {
102  switch(order) {
105 #if defined(__powerpc64__)
106  __asm__ __volatile__ ("lwsync" ::: "memory");
107  break;
108 #endif
110  __asm__ __volatile__ ("sync" ::: "memory");
111  default:;
112  }
113 }
114 
115 static inline void
117 {
118  switch(order) {
122  __asm__ __volatile__ ("isync");
124  __asm__ __volatile__ ("" ::: "memory");
125  default:;
126  }
127 }
128 
129 static inline void
131 {
132  switch(order) {
134  __asm__ __volatile__ ("sync");
135  default:;
136  }
137 }
138 
139 /* integral types */
140 
141 template<typename T>
142 class base_atomic<T, int, 1, true> {
144  typedef T value_type;
145  typedef int32_t storage_type;
146  typedef T difference_type;
147 public:
148  explicit base_atomic(value_type v) : v_(v) {}
149  base_atomic(void) {}
150 
151  void
152  store(value_type v, memory_order order = memory_order_seq_cst) volatile
153  {
154  ppc_fence_before(order);
155  __asm__ (
156  "stw %1, %0\n"
157  : "+m"(v_)
158  : "r" (v)
159  );
160  ppc_fence_after_store(order);
161  }
162 
163  value_type
164  load(memory_order order = memory_order_seq_cst) const volatile
165  {
166  value_type v;
167  __asm__ __volatile__ (
168  "lwz %0, %1\n"
169  "cmpw %0, %0\n"
170  "bne- 1f\n"
171  "1:\n"
172  : "=&r" (v)
173  : "m" (v_)
174  );
175  ppc_fence_after(order);
176  return v;
177  }
178 
179  value_type
180  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
181  {
182  value_type original;
183  ppc_fence_before(order);
184  __asm__ (
185  "1:\n"
186  "lwarx %0,%y1\n"
187  "stwcx. %2,%y1\n"
188  "bne- 1b\n"
189  : "=&b" (original), "+Z"(v_)
190  : "b" (v)
191  : "cr0"
192  );
193  ppc_fence_after(order);
194  return original;
195  }
196 
197  bool
199  value_type & expected,
200  value_type desired,
201  memory_order success_order,
202  memory_order failure_order) volatile
203  {
204  int success;
205  ppc_fence_before(success_order);
206  __asm__(
207  "lwarx %0,%y2\n"
208  "cmpw %0, %3\n"
209  "bne- 2f\n"
210  "stwcx. %4,%y2\n"
211  "bne- 2f\n"
212  "addi %1,0,1\n"
213  "1:"
215  : "=&b" (expected), "=&b" (success), "+Z"(v_)
216  : "b" (expected), "b" (desired)
217  : "cr0"
218  );
219  if (success)
220  ppc_fence_after(success_order);
221  else
222  ppc_fence_after(failure_order);
223  return success;
224  }
225 
226  bool
228  value_type & expected,
229  value_type desired,
230  memory_order success_order,
231  memory_order failure_order) volatile
232  {
233  int success;
234  ppc_fence_before(success_order);
235  __asm__(
236  "0: lwarx %0,%y2\n"
237  "cmpw %0, %3\n"
238  "bne- 2f\n"
239  "stwcx. %4,%y2\n"
240  "bne- 0b\n"
241  "addi %1,0,1\n"
242  "1:"
243 
245  : "=&b" (expected), "=&b" (success), "+Z"(v_)
246  : "b" (expected), "b" (desired)
247  : "cr0"
248  );
249  if (success)
250  ppc_fence_after(success_order);
251  else
252  ppc_fence_after(failure_order);
253  return success;
254  }
255 
256  value_type
257  fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
258  {
259  value_type original, tmp;
260  ppc_fence_before(order);
261  __asm__ (
262  "1:\n"
263  "lwarx %0,%y2\n"
264  "add %1,%0,%3\n"
265  "extsb %1, %1\n"
266  "stwcx. %1,%y2\n"
267  "bne- 1b\n"
268  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
269  : "b" (v)
270  : "cc");
271  ppc_fence_after(order);
272  return original;
273  }
274 
275  value_type
276  fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
277  {
278  value_type original, tmp;
279  ppc_fence_before(order);
280  __asm__ (
281  "1:\n"
282  "lwarx %0,%y2\n"
283  "sub %1,%0,%3\n"
284  "extsb %1, %1\n"
285  "stwcx. %1,%y2\n"
286  "bne- 1b\n"
287  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
288  : "b" (v)
289  : "cc");
290  ppc_fence_after(order);
291  return original;
292  }
293 
294  value_type
295  fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
296  {
297  value_type original, tmp;
298  ppc_fence_before(order);
299  __asm__ (
300  "1:\n"
301  "lwarx %0,%y2\n"
302  "and %1,%0,%3\n"
303  "stwcx. %1,%y2\n"
304  "bne- 1b\n"
305  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
306  : "b" (v)
307  : "cc");
308  ppc_fence_after(order);
309  return original;
310  }
311 
312  value_type
313  fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
314  {
315  value_type original, tmp;
316  ppc_fence_before(order);
317  __asm__ (
318  "1:\n"
319  "lwarx %0,%y2\n"
320  "or %1,%0,%3\n"
321  "stwcx. %1,%y2\n"
322  "bne- 1b\n"
323  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
324  : "b" (v)
325  : "cc");
326  ppc_fence_after(order);
327  return original;
328  }
329 
330  value_type
331  fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
332  {
333  value_type original, tmp;
334  ppc_fence_before(order);
335  __asm__ (
336  "1:\n"
337  "lwarx %0,%y2\n"
338  "xor %1,%0,%3\n"
339  "stwcx. %1,%y2\n"
340  "bne- 1b\n"
341  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
342  : "b" (v)
343  : "cc");
344  ppc_fence_after(order);
345  return original;
346  }
347 
348  bool
349  is_lock_free(void) const volatile
350  {
351  return true;
352  }
353 
355 private:
356  base_atomic(const base_atomic &) /* = delete */ ;
357  void operator=(const base_atomic &) /* = delete */ ;
358  storage_type v_;
359 };
360 
361 template<typename T>
362 class base_atomic<T, int, 1, false> {
364  typedef T value_type;
365  typedef uint32_t storage_type;
366  typedef T difference_type;
367 public:
368  explicit base_atomic(value_type v) : v_(v) {}
369  base_atomic(void) {}
370 
371  void
372  store(value_type v, memory_order order = memory_order_seq_cst) volatile
373  {
374  ppc_fence_before(order);
375  __asm__ (
376  "stw %1, %0\n"
377  : "+m"(v_)
378  : "r" (v)
379  );
380  ppc_fence_after_store(order);
381  }
382 
383  value_type
384  load(memory_order order = memory_order_seq_cst) const volatile
385  {
386  value_type v;
387  __asm__ __volatile__ (
388  "lwz %0, %1\n"
389  "cmpw %0, %0\n"
390  "bne- 1f\n"
391  "1:\n"
392  : "=&r" (v)
393  : "m" (v_)
394  );
395  ppc_fence_after(order);
396  return v;
397  }
398 
399  value_type
400  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
401  {
402  value_type original;
403  ppc_fence_before(order);
404  __asm__ (
405  "1:\n"
406  "lwarx %0,%y1\n"
407  "stwcx. %2,%y1\n"
408  "bne- 1b\n"
409  : "=&b" (original), "+Z"(v_)
410  : "b" (v)
411  : "cr0"
412  );
413  ppc_fence_after(order);
414  return original;
415  }
416 
417  bool
419  value_type & expected,
420  value_type desired,
421  memory_order success_order,
422  memory_order failure_order) volatile
423  {
424  int success;
425  ppc_fence_before(success_order);
426  __asm__(
427  "lwarx %0,%y2\n"
428  "cmpw %0, %3\n"
429  "bne- 2f\n"
430  "stwcx. %4,%y2\n"
431  "bne- 2f\n"
432  "addi %1,0,1\n"
433  "1:"
434 
436  : "=&b" (expected), "=&b" (success), "+Z"(v_)
437  : "b" (expected), "b" (desired)
438  : "cr0"
439  );
440  if (success)
441  ppc_fence_after(success_order);
442  else
443  ppc_fence_after(failure_order);
444  return success;
445  }
446 
447  bool
449  value_type & expected,
450  value_type desired,
451  memory_order success_order,
452  memory_order failure_order) volatile
453  {
454  int success;
455  ppc_fence_before(success_order);
456  __asm__(
457  "0: lwarx %0,%y2\n"
458  "cmpw %0, %3\n"
459  "bne- 2f\n"
460  "stwcx. %4,%y2\n"
461  "bne- 0b\n"
462  "addi %1,0,1\n"
463  "1:"
464 
466  : "=&b" (expected), "=&b" (success), "+Z"(v_)
467  : "b" (expected), "b" (desired)
468  : "cr0"
469  );
470  if (success)
471  ppc_fence_after(success_order);
472  else
473  ppc_fence_after(failure_order);
474  return success;
475  }
476 
477  value_type
478  fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
479  {
480  value_type original, tmp;
481  ppc_fence_before(order);
482  __asm__ (
483  "1:\n"
484  "lwarx %0,%y2\n"
485  "add %1,%0,%3\n"
486  "rlwinm %1, %1, 0, 0xff\n"
487  "stwcx. %1,%y2\n"
488  "bne- 1b\n"
489  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
490  : "b" (v)
491  : "cc");
492  ppc_fence_after(order);
493  return original;
494  }
495 
496  value_type
497  fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
498  {
499  value_type original, tmp;
500  ppc_fence_before(order);
501  __asm__ (
502  "1:\n"
503  "lwarx %0,%y2\n"
504  "sub %1,%0,%3\n"
505  "rlwinm %1, %1, 0, 0xff\n"
506  "stwcx. %1,%y2\n"
507  "bne- 1b\n"
508  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
509  : "b" (v)
510  : "cc");
511  ppc_fence_after(order);
512  return original;
513  }
514 
515  value_type
516  fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
517  {
518  value_type original, tmp;
519  ppc_fence_before(order);
520  __asm__ (
521  "1:\n"
522  "lwarx %0,%y2\n"
523  "and %1,%0,%3\n"
524  "stwcx. %1,%y2\n"
525  "bne- 1b\n"
526  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
527  : "b" (v)
528  : "cc");
529  ppc_fence_after(order);
530  return original;
531  }
532 
533  value_type
534  fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
535  {
536  value_type original, tmp;
537  ppc_fence_before(order);
538  __asm__ (
539  "1:\n"
540  "lwarx %0,%y2\n"
541  "or %1,%0,%3\n"
542  "stwcx. %1,%y2\n"
543  "bne- 1b\n"
544  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
545  : "b" (v)
546  : "cc");
547  ppc_fence_after(order);
548  return original;
549  }
550 
551  value_type
552  fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
553  {
554  value_type original, tmp;
555  ppc_fence_before(order);
556  __asm__ (
557  "1:\n"
558  "lwarx %0,%y2\n"
559  "xor %1,%0,%3\n"
560  "stwcx. %1,%y2\n"
561  "bne- 1b\n"
562  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
563  : "b" (v)
564  : "cc");
565  ppc_fence_after(order);
566  return original;
567  }
568 
569  bool
570  is_lock_free(void) const volatile
571  {
572  return true;
573  }
574 
576 private:
577  base_atomic(const base_atomic &) /* = delete */ ;
578  void operator=(const base_atomic &) /* = delete */ ;
579  storage_type v_;
580 };
581 
582 template<typename T>
583 class base_atomic<T, int, 2, true> {
585  typedef T value_type;
586  typedef int32_t storage_type;
587  typedef T difference_type;
588 public:
589  explicit base_atomic(value_type v) : v_(v) {}
590  base_atomic(void) {}
591 
592  void
593  store(value_type v, memory_order order = memory_order_seq_cst) volatile
594  {
595  ppc_fence_before(order);
596  __asm__ (
597  "stw %1, %0\n"
598  : "+m"(v_)
599  : "r" (v)
600  );
601  ppc_fence_after_store(order);
602  }
603 
604  value_type
605  load(memory_order order = memory_order_seq_cst) const volatile
606  {
607  value_type v;
608  __asm__ __volatile__ (
609  "lwz %0, %1\n"
610  "cmpw %0, %0\n"
611  "bne- 1f\n"
612  "1:\n"
613  : "=&r" (v)
614  : "m" (v_)
615  );
616  ppc_fence_after(order);
617  return v;
618  }
619 
620  value_type
621  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
622  {
623  value_type original;
624  ppc_fence_before(order);
625  __asm__ (
626  "1:\n"
627  "lwarx %0,%y1\n"
628  "stwcx. %2,%y1\n"
629  "bne- 1b\n"
630  : "=&b" (original), "+Z"(v_)
631  : "b" (v)
632  : "cr0"
633  );
634  ppc_fence_after(order);
635  return original;
636  }
637 
638  bool
640  value_type & expected,
641  value_type desired,
642  memory_order success_order,
643  memory_order failure_order) volatile
644  {
645  int success;
646  ppc_fence_before(success_order);
647  __asm__(
648  "lwarx %0,%y2\n"
649  "cmpw %0, %3\n"
650  "bne- 2f\n"
651  "stwcx. %4,%y2\n"
652  "bne- 2f\n"
653  "addi %1,0,1\n"
654  "1:"
655 
657  : "=&b" (expected), "=&b" (success), "+Z"(v_)
658  : "b" (expected), "b" (desired)
659  : "cr0"
660  );
661  if (success)
662  ppc_fence_after(success_order);
663  else
664  ppc_fence_after(failure_order);
665  return success;
666  }
667 
668  bool
670  value_type & expected,
671  value_type desired,
672  memory_order success_order,
673  memory_order failure_order) volatile
674  {
675  int success;
676  ppc_fence_before(success_order);
677  __asm__(
678  "0: lwarx %0,%y2\n"
679  "cmpw %0, %3\n"
680  "bne- 2f\n"
681  "stwcx. %4,%y2\n"
682  "bne- 0b\n"
683  "addi %1,0,1\n"
684  "1:"
685 
687  : "=&b" (expected), "=&b" (success), "+Z"(v_)
688  : "b" (expected), "b" (desired)
689  : "cr0"
690  );
691  if (success)
692  ppc_fence_after(success_order);
693  else
694  ppc_fence_after(failure_order);
695  return success;
696  }
697 
698  value_type
699  fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
700  {
701  value_type original, tmp;
702  ppc_fence_before(order);
703  __asm__ (
704  "1:\n"
705  "lwarx %0,%y2\n"
706  "add %1,%0,%3\n"
707  "extsh %1, %1\n"
708  "stwcx. %1,%y2\n"
709  "bne- 1b\n"
710  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
711  : "b" (v)
712  : "cc");
713  ppc_fence_after(order);
714  return original;
715  }
716 
717  value_type
718  fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
719  {
720  value_type original, tmp;
721  ppc_fence_before(order);
722  __asm__ (
723  "1:\n"
724  "lwarx %0,%y2\n"
725  "sub %1,%0,%3\n"
726  "extsh %1, %1\n"
727  "stwcx. %1,%y2\n"
728  "bne- 1b\n"
729  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
730  : "b" (v)
731  : "cc");
732  ppc_fence_after(order);
733  return original;
734  }
735 
736  value_type
737  fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
738  {
739  value_type original, tmp;
740  ppc_fence_before(order);
741  __asm__ (
742  "1:\n"
743  "lwarx %0,%y2\n"
744  "and %1,%0,%3\n"
745  "stwcx. %1,%y2\n"
746  "bne- 1b\n"
747  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
748  : "b" (v)
749  : "cc");
750  ppc_fence_after(order);
751  return original;
752  }
753 
754  value_type
755  fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
756  {
757  value_type original, tmp;
758  ppc_fence_before(order);
759  __asm__ (
760  "1:\n"
761  "lwarx %0,%y2\n"
762  "or %1,%0,%3\n"
763  "stwcx. %1,%y2\n"
764  "bne- 1b\n"
765  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
766  : "b" (v)
767  : "cc");
768  ppc_fence_after(order);
769  return original;
770  }
771 
772  value_type
773  fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
774  {
775  value_type original, tmp;
776  ppc_fence_before(order);
777  __asm__ (
778  "1:\n"
779  "lwarx %0,%y2\n"
780  "xor %1,%0,%3\n"
781  "stwcx. %1,%y2\n"
782  "bne- 1b\n"
783  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
784  : "b" (v)
785  : "cc");
786  ppc_fence_after(order);
787  return original;
788  }
789 
790  bool
791  is_lock_free(void) const volatile
792  {
793  return true;
794  }
795 
797 private:
798  base_atomic(const base_atomic &) /* = delete */ ;
799  void operator=(const base_atomic &) /* = delete */ ;
800  storage_type v_;
801 };
802 
803 template<typename T>
804 class base_atomic<T, int, 2, false> {
806  typedef T value_type;
807  typedef uint32_t storage_type;
808  typedef T difference_type;
809 public:
810  explicit base_atomic(value_type v) : v_(v) {}
811  base_atomic(void) {}
812 
813  void
814  store(value_type v, memory_order order = memory_order_seq_cst) volatile
815  {
816  ppc_fence_before(order);
817  __asm__ (
818  "stw %1, %0\n"
819  : "+m"(v_)
820  : "r" (v)
821  );
822  ppc_fence_after_store(order);
823  }
824 
825  value_type
826  load(memory_order order = memory_order_seq_cst) const volatile
827  {
828  value_type v;
829  __asm__ __volatile__ (
830  "lwz %0, %1\n"
831  "cmpw %0, %0\n"
832  "bne- 1f\n"
833  "1:\n"
834  : "=&r" (v)
835  : "m" (v_)
836  );
837  ppc_fence_after(order);
838  return v;
839  }
840 
841  value_type
842  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
843  {
844  value_type original;
845  ppc_fence_before(order);
846  __asm__ (
847  "1:\n"
848  "lwarx %0,%y1\n"
849  "stwcx. %2,%y1\n"
850  "bne- 1b\n"
851  : "=&b" (original), "+Z"(v_)
852  : "b" (v)
853  : "cr0"
854  );
855  ppc_fence_after(order);
856  return original;
857  }
858 
859  bool
861  value_type & expected,
862  value_type desired,
863  memory_order success_order,
864  memory_order failure_order) volatile
865  {
866  int success;
867  ppc_fence_before(success_order);
868  __asm__(
869  "lwarx %0,%y2\n"
870  "cmpw %0, %3\n"
871  "bne- 2f\n"
872  "stwcx. %4,%y2\n"
873  "bne- 2f\n"
874  "addi %1,0,1\n"
875  "1:"
876 
878  : "=&b" (expected), "=&b" (success), "+Z"(v_)
879  : "b" (expected), "b" (desired)
880  : "cr0"
881  );
882  if (success)
883  ppc_fence_after(success_order);
884  else
885  ppc_fence_after(failure_order);
886  return success;
887  }
888 
889  bool
891  value_type & expected,
892  value_type desired,
893  memory_order success_order,
894  memory_order failure_order) volatile
895  {
896  int success;
897  ppc_fence_before(success_order);
898  __asm__(
899  "0: lwarx %0,%y2\n"
900  "cmpw %0, %3\n"
901  "bne- 2f\n"
902  "stwcx. %4,%y2\n"
903  "bne- 0b\n"
904  "addi %1,0,1\n"
905  "1:"
906 
908  : "=&b" (expected), "=&b" (success), "+Z"(v_)
909  : "b" (expected), "b" (desired)
910  : "cr0"
911  );
912  if (success)
913  ppc_fence_after(success_order);
914  else
915  ppc_fence_after(failure_order);
916  return success;
917  }
918 
919  value_type
920  fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
921  {
922  value_type original, tmp;
923  ppc_fence_before(order);
924  __asm__ (
925  "1:\n"
926  "lwarx %0,%y2\n"
927  "add %1,%0,%3\n"
928  "rlwinm %1, %1, 0, 0xffff\n"
929  "stwcx. %1,%y2\n"
930  "bne- 1b\n"
931  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
932  : "b" (v)
933  : "cc");
934  ppc_fence_after(order);
935  return original;
936  }
937 
938  value_type
939  fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
940  {
941  value_type original, tmp;
942  ppc_fence_before(order);
943  __asm__ (
944  "1:\n"
945  "lwarx %0,%y2\n"
946  "sub %1,%0,%3\n"
947  "rlwinm %1, %1, 0, 0xffff\n"
948  "stwcx. %1,%y2\n"
949  "bne- 1b\n"
950  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
951  : "b" (v)
952  : "cc");
953  ppc_fence_after(order);
954  return original;
955  }
956 
957  value_type
958  fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
959  {
960  value_type original, tmp;
961  ppc_fence_before(order);
962  __asm__ (
963  "1:\n"
964  "lwarx %0,%y2\n"
965  "and %1,%0,%3\n"
966  "stwcx. %1,%y2\n"
967  "bne- 1b\n"
968  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
969  : "b" (v)
970  : "cc");
971  ppc_fence_after(order);
972  return original;
973  }
974 
975  value_type
976  fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
977  {
978  value_type original, tmp;
979  ppc_fence_before(order);
980  __asm__ (
981  "1:\n"
982  "lwarx %0,%y2\n"
983  "or %1,%0,%3\n"
984  "stwcx. %1,%y2\n"
985  "bne- 1b\n"
986  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
987  : "b" (v)
988  : "cc");
989  ppc_fence_after(order);
990  return original;
991  }
992 
993  value_type
994  fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
995  {
996  value_type original, tmp;
997  ppc_fence_before(order);
998  __asm__ (
999  "1:\n"
1000  "lwarx %0,%y2\n"
1001  "xor %1,%0,%3\n"
1002  "stwcx. %1,%y2\n"
1003  "bne- 1b\n"
1004  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1005  : "b" (v)
1006  : "cc");
1007  ppc_fence_after(order);
1008  return original;
1009  }
1010 
1011  bool
1012  is_lock_free(void) const volatile
1013  {
1014  return true;
1015  }
1016 
1018 private:
1019  base_atomic(const base_atomic &) /* = delete */ ;
1020  void operator=(const base_atomic &) /* = delete */ ;
1021  storage_type v_;
1022 };
1023 
1024 template<typename T, bool Sign>
1025 class base_atomic<T, int, 4, Sign> {
1027  typedef T value_type;
1028  typedef T difference_type;
1029 public:
1030  explicit base_atomic(value_type v) : v_(v) {}
1031  base_atomic(void) {}
1032 
1033  void
1034  store(value_type v, memory_order order = memory_order_seq_cst) volatile
1035  {
1036  ppc_fence_before(order);
1037  const_cast<volatile value_type &>(v_) = v;
1038  ppc_fence_after_store(order);
1039  }
1040 
1041  value_type
1042  load(memory_order order = memory_order_seq_cst) const volatile
1043  {
1044  value_type v = const_cast<const volatile value_type &>(v_);
1045  __asm__ __volatile__ (
1046  "cmpw %0, %0\n"
1047  "bne- 1f\n"
1048  "1:\n"
1049  : "+b"(v)
1050  :
1051  : "cr0"
1052  );
1053  ppc_fence_after(order);
1054  return v;
1055  }
1056 
1057  value_type
1058  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
1059  {
1060  value_type original;
1061  ppc_fence_before(order);
1062  __asm__ (
1063  "1:\n"
1064  "lwarx %0,%y1\n"
1065  "stwcx. %2,%y1\n"
1066  "bne- 1b\n"
1067  : "=&b" (original), "+Z"(v_)
1068  : "b" (v)
1069  : "cr0"
1070  );
1071  ppc_fence_after(order);
1072  return original;
1073  }
1074 
1075  bool
1077  value_type & expected,
1078  value_type desired,
1079  memory_order success_order,
1080  memory_order failure_order) volatile
1081  {
1082  int success;
1083  ppc_fence_before(success_order);
1084  __asm__(
1085  "lwarx %0,%y2\n"
1086  "cmpw %0, %3\n"
1087  "bne- 2f\n"
1088  "stwcx. %4,%y2\n"
1089  "bne- 2f\n"
1090  "addi %1,0,1\n"
1091  "1:"
1092 
1094  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1095  : "b" (expected), "b" (desired)
1096  : "cr0"
1097  );
1098  if (success)
1099  ppc_fence_after(success_order);
1100  else
1101  ppc_fence_after(failure_order);
1102  return success;
1103  }
1104 
1105  bool
1107  value_type & expected,
1108  value_type desired,
1109  memory_order success_order,
1110  memory_order failure_order) volatile
1111  {
1112  int success;
1113  ppc_fence_before(success_order);
1114  __asm__(
1115  "0: lwarx %0,%y2\n"
1116  "cmpw %0, %3\n"
1117  "bne- 2f\n"
1118  "stwcx. %4,%y2\n"
1119  "bne- 0b\n"
1120  "addi %1,0,1\n"
1121  "1:"
1122 
1124  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1125  : "b" (expected), "b" (desired)
1126  : "cr0"
1127  );
1128  if (success)
1129  ppc_fence_after(success_order);
1130  else
1131  ppc_fence_after(failure_order);
1132  return success;
1133  }
1134 
1135  value_type
1136  fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
1137  {
1138  value_type original, tmp;
1139  ppc_fence_before(order);
1140  __asm__ (
1141  "1:\n"
1142  "lwarx %0,%y2\n"
1143  "add %1,%0,%3\n"
1144  "stwcx. %1,%y2\n"
1145  "bne- 1b\n"
1146  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1147  : "b" (v)
1148  : "cc");
1149  ppc_fence_after(order);
1150  return original;
1151  }
1152 
1153  value_type
1154  fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
1155  {
1156  value_type original, tmp;
1157  ppc_fence_before(order);
1158  __asm__ (
1159  "1:\n"
1160  "lwarx %0,%y2\n"
1161  "sub %1,%0,%3\n"
1162  "stwcx. %1,%y2\n"
1163  "bne- 1b\n"
1164  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1165  : "b" (v)
1166  : "cc");
1167  ppc_fence_after(order);
1168  return original;
1169  }
1170 
1171  value_type
1172  fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
1173  {
1174  value_type original, tmp;
1175  ppc_fence_before(order);
1176  __asm__ (
1177  "1:\n"
1178  "lwarx %0,%y2\n"
1179  "and %1,%0,%3\n"
1180  "stwcx. %1,%y2\n"
1181  "bne- 1b\n"
1182  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1183  : "b" (v)
1184  : "cc");
1185  ppc_fence_after(order);
1186  return original;
1187  }
1188 
1189  value_type
1190  fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
1191  {
1192  value_type original, tmp;
1193  ppc_fence_before(order);
1194  __asm__ (
1195  "1:\n"
1196  "lwarx %0,%y2\n"
1197  "or %1,%0,%3\n"
1198  "stwcx. %1,%y2\n"
1199  "bne- 1b\n"
1200  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1201  : "b" (v)
1202  : "cc");
1203  ppc_fence_after(order);
1204  return original;
1205  }
1206 
1207  value_type
1208  fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
1209  {
1210  value_type original, tmp;
1211  ppc_fence_before(order);
1212  __asm__ (
1213  "1:\n"
1214  "lwarx %0,%y2\n"
1215  "xor %1,%0,%3\n"
1216  "stwcx. %1,%y2\n"
1217  "bne- 1b\n"
1218  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1219  : "b" (v)
1220  : "cc");
1221  ppc_fence_after(order);
1222  return original;
1223  }
1224 
1225  bool
1226  is_lock_free(void) const volatile
1227  {
1228  return true;
1229  }
1230 
1232 private:
1233  base_atomic(const base_atomic &) /* = delete */ ;
1234  void operator=(const base_atomic &) /* = delete */ ;
1235  value_type v_;
1236 };
1237 
1238 #if defined(__powerpc64__)
1239 
1240 template<typename T, bool Sign>
1241 class base_atomic<T, int, 8, Sign> {
1242  typedef base_atomic this_type;
1243  typedef T value_type;
1244  typedef T difference_type;
1245 public:
1246  explicit base_atomic(value_type v) : v_(v) {}
1247  base_atomic(void) {}
1248 
1249  void
1250  store(value_type v, memory_order order = memory_order_seq_cst) volatile
1251  {
1252  ppc_fence_before(order);
1253  const_cast<volatile value_type &>(v_) = v;
1254  ppc_fence_after_store(order);
1255  }
1256 
1257  value_type
1258  load(memory_order order = memory_order_seq_cst) const volatile
1259  {
1260  value_type v = const_cast<const volatile value_type &>(v_);
1261  __asm__ __volatile__ (
1262  "cmpd %0, %0\n"
1263  "bne- 1f\n"
1264  "1:\n"
1265  : "+b"(v)
1266  :
1267  : "cr0"
1268  );
1269  ppc_fence_after(order);
1270  return v;
1271  }
1272 
1273  value_type
1274  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
1275  {
1276  value_type original;
1277  ppc_fence_before(order);
1278  __asm__ (
1279  "1:\n"
1280  "ldarx %0,%y1\n"
1281  "stdcx. %2,%y1\n"
1282  "bne- 1b\n"
1283  : "=&b" (original), "+Z"(v_)
1284  : "b" (v)
1285  : "cr0"
1286  );
1287  ppc_fence_after(order);
1288  return original;
1289  }
1290 
1291  bool
1293  value_type & expected,
1294  value_type desired,
1295  memory_order success_order,
1296  memory_order failure_order) volatile
1297  {
1298  int success;
1299  ppc_fence_before(success_order);
1300  __asm__(
1301  "ldarx %0,%y2\n"
1302  "cmpd %0, %3\n"
1303  "bne- 2f\n"
1304  "stdcx. %4,%y2\n"
1305  "bne- 2f\n"
1306  "addi %1,0,1\n"
1307  "1:"
1308 
1310  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1311  : "b" (expected), "b" (desired)
1312  : "cr0"
1313  );
1314  if (success)
1315  ppc_fence_after(success_order);
1316  else
1317  ppc_fence_after(failure_order);
1318  return success;
1319  }
1320 
1321  bool
1323  value_type & expected,
1324  value_type desired,
1325  memory_order success_order,
1326  memory_order failure_order) volatile
1327  {
1328  int success;
1329  ppc_fence_before(success_order);
1330  __asm__(
1331  "0: ldarx %0,%y2\n"
1332  "cmpd %0, %3\n"
1333  "bne- 2f\n"
1334  "stdcx. %4,%y2\n"
1335  "bne- 0b\n"
1336  "addi %1,0,1\n"
1337  "1:"
1338 
1340  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1341  : "b" (expected), "b" (desired)
1342  : "cr0"
1343  );
1344  if (success)
1345  ppc_fence_after(success_order);
1346  else
1347  ppc_fence_after(failure_order);
1348  return success;
1349  }
1350 
1351  value_type
1352  fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
1353  {
1354  value_type original, tmp;
1355  ppc_fence_before(order);
1356  __asm__ (
1357  "1:\n"
1358  "ldarx %0,%y2\n"
1359  "add %1,%0,%3\n"
1360  "stdcx. %1,%y2\n"
1361  "bne- 1b\n"
1362  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1363  : "b" (v)
1364  : "cc");
1365  ppc_fence_after(order);
1366  return original;
1367  }
1368 
1369  value_type
1370  fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
1371  {
1372  value_type original, tmp;
1373  ppc_fence_before(order);
1374  __asm__ (
1375  "1:\n"
1376  "ldarx %0,%y2\n"
1377  "sub %1,%0,%3\n"
1378  "stdcx. %1,%y2\n"
1379  "bne- 1b\n"
1380  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1381  : "b" (v)
1382  : "cc");
1383  ppc_fence_after(order);
1384  return original;
1385  }
1386 
1387  value_type
1388  fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
1389  {
1390  value_type original, tmp;
1391  ppc_fence_before(order);
1392  __asm__ (
1393  "1:\n"
1394  "ldarx %0,%y2\n"
1395  "and %1,%0,%3\n"
1396  "stdcx. %1,%y2\n"
1397  "bne- 1b\n"
1398  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1399  : "b" (v)
1400  : "cc");
1401  ppc_fence_after(order);
1402  return original;
1403  }
1404 
1405  value_type
1406  fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
1407  {
1408  value_type original, tmp;
1409  ppc_fence_before(order);
1410  __asm__ (
1411  "1:\n"
1412  "ldarx %0,%y2\n"
1413  "or %1,%0,%3\n"
1414  "stdcx. %1,%y2\n"
1415  "bne- 1b\n"
1416  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1417  : "b" (v)
1418  : "cc");
1419  ppc_fence_after(order);
1420  return original;
1421  }
1422 
1423  value_type
1424  fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
1425  {
1426  value_type original, tmp;
1427  ppc_fence_before(order);
1428  __asm__ (
1429  "1:\n"
1430  "ldarx %0,%y2\n"
1431  "xor %1,%0,%3\n"
1432  "stdcx. %1,%y2\n"
1433  "bne- 1b\n"
1434  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1435  : "b" (v)
1436  : "cc");
1437  ppc_fence_after(order);
1438  return original;
1439  }
1440 
1441  bool
1442  is_lock_free(void) const volatile
1443  {
1444  return true;
1445  }
1446 
1448 private:
1449  base_atomic(const base_atomic &) /* = delete */ ;
1450  void operator=(const base_atomic &) /* = delete */ ;
1451  value_type v_;
1452 };
1453 
1454 #endif
1455 
1456 /* pointer types */
1457 
1458 #if !defined(__powerpc64__)
1459 
1460 template<bool Sign>
1461 class base_atomic<void *, void *, 4, Sign> {
1463  typedef void * value_type;
1464 public:
1465  explicit base_atomic(value_type v) : v_(v) {}
1466  base_atomic(void) {}
1467 
1468  void
1469  store(value_type v, memory_order order = memory_order_seq_cst) volatile
1470  {
1471  ppc_fence_before(order);
1472  __asm__ (
1473  "stw %1, %0\n"
1474  : "+m" (v_)
1475  : "r" (v)
1476  );
1477  ppc_fence_after_store(order);
1478  }
1479 
1480  value_type
1481  load(memory_order order = memory_order_seq_cst) const volatile
1482  {
1483  value_type v;
1484  __asm__ (
1485  "lwz %0, %1\n"
1486  "cmpw %0, %0\n"
1487  "bne- 1f\n"
1488  "1:\n"
1489  : "=r"(v)
1490  : "m"(v_)
1491  : "cr0"
1492  );
1493  ppc_fence_after(order);
1494  return v;
1495  }
1496 
1497  value_type
1498  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
1499  {
1500  value_type original;
1501  ppc_fence_before(order);
1502  __asm__ (
1503  "1:\n"
1504  "lwarx %0,%y1\n"
1505  "stwcx. %2,%y1\n"
1506  "bne- 1b\n"
1507  : "=&b" (original), "+Z"(v_)
1508  : "b" (v)
1509  : "cr0"
1510  );
1511  ppc_fence_after(order);
1512  return original;
1513  }
1514 
1515  bool
1517  value_type & expected,
1518  value_type desired,
1519  memory_order success_order,
1520  memory_order failure_order) volatile
1521  {
1522  int success;
1523  ppc_fence_before(success_order);
1524  __asm__(
1525  "lwarx %0,%y2\n"
1526  "cmpw %0, %3\n"
1527  "bne- 2f\n"
1528  "stwcx. %4,%y2\n"
1529  "bne- 2f\n"
1530  "addi %1,0,1\n"
1531  "1:"
1532 
1534  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1535  : "b" (expected), "b" (desired)
1536  : "cr0"
1537  );
1538  if (success)
1539  ppc_fence_after(success_order);
1540  else
1541  ppc_fence_after(failure_order);
1542  return success;
1543  }
1544 
1545  bool
1547  value_type & expected,
1548  value_type desired,
1549  memory_order success_order,
1550  memory_order failure_order) volatile
1551  {
1552  int success;
1553  ppc_fence_before(success_order);
1554  __asm__(
1555  "0: lwarx %0,%y2\n"
1556  "cmpw %0, %3\n"
1557  "bne- 2f\n"
1558  "stwcx. %4,%y2\n"
1559  "bne- 0b\n"
1560  "addi %1,0,1\n"
1561  "1:"
1562 
1564  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1565  : "b" (expected), "b" (desired)
1566  : "cr0"
1567  );
1568  if (success)
1569  ppc_fence_after(success_order);
1570  else
1571  ppc_fence_after(failure_order);
1572  return success;
1573  }
1574 
1575  bool
1576  is_lock_free(void) const volatile
1577  {
1578  return true;
1579  }
1580 
1582 private:
1583  base_atomic(const base_atomic &) /* = delete */ ;
1584  void operator=(const base_atomic &) /* = delete */ ;
1585  value_type v_;
1586 };
1587 
1588 template<typename T, bool Sign>
1589 class base_atomic<T *, void *, 4, Sign> {
1591  typedef T * value_type;
1592  typedef ptrdiff_t difference_type;
1593 public:
1594  explicit base_atomic(value_type v) : v_(v) {}
1595  base_atomic(void) {}
1596 
1597  void
1598  store(value_type v, memory_order order = memory_order_seq_cst) volatile
1599  {
1600  ppc_fence_before(order);
1601  __asm__ (
1602  "stw %1, %0\n"
1603  : "+m" (v_)
1604  : "r" (v)
1605  );
1606  ppc_fence_after_store(order);
1607  }
1608 
1609  value_type
1610  load(memory_order order = memory_order_seq_cst) const volatile
1611  {
1612  value_type v;
1613  __asm__ (
1614  "lwz %0, %1\n"
1615  "cmpw %0, %0\n"
1616  "bne- 1f\n"
1617  "1:\n"
1618  : "=r"(v)
1619  : "m"(v_)
1620  : "cr0"
1621  );
1622  ppc_fence_after(order);
1623  return v;
1624  }
1625 
1626  value_type
1627  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
1628  {
1629  value_type original;
1630  ppc_fence_before(order);
1631  __asm__ (
1632  "1:\n"
1633  "lwarx %0,%y1\n"
1634  "stwcx. %2,%y1\n"
1635  "bne- 1b\n"
1636  : "=&b" (original), "+Z"(v_)
1637  : "b" (v)
1638  : "cr0"
1639  );
1640  ppc_fence_after(order);
1641  return original;
1642  }
1643 
1644  bool
1646  value_type & expected,
1647  value_type desired,
1648  memory_order success_order,
1649  memory_order failure_order) volatile
1650  {
1651  int success;
1652  ppc_fence_before(success_order);
1653  __asm__(
1654  "lwarx %0,%y2\n"
1655  "cmpw %0, %3\n"
1656  "bne- 2f\n"
1657  "stwcx. %4,%y2\n"
1658  "bne- 2f\n"
1659  "addi %1,0,1\n"
1660  "1:"
1661 
1663  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1664  : "b" (expected), "b" (desired)
1665  : "cr0"
1666  );
1667  if (success)
1668  ppc_fence_after(success_order);
1669  else
1670  ppc_fence_after(failure_order);
1671  return success;
1672  }
1673 
1674  bool
1676  value_type & expected,
1677  value_type desired,
1678  memory_order success_order,
1679  memory_order failure_order) volatile
1680  {
1681  int success;
1682  ppc_fence_before(success_order);
1683  __asm__(
1684  "0: lwarx %0,%y2\n"
1685  "cmpw %0, %3\n"
1686  "bne- 2f\n"
1687  "stwcx. %4,%y2\n"
1688  "bne- 0b\n"
1689  "addi %1,0,1\n"
1690  "1:"
1691 
1693  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1694  : "b" (expected), "b" (desired)
1695  : "cr0"
1696  );
1697  if (success)
1698  ppc_fence_after(success_order);
1699  else
1700  ppc_fence_after(failure_order);
1701  return success;
1702  }
1703 
1704  value_type
1705  fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
1706  {
1707  v = v * sizeof(*v_);
1708  value_type original, tmp;
1709  ppc_fence_before(order);
1710  __asm__ (
1711  "1:\n"
1712  "lwarx %0,%y2\n"
1713  "add %1,%0,%3\n"
1714  "stwcx. %1,%y2\n"
1715  "bne- 1b\n"
1716  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1717  : "b" (v)
1718  : "cc");
1719  ppc_fence_after(order);
1720  return original;
1721  }
1722 
1723  value_type
1724  fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
1725  {
1726  v = v * sizeof(*v_);
1727  value_type original, tmp;
1728  ppc_fence_before(order);
1729  __asm__ (
1730  "1:\n"
1731  "lwarx %0,%y2\n"
1732  "sub %1,%0,%3\n"
1733  "stwcx. %1,%y2\n"
1734  "bne- 1b\n"
1735  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
1736  : "b" (v)
1737  : "cc");
1738  ppc_fence_after(order);
1739  return original;
1740  }
1741 
1742  bool
1743  is_lock_free(void) const volatile
1744  {
1745  return true;
1746  }
1747 
1749 private:
1750  base_atomic(const base_atomic &) /* = delete */ ;
1751  void operator=(const base_atomic &) /* = delete */ ;
1752  value_type v_;
1753 };
1754 
1755 #else
1756 
1757 template<bool Sign>
1758 class base_atomic<void *, void *, 8, Sign> {
1759  typedef base_atomic this_type;
1760  typedef void * value_type;
1761 public:
1762  explicit base_atomic(value_type v) : v_(v) {}
1763  base_atomic(void) {}
1764 
1765  void
1766  store(value_type v, memory_order order = memory_order_seq_cst) volatile
1767  {
1768  ppc_fence_before(order);
1769  __asm__ (
1770  "std %1, %0\n"
1771  : "+m" (v_)
1772  : "r" (v)
1773  );
1774  ppc_fence_after_store(order);
1775  }
1776 
1777  value_type
1778  load(memory_order order = memory_order_seq_cst) const volatile
1779  {
1780  value_type v;
1781  __asm__ (
1782  "ld %0, %1\n"
1783  "cmpd %0, %0\n"
1784  "bne- 1f\n"
1785  "1:\n"
1786  : "=r"(v)
1787  : "m"(v_)
1788  : "cr0"
1789  );
1790  ppc_fence_after(order);
1791  return v;
1792  }
1793 
1794  value_type
1795  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
1796  {
1797  value_type original;
1798  ppc_fence_before(order);
1799  __asm__ (
1800  "1:\n"
1801  "ldarx %0,%y1\n"
1802  "stdcx. %2,%y1\n"
1803  "bne- 1b\n"
1804  : "=&b" (original), "+Z"(v_)
1805  : "b" (v)
1806  : "cr0"
1807  );
1808  ppc_fence_after(order);
1809  return original;
1810  }
1811 
1812  bool
1814  value_type & expected,
1815  value_type desired,
1816  memory_order success_order,
1817  memory_order failure_order) volatile
1818  {
1819  int success;
1820  ppc_fence_before(success_order);
1821  __asm__(
1822  "ldarx %0,%y2\n"
1823  "cmpd %0, %3\n"
1824  "bne- 2f\n"
1825  "stdcx. %4,%y2\n"
1826  "bne- 2f\n"
1827  "addi %1,0,1\n"
1828  "1:"
1829 
1831  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1832  : "b" (expected), "b" (desired)
1833  : "cr0"
1834  );
1835  if (success)
1836  ppc_fence_after(success_order);
1837  else
1838  ppc_fence_after(failure_order);
1839  return success;
1840  }
1841 
1842  bool
1844  value_type & expected,
1845  value_type desired,
1846  memory_order success_order,
1847  memory_order failure_order) volatile
1848  {
1849  int success;
1850  ppc_fence_before(success_order);
1851  __asm__(
1852  "0: ldarx %0,%y2\n"
1853  "cmpd %0, %3\n"
1854  "bne- 2f\n"
1855  "stdcx. %4,%y2\n"
1856  "bne- 0b\n"
1857  "addi %1,0,1\n"
1858  "1:"
1859 
1861  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1862  : "b" (expected), "b" (desired)
1863  : "cr0"
1864  );
1865  if (success)
1866  ppc_fence_after(success_order);
1867  else
1868  ppc_fence_after(failure_order);
1869  return success;
1870  }
1871 
1872  bool
1873  is_lock_free(void) const volatile
1874  {
1875  return true;
1876  }
1877 
1879 private:
1880  base_atomic(const base_atomic &) /* = delete */ ;
1881  void operator=(const base_atomic &) /* = delete */ ;
1882  value_type v_;
1883 };
1884 
1885 template<typename T, bool Sign>
1886 class base_atomic<T *, void *, 8, Sign> {
1887  typedef base_atomic this_type;
1888  typedef T * value_type;
1889  typedef ptrdiff_t difference_type;
1890 public:
1891  explicit base_atomic(value_type v) : v_(v) {}
1892  base_atomic(void) {}
1893 
1894  void
1895  store(value_type v, memory_order order = memory_order_seq_cst) volatile
1896  {
1897  ppc_fence_before(order);
1898  __asm__ (
1899  "std %1, %0\n"
1900  : "+m" (v_)
1901  : "r" (v)
1902  );
1903  ppc_fence_after_store(order);
1904  }
1905 
1906  value_type
1907  load(memory_order order = memory_order_seq_cst) const volatile
1908  {
1909  value_type v;
1910  __asm__ (
1911  "ld %0, %1\n"
1912  "cmpd %0, %0\n"
1913  "bne- 1f\n"
1914  "1:\n"
1915  : "=r"(v)
1916  : "m"(v_)
1917  : "cr0"
1918  );
1919  ppc_fence_after(order);
1920  return v;
1921  }
1922 
1923  value_type
1924  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
1925  {
1926  value_type original;
1927  ppc_fence_before(order);
1928  __asm__ (
1929  "1:\n"
1930  "ldarx %0,%y1\n"
1931  "stdcx. %2,%y1\n"
1932  "bne- 1b\n"
1933  : "=&b" (original), "+Z"(v_)
1934  : "b" (v)
1935  : "cr0"
1936  );
1937  ppc_fence_after(order);
1938  return original;
1939  }
1940 
1941  bool
1943  value_type & expected,
1944  value_type desired,
1945  memory_order success_order,
1946  memory_order failure_order) volatile
1947  {
1948  int success;
1949  ppc_fence_before(success_order);
1950  __asm__(
1951  "ldarx %0,%y2\n"
1952  "cmpd %0, %3\n"
1953  "bne- 2f\n"
1954  "stdcx. %4,%y2\n"
1955  "bne- 2f\n"
1956  "addi %1,0,1\n"
1957  "1:"
1958 
1960  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1961  : "b" (expected), "b" (desired)
1962  : "cr0"
1963  );
1964  if (success)
1965  ppc_fence_after(success_order);
1966  else
1967  ppc_fence_after(failure_order);
1968  return success;
1969  }
1970 
1971  bool
1973  value_type & expected,
1974  value_type desired,
1975  memory_order success_order,
1976  memory_order failure_order) volatile
1977  {
1978  int success;
1979  ppc_fence_before(success_order);
1980  __asm__(
1981  "0: ldarx %0,%y2\n"
1982  "cmpd %0, %3\n"
1983  "bne- 2f\n"
1984  "stdcx. %4,%y2\n"
1985  "bne- 0b\n"
1986  "addi %1,0,1\n"
1987  "1:"
1988 
1990  : "=&b" (expected), "=&b" (success), "+Z"(v_)
1991  : "b" (expected), "b" (desired)
1992  : "cr0"
1993  );
1994  if (success)
1995  ppc_fence_after(success_order);
1996  else
1997  ppc_fence_after(failure_order);
1998  return success;
1999  }
2000 
2001  value_type
2002  fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
2003  {
2004  v = v * sizeof(*v_);
2005  value_type original, tmp;
2006  ppc_fence_before(order);
2007  __asm__ (
2008  "1:\n"
2009  "ldarx %0,%y2\n"
2010  "add %1,%0,%3\n"
2011  "stdcx. %1,%y2\n"
2012  "bne- 1b\n"
2013  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
2014  : "b" (v)
2015  : "cc");
2016  ppc_fence_after(order);
2017  return original;
2018  }
2019 
2020  value_type
2021  fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
2022  {
2023  v = v * sizeof(*v_);
2024  value_type original, tmp;
2025  ppc_fence_before(order);
2026  __asm__ (
2027  "1:\n"
2028  "ldarx %0,%y2\n"
2029  "sub %1,%0,%3\n"
2030  "stdcx. %1,%y2\n"
2031  "bne- 1b\n"
2032  : "=&b" (original), "=&b" (tmp), "+Z"(v_)
2033  : "b" (v)
2034  : "cc");
2035  ppc_fence_after(order);
2036  return original;
2037  }
2038 
2039  bool
2040  is_lock_free(void) const volatile
2041  {
2042  return true;
2043  }
2044 
2046 private:
2047  base_atomic(const base_atomic &) /* = delete */ ;
2048  void operator=(const base_atomic &) /* = delete */ ;
2049  value_type v_;
2050 };
2051 
2052 #endif
2053 
2054 /* generic */
2055 
2056 template<typename T, bool Sign>
2057 class base_atomic<T, void, 1, Sign> {
2059  typedef T value_type;
2060  typedef uint32_t storage_type;
2061 public:
2062  explicit base_atomic(value_type v) : v_(0)
2063  {
2064  memcpy(&v_, &v, sizeof(value_type));
2065  }
2066  base_atomic(void) : v_(0) {}
2067 
2068  void
2069  store(value_type v, memory_order order = memory_order_seq_cst) volatile
2070  {
2071  storage_type tmp = 0;
2072  memcpy(&tmp, &v, sizeof(value_type));
2073  ppc_fence_before(order);
2074  __asm__ (
2075  "stw %1, %0\n"
2076  : "+m" (v_)
2077  : "r" (tmp)
2078  );
2079  ppc_fence_after_store(order);
2080  }
2081 
2082  value_type
2083  load(memory_order order = memory_order_seq_cst) const volatile
2084  {
2085  storage_type tmp;
2086  __asm__ __volatile__ (
2087  "lwz %0, %1\n"
2088  "cmpw %0, %0\n"
2089  "bne- 1f\n"
2090  "1:\n"
2091  : "=r"(tmp)
2092  : "m"(v_)
2093  : "cr0"
2094  );
2095  ppc_fence_after(order);
2096 
2097  value_type v;
2098  memcpy(&v, &tmp, sizeof(value_type));
2099  return v;
2100  }
2101 
2102  value_type
2103  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
2104  {
2105  storage_type tmp = 0, original;
2106  memcpy(&tmp, &v, sizeof(value_type));
2107  ppc_fence_before(order);
2108  __asm__ (
2109  "1:\n"
2110  "lwarx %0,%y1\n"
2111  "stwcx. %2,%y1\n"
2112  "bne- 1b\n"
2113  : "=&b" (original), "+Z"(v_)
2114  : "b" (tmp)
2115  : "cr0"
2116  );
2117  ppc_fence_after(order);
2118  memcpy(&v, &original, sizeof(value_type));
2119  return v;
2120  }
2121 
2122  bool
2124  value_type & expected,
2125  value_type desired,
2126  memory_order success_order,
2127  memory_order failure_order) volatile
2128  {
2129  storage_type expected_s = 0, desired_s = 0;
2130  memcpy(&expected_s, &expected, sizeof(value_type));
2131  memcpy(&desired_s, &desired, sizeof(value_type));
2132 
2133  int success;
2134  ppc_fence_before(success_order);
2135  __asm__(
2136  "lwarx %0,%y2\n"
2137  "cmpw %0, %3\n"
2138  "bne- 2f\n"
2139  "stwcx. %4,%y2\n"
2140  "bne- 2f\n"
2141  "addi %1,0,1\n"
2142  "1:"
2143 
2145  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2146  : "b" (expected_s), "b" (desired_s)
2147  : "cr0"
2148  );
2149  if (success)
2150  ppc_fence_after(success_order);
2151  else
2152  ppc_fence_after(failure_order);
2153  memcpy(&expected, &expected_s, sizeof(value_type));
2154  return success;
2155  }
2156 
2157  bool
2159  value_type & expected,
2160  value_type desired,
2161  memory_order success_order,
2162  memory_order failure_order) volatile
2163  {
2164  storage_type expected_s = 0, desired_s = 0;
2165  memcpy(&expected_s, &expected, sizeof(value_type));
2166  memcpy(&desired_s, &desired, sizeof(value_type));
2167 
2168  int success;
2169  ppc_fence_before(success_order);
2170  __asm__(
2171  "0: lwarx %0,%y2\n"
2172  "cmpw %0, %3\n"
2173  "bne- 2f\n"
2174  "stwcx. %4,%y2\n"
2175  "bne- 0b\n"
2176  "addi %1,0,1\n"
2177  "1:"
2178 
2180  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2181  : "b" (expected_s), "b" (desired_s)
2182  : "cr0"
2183  );
2184  if (success)
2185  ppc_fence_after(success_order);
2186  else
2187  ppc_fence_after(failure_order);
2188  memcpy(&expected, &expected_s, sizeof(value_type));
2189  return success;
2190  }
2191 
2192  bool
2193  is_lock_free(void) const volatile
2194  {
2195  return true;
2196  }
2197 
2199 private:
2200  base_atomic(const base_atomic &) /* = delete */ ;
2201  void operator=(const base_atomic &) /* = delete */ ;
2202  storage_type v_;
2203 };
2204 
2205 template<typename T, bool Sign>
2206 class base_atomic<T, void, 2, Sign> {
2208  typedef T value_type;
2209  typedef uint32_t storage_type;
2210 public:
2211  explicit base_atomic(value_type v) : v_(0)
2212  {
2213  memcpy(&v_, &v, sizeof(value_type));
2214  }
2215  base_atomic(void) : v_(0) {}
2216 
2217  void
2218  store(value_type v, memory_order order = memory_order_seq_cst) volatile
2219  {
2220  storage_type tmp = 0;
2221  memcpy(&tmp, &v, sizeof(value_type));
2222  ppc_fence_before(order);
2223  __asm__ (
2224  "stw %1, %0\n"
2225  : "+m" (v_)
2226  : "r" (tmp)
2227  );
2228  ppc_fence_after_store(order);
2229  }
2230 
2231  value_type
2232  load(memory_order order = memory_order_seq_cst) const volatile
2233  {
2234  storage_type tmp;
2235  __asm__ __volatile__ (
2236  "lwz %0, %1\n"
2237  "cmpw %0, %0\n"
2238  "bne- 1f\n"
2239  "1:\n"
2240  : "=r"(tmp)
2241  : "m"(v_)
2242  : "cr0"
2243  );
2244  ppc_fence_after(order);
2245 
2246  value_type v;
2247  memcpy(&v, &tmp, sizeof(value_type));
2248  return v;
2249  }
2250 
2251  value_type
2252  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
2253  {
2254  storage_type tmp = 0, original;
2255  memcpy(&tmp, &v, sizeof(value_type));
2256  ppc_fence_before(order);
2257  __asm__ (
2258  "1:\n"
2259  "lwarx %0,%y1\n"
2260  "stwcx. %2,%y1\n"
2261  "bne- 1b\n"
2262  : "=&b" (original), "+Z"(v_)
2263  : "b" (tmp)
2264  : "cr0"
2265  );
2266  ppc_fence_after(order);
2267  memcpy(&v, &original, sizeof(value_type));
2268  return v;
2269  }
2270 
2271  bool
2273  value_type & expected,
2274  value_type desired,
2275  memory_order success_order,
2276  memory_order failure_order) volatile
2277  {
2278  storage_type expected_s = 0, desired_s = 0;
2279  memcpy(&expected_s, &expected, sizeof(value_type));
2280  memcpy(&desired_s, &desired, sizeof(value_type));
2281 
2282  int success;
2283  ppc_fence_before(success_order);
2284  __asm__(
2285  "lwarx %0,%y2\n"
2286  "cmpw %0, %3\n"
2287  "bne- 2f\n"
2288  "stwcx. %4,%y2\n"
2289  "bne- 2f\n"
2290  "addi %1,0,1\n"
2291  "1:"
2292 
2294  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2295  : "b" (expected_s), "b" (desired_s)
2296  : "cr0"
2297  );
2298  if (success)
2299  ppc_fence_after(success_order);
2300  else
2301  ppc_fence_after(failure_order);
2302  memcpy(&expected, &expected_s, sizeof(value_type));
2303  return success;
2304  }
2305 
2306  bool
2308  value_type & expected,
2309  value_type desired,
2310  memory_order success_order,
2311  memory_order failure_order) volatile
2312  {
2313  storage_type expected_s = 0, desired_s = 0;
2314  memcpy(&expected_s, &expected, sizeof(value_type));
2315  memcpy(&desired_s, &desired, sizeof(value_type));
2316 
2317  int success;
2318  ppc_fence_before(success_order);
2319  __asm__(
2320  "0: lwarx %0,%y2\n"
2321  "cmpw %0, %3\n"
2322  "bne- 2f\n"
2323  "stwcx. %4,%y2\n"
2324  "bne- 0b\n"
2325  "addi %1,0,1\n"
2326  "1:"
2327 
2329  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2330  : "b" (expected_s), "b" (desired_s)
2331  : "cr0"
2332  );
2333  if (success)
2334  ppc_fence_after(success_order);
2335  else
2336  ppc_fence_after(failure_order);
2337  memcpy(&expected, &expected_s, sizeof(value_type));
2338  return success;
2339  }
2340 
2341  bool
2342  is_lock_free(void) const volatile
2343  {
2344  return true;
2345  }
2346 
2348 private:
2349  base_atomic(const base_atomic &) /* = delete */ ;
2350  void operator=(const base_atomic &) /* = delete */ ;
2351  storage_type v_;
2352 };
2353 
2354 template<typename T, bool Sign>
2355 class base_atomic<T, void, 4, Sign> {
2357  typedef T value_type;
2358  typedef uint32_t storage_type;
2359 public:
2360  explicit base_atomic(value_type v) : v_(0)
2361  {
2362  memcpy(&v_, &v, sizeof(value_type));
2363  }
2364  base_atomic(void) : v_(0) {}
2365 
2366  void
2367  store(value_type v, memory_order order = memory_order_seq_cst) volatile
2368  {
2369  storage_type tmp = 0;
2370  memcpy(&tmp, &v, sizeof(value_type));
2371  ppc_fence_before(order);
2372  __asm__ (
2373  "stw %1, %0\n"
2374  : "+m" (v_)
2375  : "r" (tmp)
2376  );
2377  ppc_fence_after_store(order);
2378  }
2379 
2380  value_type
2381  load(memory_order order = memory_order_seq_cst) const volatile
2382  {
2383  storage_type tmp;
2384  __asm__ __volatile__ (
2385  "lwz %0, %1\n"
2386  "cmpw %0, %0\n"
2387  "bne- 1f\n"
2388  "1:\n"
2389  : "=r"(tmp)
2390  : "m"(v_)
2391  : "cr0"
2392  );
2393  ppc_fence_after(order);
2394 
2395  value_type v;
2396  memcpy(&v, &tmp, sizeof(value_type));
2397  return v;
2398  }
2399 
2400  value_type
2401  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
2402  {
2403  storage_type tmp = 0, original;
2404  memcpy(&tmp, &v, sizeof(value_type));
2405  ppc_fence_before(order);
2406  __asm__ (
2407  "1:\n"
2408  "lwarx %0,%y1\n"
2409  "stwcx. %2,%y1\n"
2410  "bne- 1b\n"
2411  : "=&b" (original), "+Z"(v_)
2412  : "b" (tmp)
2413  : "cr0"
2414  );
2415  ppc_fence_after(order);
2416  memcpy(&v, &original, sizeof(value_type));
2417  return v;
2418  }
2419 
2420  bool
2422  value_type & expected,
2423  value_type desired,
2424  memory_order success_order,
2425  memory_order failure_order) volatile
2426  {
2427  storage_type expected_s = 0, desired_s = 0;
2428  memcpy(&expected_s, &expected, sizeof(value_type));
2429  memcpy(&desired_s, &desired, sizeof(value_type));
2430 
2431  int success;
2432  ppc_fence_before(success_order);
2433  __asm__(
2434  "lwarx %0,%y2\n"
2435  "cmpw %0, %3\n"
2436  "bne- 2f\n"
2437  "stwcx. %4,%y2\n"
2438  "bne- 2f\n"
2439  "addi %1,0,1\n"
2440  "1:"
2441 
2443  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2444  : "b" (expected_s), "b" (desired_s)
2445  : "cr0"
2446  );
2447  if (success)
2448  ppc_fence_after(success_order);
2449  else
2450  ppc_fence_after(failure_order);
2451  memcpy(&expected, &expected_s, sizeof(value_type));
2452  return success;
2453  }
2454 
2455  bool
2457  value_type & expected,
2458  value_type desired,
2459  memory_order success_order,
2460  memory_order failure_order) volatile
2461  {
2462  storage_type expected_s = 0, desired_s = 0;
2463  memcpy(&expected_s, &expected, sizeof(value_type));
2464  memcpy(&desired_s, &desired, sizeof(value_type));
2465 
2466  int success;
2467  ppc_fence_before(success_order);
2468  __asm__(
2469  "0: lwarx %0,%y2\n"
2470  "cmpw %0, %3\n"
2471  "bne- 2f\n"
2472  "stwcx. %4,%y2\n"
2473  "bne- 0b\n"
2474  "addi %1,0,1\n"
2475  "1:"
2476 
2478  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2479  : "b" (expected_s), "b" (desired_s)
2480  : "cr0"
2481  );
2482  if (success)
2483  ppc_fence_after(success_order);
2484  else
2485  ppc_fence_after(failure_order);
2486  memcpy(&expected, &expected_s, sizeof(value_type));
2487  return success;
2488  }
2489 
2490  bool
2491  is_lock_free(void) const volatile
2492  {
2493  return true;
2494  }
2495 
2497 private:
2498  base_atomic(const base_atomic &) /* = delete */ ;
2499  void operator=(const base_atomic &) /* = delete */ ;
2500  storage_type v_;
2501 };
2502 
2503 #if defined(__powerpc64__)
2504 
2505 template<typename T, bool Sign>
2506 class base_atomic<T, void, 8, Sign> {
2507  typedef base_atomic this_type;
2508  typedef T value_type;
2509  typedef uint64_t storage_type;
2510 public:
2511  explicit base_atomic(value_type v)
2512  {
2513  memcpy(&v_, &v, sizeof(value_type));
2514  }
2515  base_atomic(void) {}
2516 
2517  void
2518  store(value_type v, memory_order order = memory_order_seq_cst) volatile
2519  {
2520  storage_type tmp;
2521  memcpy(&tmp, &v, sizeof(value_type));
2522  ppc_fence_before(order);
2523  __asm__ (
2524  "std %1, %0\n"
2525  : "+m" (v_)
2526  : "r" (tmp)
2527  );
2528  ppc_fence_after_store(order);
2529  }
2530 
2531  value_type
2532  load(memory_order order = memory_order_seq_cst) const volatile
2533  {
2534  storage_type tmp;
2535  __asm__ __volatile__ (
2536  "ld %0, %1\n"
2537  "cmpd %0, %0\n"
2538  "bne- 1f\n"
2539  "1:\n"
2540  : "=r"(tmp)
2541  : "m"(v_)
2542  : "cr0"
2543  );
2544  ppc_fence_after(order);
2545 
2546  value_type v;
2547  memcpy(&v, &tmp, sizeof(value_type));
2548  return v;
2549  }
2550 
2551  value_type
2552  exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
2553  {
2554  storage_type tmp = 0, original;
2555  memcpy(&tmp, &v, sizeof(value_type));
2556  ppc_fence_before(order);
2557  __asm__ (
2558  "1:\n"
2559  "ldarx %0,%y1\n"
2560  "stdcx. %2,%y1\n"
2561  "bne- 1b\n"
2562  : "=&b" (original), "+Z"(v_)
2563  : "b" (tmp)
2564  : "cr0"
2565  );
2566  ppc_fence_after(order);
2567  memcpy(&v, &original, sizeof(value_type));
2568  return v;
2569  }
2570 
2571  bool
2573  value_type & expected,
2574  value_type desired,
2575  memory_order success_order,
2576  memory_order failure_order) volatile
2577  {
2578  storage_type expected_s, desired_s;
2579  memcpy(&expected_s, &expected, sizeof(value_type));
2580  memcpy(&desired_s, &desired, sizeof(value_type));
2581 
2582  int success;
2583  ppc_fence_before(success_order);
2584  __asm__(
2585  "ldarx %0,%y2\n"
2586  "cmpd %0, %3\n"
2587  "bne- 2f\n"
2588  "stdcx. %4,%y2\n"
2589  "bne- 2f\n"
2590  "addi %1,0,1\n"
2591  "1:"
2592 
2594  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2595  : "b" (expected_s), "b" (desired_s)
2596  : "cr0"
2597  );
2598  if (success)
2599  ppc_fence_after(success_order);
2600  else
2601  ppc_fence_after(failure_order);
2602  memcpy(&expected, &expected_s, sizeof(value_type));
2603  return success;
2604  }
2605 
2606  bool
2608  value_type & expected,
2609  value_type desired,
2610  memory_order success_order,
2611  memory_order failure_order) volatile
2612  {
2613  storage_type expected_s, desired_s;
2614  memcpy(&expected_s, &expected, sizeof(value_type));
2615  memcpy(&desired_s, &desired, sizeof(value_type));
2616 
2617  int success;
2618  ppc_fence_before(success_order);
2619  __asm__(
2620  "0: ldarx %0,%y2\n"
2621  "cmpd %0, %3\n"
2622  "bne- 2f\n"
2623  "stdcx. %4,%y2\n"
2624  "bne- 0b\n"
2625  "addi %1,0,1\n"
2626  "1:"
2627 
2629  : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
2630  : "b" (expected_s), "b" (desired_s)
2631  : "cr0"
2632  );
2633  if (success)
2634  ppc_fence_after(success_order);
2635  else
2636  ppc_fence_after(failure_order);
2637  memcpy(&expected, &expected_s, sizeof(value_type));
2638  return success;
2639  }
2640 
2641  bool
2642  is_lock_free(void) const volatile
2643  {
2644  return true;
2645  }
2646 
2648 private:
2649  base_atomic(const base_atomic &) /* = delete */ ;
2650  void operator=(const base_atomic &) /* = delete */ ;
2651  storage_type v_;
2652 };
2653 #endif
2654 
2655 }
2656 }
2657 
2658 #define BOOST_ATOMIC_THREAD_FENCE 2
2659 inline void
2661 {
2662  switch(order) {
2663  case memory_order_acquire:
2664  __asm__ __volatile__ ("isync" ::: "memory");
2665  break;
2666  case memory_order_release:
2667 #if defined(__powerpc64__)
2668  __asm__ __volatile__ ("lwsync" ::: "memory");
2669  break;
2670 #endif
2671  case memory_order_acq_rel:
2672  case memory_order_seq_cst:
2673  __asm__ __volatile__ ("sync" ::: "memory");
2674  default:;
2675  }
2676 }
2677 
2678 #define BOOST_ATOMIC_SIGNAL_FENCE 2
2679 inline void
2681 {
2682  switch(order) {
2683  case memory_order_acquire:
2684  case memory_order_release:
2685  case memory_order_acq_rel:
2686  case memory_order_seq_cst:
2687  __asm__ __volatile__ ("" ::: "memory");
2688  break;
2689  default:;
2690  }
2691 }
2692 
2693 }
2694 
2695 #endif
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:418
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:2252
static void ppc_fence_after(memory_order order)
Definition: gcc-ppc.hpp:116
value_type fetch_sub(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:497
value_type fetch_sub(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:939
value_type fetch_and(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:516
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:2158
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order, memory_order) volatile
Definition: base.hpp:166
value_type fetch_or(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:313
value_type fetch_sub(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:276
Definition: base.hpp:116
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:593
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:227
static void atomic_signal_fence(memory_order)
Definition: gcc-armv6+.hpp:193
static void atomic_thread_fence(memory_order order)
Definition: gcc-armv6+.hpp:179
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:384
value_type fetch_add(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:478
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:180
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:2456
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:1481
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:669
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:372
value_type fetch_and(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:958
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:2083
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:1516
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1034
static void ppc_fence_after_store(memory_order order)
Definition: gcc-ppc.hpp:130
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:2367
value_type fetch_and(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:737
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:605
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:1546
#define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
Definition: base.hpp:107
value_type fetch_add(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:257
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1627
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:400
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:842
static void ppc_fence_before(memory_order order)
Definition: gcc-ppc.hpp:100
#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
Definition: gcc-ppc.hpp:79
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:2401
value_type fetch_xor(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:552
value_type fetch_or(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:976
value_type load(memory_order=memory_order_seq_cst) volatileconst
Definition: base.hpp:156
value_type fetch_and(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:295
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:826
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1598
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:1645
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:1106
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:448
value_type fetch_sub(difference_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1724
value_type fetch_sub(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1154
value_type fetch_or(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:534
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:164
#define BOOST_ATOMIC_DECLARE_BASE_OPERATORS
Definition: base.hpp:19
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:2421
void store(value_type v, memory_order=memory_order_seq_cst) volatile
Definition: base.hpp:148
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:2272
value_type fetch_or(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:755
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:2123
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:890
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:2069
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1058
value_type fetch_or(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1190
value_type fetch_xor(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1208
value_type fetch_xor(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:773
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:1076
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: base.hpp:184
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:814
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:860
value_type fetch_xor(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:994
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:2232
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:198
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:621
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:1675
bool compare_exchange_weak(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:639
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1469
value_type exchange(value_type v, memory_order=memory_order_seq_cst) volatile
Definition: base.hpp:194
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:152
#define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
Definition: base.hpp:111
bool is_lock_free(void) const volatile
Definition: base.hpp:206
bool compare_exchange_strong(value_type &expected, value_type desired, memory_order success_order, memory_order failure_order) volatile
Definition: gcc-ppc.hpp:2307
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:1610
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:2381
value_type fetch_add(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1136
value_type fetch_add(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:699
value_type fetch_add(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:920
value_type fetch_sub(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:718
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1498
value_type load(memory_order order=memory_order_seq_cst) const volatile
Definition: gcc-ppc.hpp:1042
void store(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:2218
value_type fetch_add(difference_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1705
value_type exchange(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:2103
void operator=(const base_atomic &)
value_type fetch_and(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:1172
value_type fetch_xor(value_type v, memory_order order=memory_order_seq_cst) volatile
Definition: gcc-ppc.hpp:331


rosatomic
Author(s): Josh Faust
autogenerated on Fri Apr 5 2019 02:16:35