#ifndef BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
#define BOOST_DETAIL_ATOMIC_GCC_PPC_HPP

#include <boost/atomic/detail/base.hpp>
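
/*
  Implementation overview (a summary of what the code below does):

  - All read-modify-write operations are built from lwarx/stwcx. retry loops
    (ldarx/stdcx. in the 64-bit specializations on powerpc64).

  - Plain atomic loads are emitted as

        lwz  %rX, addr
        cmpw %rX, %rX
        bne- 1f
    1:

    The compare of the loaded register against itself and the never-taken
    branch create an artificial dependency on the loaded value, so the
    "isync" issued afterwards acts as an acquire barrier for that load
    without paying for a full "sync" on every load.

  - Ordering is produced by three small helpers:
      ppc_fence_before()      - "lwsync" before a releasing access on
                                powerpc64 (on 32-bit builds the code falls
                                through to a full "sync"), and "sync" for
                                seq_cst;
      ppc_fence_after()       - "isync" after acquiring accesses, plus a
                                compiler barrier for consume;
      ppc_fence_after_store() - "sync" after a seq_cst store.
*/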
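
/*
  Lock-free capability macros, following the C++11 ATOMIC_xxx_LOCK_FREE
  convention: 2 means "always lock-free", 0 means "never lock-free".
  Everything up to the pointer/word size is lock-free; long long is
  lock-free only on powerpc64, where 64-bit ldarx/stdcx. is available.
*/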
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE 2
#if defined(__powerpc64__)
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
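
/*
  BOOST_ATOMIC_ASM_SLOWPATH_CLEAR terminates the compare_exchange asm
  sequences below: the fast path sets the success flag with "addi %1,0,1",
  while a failed compare or a failed stwcx. branches to label 2, which
  clears the flag with "addi %1,0,0" and jumps back behind the sequence.
  With the GNU assembler the failure path is moved out of line via
  ".subsection 2"/".previous"; Apple's assembler reportedly does not handle
  .subsection, so on __APPLE__ the clear stays inline behind an
  unconditional branch.
*/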
#if !defined(__APPLE__)

#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR \
    "1:\n" \
    ".subsection 2\n" \
    "2: addi %1,0,0\n" \
    "b 1b\n" \
    ".previous\n"

#else

#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR \
    "b 1f\n" \
    "2: addi %1,0,0\n" \
    "1:\n"

#endif

namespace boost {
namespace detail {
namespace atomic {

static inline void
ppc_fence_before(memory_order order)
{
    switch(order) {
    case memory_order_release:
    case memory_order_acq_rel:
#if defined(__powerpc64__)
        __asm__ __volatile__ ("lwsync" ::: "memory");
        break;
#endif
    case memory_order_seq_cst:
        __asm__ __volatile__ ("sync" ::: "memory");
    default:;
    }
}

static inline void
ppc_fence_after(memory_order order)
{
    switch(order) {
    case memory_order_acquire:
    case memory_order_acq_rel:
    case memory_order_seq_cst:
        __asm__ __volatile__ ("isync");
    case memory_order_consume:
        __asm__ __volatile__ ("" ::: "memory");
    default:;
    }
}

static inline void
ppc_fence_after_store(memory_order order)
{
    switch(order) {
    case memory_order_seq_cst:
        __asm__ __volatile__ ("sync");
    default:;
    }
}

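/*
  Integral specializations, smallest first. 1- and 2-byte values are kept
  in a 32-bit storage cell and manipulated with word-sized lwarx/stwcx.
  loops; after add/sub the signed variants re-sign-extend the result
  (extsb/extsh) and the unsigned variants mask it (rlwinm with 0xff/0xffff),
  so the stored word always holds a value representable in the declared
  type.
*/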
00141 template<typename T>
00142 class base_atomic<T, int, 1, true> {
00143 typedef base_atomic this_type;
00144 typedef T value_type;
00145 typedef int32_t storage_type;
00146 typedef T difference_type;
00147 public:
00148 explicit base_atomic(value_type v) : v_(v) {}
00149 base_atomic(void) {}
00150
00151 void
00152 store(value_type v, memory_order order = memory_order_seq_cst) volatile
00153 {
00154 ppc_fence_before(order);
00155 __asm__ (
00156 "stw %1, %0\n"
00157 : "+m"(v_)
00158 : "r" (v)
00159 );
00160 ppc_fence_after_store(order);
00161 }
00162
00163 value_type
00164 load(memory_order order = memory_order_seq_cst) const volatile
00165 {
00166 value_type v;
00167 __asm__ __volatile__ (
00168 "lwz %0, %1\n"
00169 "cmpw %0, %0\n"
00170 "bne- 1f\n"
00171 "1:\n"
00172 : "=&r" (v)
00173 : "m" (v_)
00174 );
00175 ppc_fence_after(order);
00176 return v;
00177 }
00178
00179 value_type
00180 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
00181 {
00182 value_type original;
00183 ppc_fence_before(order);
00184 __asm__ (
00185 "1:\n"
00186 "lwarx %0,%y1\n"
00187 "stwcx. %2,%y1\n"
00188 "bne- 1b\n"
00189 : "=&b" (original), "+Z"(v_)
00190 : "b" (v)
00191 : "cr0"
00192 );
00193 ppc_fence_after(order);
00194 return original;
00195 }
00196
00197 bool
00198 compare_exchange_weak(
00199 value_type & expected,
00200 value_type desired,
00201 memory_order success_order,
00202 memory_order failure_order) volatile
00203 {
00204 int success;
00205 ppc_fence_before(success_order);
00206 __asm__(
00207 "lwarx %0,%y2\n"
00208 "cmpw %0, %3\n"
00209 "bne- 2f\n"
00210 "stwcx. %4,%y2\n"
00211 "bne- 2f\n"
00212 "addi %1,0,1\n"
00213 "1:"
00214 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00215 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00216 : "b" (expected), "b" (desired)
00217 : "cr0"
00218 );
00219 if (success)
00220 ppc_fence_after(success_order);
00221 else
00222 ppc_fence_after(failure_order);
00223 return success;
00224 }
00225
00226 bool
00227 compare_exchange_strong(
00228 value_type & expected,
00229 value_type desired,
00230 memory_order success_order,
00231 memory_order failure_order) volatile
00232 {
00233 int success;
00234 ppc_fence_before(success_order);
00235 __asm__(
00236 "0: lwarx %0,%y2\n"
00237 "cmpw %0, %3\n"
00238 "bne- 2f\n"
00239 "stwcx. %4,%y2\n"
00240 "bne- 0b\n"
00241 "addi %1,0,1\n"
00242 "1:"
00243
00244 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00245 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00246 : "b" (expected), "b" (desired)
00247 : "cr0"
00248 );
00249 if (success)
00250 ppc_fence_after(success_order);
00251 else
00252 ppc_fence_after(failure_order);
00253 return success;
00254 }
00255
00256 value_type
00257 fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
00258 {
00259 value_type original, tmp;
00260 ppc_fence_before(order);
00261 __asm__ (
00262 "1:\n"
00263 "lwarx %0,%y2\n"
00264 "add %1,%0,%3\n"
00265 "extsb %1, %1\n"
00266 "stwcx. %1,%y2\n"
00267 "bne- 1b\n"
00268 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00269 : "b" (v)
00270 : "cc");
00271 ppc_fence_after(order);
00272 return original;
00273 }
00274
00275 value_type
00276 fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
00277 {
00278 value_type original, tmp;
00279 ppc_fence_before(order);
00280 __asm__ (
00281 "1:\n"
00282 "lwarx %0,%y2\n"
00283 "sub %1,%0,%3\n"
00284 "extsb %1, %1\n"
00285 "stwcx. %1,%y2\n"
00286 "bne- 1b\n"
00287 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00288 : "b" (v)
00289 : "cc");
00290 ppc_fence_after(order);
00291 return original;
00292 }
00293
00294 value_type
00295 fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
00296 {
00297 value_type original, tmp;
00298 ppc_fence_before(order);
00299 __asm__ (
00300 "1:\n"
00301 "lwarx %0,%y2\n"
00302 "and %1,%0,%3\n"
00303 "stwcx. %1,%y2\n"
00304 "bne- 1b\n"
00305 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00306 : "b" (v)
00307 : "cc");
00308 ppc_fence_after(order);
00309 return original;
00310 }
00311
00312 value_type
00313 fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
00314 {
00315 value_type original, tmp;
00316 ppc_fence_before(order);
00317 __asm__ (
00318 "1:\n"
00319 "lwarx %0,%y2\n"
00320 "or %1,%0,%3\n"
00321 "stwcx. %1,%y2\n"
00322 "bne- 1b\n"
00323 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00324 : "b" (v)
00325 : "cc");
00326 ppc_fence_after(order);
00327 return original;
00328 }
00329
00330 value_type
00331 fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
00332 {
00333 value_type original, tmp;
00334 ppc_fence_before(order);
00335 __asm__ (
00336 "1:\n"
00337 "lwarx %0,%y2\n"
00338 "xor %1,%0,%3\n"
00339 "stwcx. %1,%y2\n"
00340 "bne- 1b\n"
00341 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00342 : "b" (v)
00343 : "cc");
00344 ppc_fence_after(order);
00345 return original;
00346 }
00347
00348 bool
00349 is_lock_free(void) const volatile
00350 {
00351 return true;
00352 }
00353
00354 BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
00355 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
00358 storage_type v_;
00359 };
00360
00361 template<typename T>
00362 class base_atomic<T, int, 1, false> {
00363 typedef base_atomic this_type;
00364 typedef T value_type;
00365 typedef uint32_t storage_type;
00366 typedef T difference_type;
00367 public:
00368 explicit base_atomic(value_type v) : v_(v) {}
00369 base_atomic(void) {}
00370
00371 void
00372 store(value_type v, memory_order order = memory_order_seq_cst) volatile
00373 {
00374 ppc_fence_before(order);
00375 __asm__ (
00376 "stw %1, %0\n"
00377 : "+m"(v_)
00378 : "r" (v)
00379 );
00380 ppc_fence_after_store(order);
00381 }
00382
00383 value_type
00384 load(memory_order order = memory_order_seq_cst) const volatile
00385 {
00386 value_type v;
00387 __asm__ __volatile__ (
00388 "lwz %0, %1\n"
00389 "cmpw %0, %0\n"
00390 "bne- 1f\n"
00391 "1:\n"
00392 : "=&r" (v)
00393 : "m" (v_)
00394 );
00395 ppc_fence_after(order);
00396 return v;
00397 }
00398
00399 value_type
00400 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
00401 {
00402 value_type original;
00403 ppc_fence_before(order);
00404 __asm__ (
00405 "1:\n"
00406 "lwarx %0,%y1\n"
00407 "stwcx. %2,%y1\n"
00408 "bne- 1b\n"
00409 : "=&b" (original), "+Z"(v_)
00410 : "b" (v)
00411 : "cr0"
00412 );
00413 ppc_fence_after(order);
00414 return original;
00415 }
00416
00417 bool
00418 compare_exchange_weak(
00419 value_type & expected,
00420 value_type desired,
00421 memory_order success_order,
00422 memory_order failure_order) volatile
00423 {
00424 int success;
00425 ppc_fence_before(success_order);
00426 __asm__(
00427 "lwarx %0,%y2\n"
00428 "cmpw %0, %3\n"
00429 "bne- 2f\n"
00430 "stwcx. %4,%y2\n"
00431 "bne- 2f\n"
00432 "addi %1,0,1\n"
00433 "1:"
00434
00435 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00436 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00437 : "b" (expected), "b" (desired)
00438 : "cr0"
00439 );
00440 if (success)
00441 ppc_fence_after(success_order);
00442 else
00443 ppc_fence_after(failure_order);
00444 return success;
00445 }
00446
00447 bool
00448 compare_exchange_strong(
00449 value_type & expected,
00450 value_type desired,
00451 memory_order success_order,
00452 memory_order failure_order) volatile
00453 {
00454 int success;
00455 ppc_fence_before(success_order);
00456 __asm__(
00457 "0: lwarx %0,%y2\n"
00458 "cmpw %0, %3\n"
00459 "bne- 2f\n"
00460 "stwcx. %4,%y2\n"
00461 "bne- 0b\n"
00462 "addi %1,0,1\n"
00463 "1:"
00464
00465 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00466 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00467 : "b" (expected), "b" (desired)
00468 : "cr0"
00469 );
00470 if (success)
00471 ppc_fence_after(success_order);
00472 else
00473 ppc_fence_after(failure_order);
00474 return success;
00475 }
00476
00477 value_type
00478 fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
00479 {
00480 value_type original, tmp;
00481 ppc_fence_before(order);
00482 __asm__ (
00483 "1:\n"
00484 "lwarx %0,%y2\n"
00485 "add %1,%0,%3\n"
00486 "rlwinm %1, %1, 0, 0xff\n"
00487 "stwcx. %1,%y2\n"
00488 "bne- 1b\n"
00489 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00490 : "b" (v)
00491 : "cc");
00492 ppc_fence_after(order);
00493 return original;
00494 }
00495
00496 value_type
00497 fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
00498 {
00499 value_type original, tmp;
00500 ppc_fence_before(order);
00501 __asm__ (
00502 "1:\n"
00503 "lwarx %0,%y2\n"
00504 "sub %1,%0,%3\n"
00505 "rlwinm %1, %1, 0, 0xff\n"
00506 "stwcx. %1,%y2\n"
00507 "bne- 1b\n"
00508 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00509 : "b" (v)
00510 : "cc");
00511 ppc_fence_after(order);
00512 return original;
00513 }
00514
00515 value_type
00516 fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
00517 {
00518 value_type original, tmp;
00519 ppc_fence_before(order);
00520 __asm__ (
00521 "1:\n"
00522 "lwarx %0,%y2\n"
00523 "and %1,%0,%3\n"
00524 "stwcx. %1,%y2\n"
00525 "bne- 1b\n"
00526 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00527 : "b" (v)
00528 : "cc");
00529 ppc_fence_after(order);
00530 return original;
00531 }
00532
00533 value_type
00534 fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
00535 {
00536 value_type original, tmp;
00537 ppc_fence_before(order);
00538 __asm__ (
00539 "1:\n"
00540 "lwarx %0,%y2\n"
00541 "or %1,%0,%3\n"
00542 "stwcx. %1,%y2\n"
00543 "bne- 1b\n"
00544 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00545 : "b" (v)
00546 : "cc");
00547 ppc_fence_after(order);
00548 return original;
00549 }
00550
00551 value_type
00552 fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
00553 {
00554 value_type original, tmp;
00555 ppc_fence_before(order);
00556 __asm__ (
00557 "1:\n"
00558 "lwarx %0,%y2\n"
00559 "xor %1,%0,%3\n"
00560 "stwcx. %1,%y2\n"
00561 "bne- 1b\n"
00562 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00563 : "b" (v)
00564 : "cc");
00565 ppc_fence_after(order);
00566 return original;
00567 }
00568
00569 bool
00570 is_lock_free(void) const volatile
00571 {
00572 return true;
00573 }
00574
00575 BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
00576 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
00579 storage_type v_;
00580 };
00581
00582 template<typename T>
00583 class base_atomic<T, int, 2, true> {
00584 typedef base_atomic this_type;
00585 typedef T value_type;
00586 typedef int32_t storage_type;
00587 typedef T difference_type;
00588 public:
00589 explicit base_atomic(value_type v) : v_(v) {}
00590 base_atomic(void) {}
00591
00592 void
00593 store(value_type v, memory_order order = memory_order_seq_cst) volatile
00594 {
00595 ppc_fence_before(order);
00596 __asm__ (
00597 "stw %1, %0\n"
00598 : "+m"(v_)
00599 : "r" (v)
00600 );
00601 ppc_fence_after_store(order);
00602 }
00603
00604 value_type
00605 load(memory_order order = memory_order_seq_cst) const volatile
00606 {
00607 value_type v;
00608 __asm__ __volatile__ (
00609 "lwz %0, %1\n"
00610 "cmpw %0, %0\n"
00611 "bne- 1f\n"
00612 "1:\n"
00613 : "=&r" (v)
00614 : "m" (v_)
00615 );
00616 ppc_fence_after(order);
00617 return v;
00618 }
00619
00620 value_type
00621 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
00622 {
00623 value_type original;
00624 ppc_fence_before(order);
00625 __asm__ (
00626 "1:\n"
00627 "lwarx %0,%y1\n"
00628 "stwcx. %2,%y1\n"
00629 "bne- 1b\n"
00630 : "=&b" (original), "+Z"(v_)
00631 : "b" (v)
00632 : "cr0"
00633 );
00634 ppc_fence_after(order);
00635 return original;
00636 }
00637
00638 bool
00639 compare_exchange_weak(
00640 value_type & expected,
00641 value_type desired,
00642 memory_order success_order,
00643 memory_order failure_order) volatile
00644 {
00645 int success;
00646 ppc_fence_before(success_order);
00647 __asm__(
00648 "lwarx %0,%y2\n"
00649 "cmpw %0, %3\n"
00650 "bne- 2f\n"
00651 "stwcx. %4,%y2\n"
00652 "bne- 2f\n"
00653 "addi %1,0,1\n"
00654 "1:"
00655
00656 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00657 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00658 : "b" (expected), "b" (desired)
00659 : "cr0"
00660 );
00661 if (success)
00662 ppc_fence_after(success_order);
00663 else
00664 ppc_fence_after(failure_order);
00665 return success;
00666 }
00667
00668 bool
00669 compare_exchange_strong(
00670 value_type & expected,
00671 value_type desired,
00672 memory_order success_order,
00673 memory_order failure_order) volatile
00674 {
00675 int success;
00676 ppc_fence_before(success_order);
00677 __asm__(
00678 "0: lwarx %0,%y2\n"
00679 "cmpw %0, %3\n"
00680 "bne- 2f\n"
00681 "stwcx. %4,%y2\n"
00682 "bne- 0b\n"
00683 "addi %1,0,1\n"
00684 "1:"
00685
00686 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00687 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00688 : "b" (expected), "b" (desired)
00689 : "cr0"
00690 );
00691 if (success)
00692 ppc_fence_after(success_order);
00693 else
00694 ppc_fence_after(failure_order);
00695 return success;
00696 }
00697
00698 value_type
00699 fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
00700 {
00701 value_type original, tmp;
00702 ppc_fence_before(order);
00703 __asm__ (
00704 "1:\n"
00705 "lwarx %0,%y2\n"
00706 "add %1,%0,%3\n"
00707 "extsh %1, %1\n"
00708 "stwcx. %1,%y2\n"
00709 "bne- 1b\n"
00710 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00711 : "b" (v)
00712 : "cc");
00713 ppc_fence_after(order);
00714 return original;
00715 }
00716
00717 value_type
00718 fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
00719 {
00720 value_type original, tmp;
00721 ppc_fence_before(order);
00722 __asm__ (
00723 "1:\n"
00724 "lwarx %0,%y2\n"
00725 "sub %1,%0,%3\n"
00726 "extsh %1, %1\n"
00727 "stwcx. %1,%y2\n"
00728 "bne- 1b\n"
00729 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00730 : "b" (v)
00731 : "cc");
00732 ppc_fence_after(order);
00733 return original;
00734 }
00735
00736 value_type
00737 fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
00738 {
00739 value_type original, tmp;
00740 ppc_fence_before(order);
00741 __asm__ (
00742 "1:\n"
00743 "lwarx %0,%y2\n"
00744 "and %1,%0,%3\n"
00745 "stwcx. %1,%y2\n"
00746 "bne- 1b\n"
00747 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00748 : "b" (v)
00749 : "cc");
00750 ppc_fence_after(order);
00751 return original;
00752 }
00753
00754 value_type
00755 fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
00756 {
00757 value_type original, tmp;
00758 ppc_fence_before(order);
00759 __asm__ (
00760 "1:\n"
00761 "lwarx %0,%y2\n"
00762 "or %1,%0,%3\n"
00763 "stwcx. %1,%y2\n"
00764 "bne- 1b\n"
00765 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00766 : "b" (v)
00767 : "cc");
00768 ppc_fence_after(order);
00769 return original;
00770 }
00771
00772 value_type
00773 fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
00774 {
00775 value_type original, tmp;
00776 ppc_fence_before(order);
00777 __asm__ (
00778 "1:\n"
00779 "lwarx %0,%y2\n"
00780 "xor %1,%0,%3\n"
00781 "stwcx. %1,%y2\n"
00782 "bne- 1b\n"
00783 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00784 : "b" (v)
00785 : "cc");
00786 ppc_fence_after(order);
00787 return original;
00788 }
00789
00790 bool
00791 is_lock_free(void) const volatile
00792 {
00793 return true;
00794 }
00795
00796 BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
00797 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
00800 storage_type v_;
00801 };
00802
00803 template<typename T>
00804 class base_atomic<T, int, 2, false> {
00805 typedef base_atomic this_type;
00806 typedef T value_type;
00807 typedef uint32_t storage_type;
00808 typedef T difference_type;
00809 public:
00810 explicit base_atomic(value_type v) : v_(v) {}
00811 base_atomic(void) {}
00812
00813 void
00814 store(value_type v, memory_order order = memory_order_seq_cst) volatile
00815 {
00816 ppc_fence_before(order);
00817 __asm__ (
00818 "stw %1, %0\n"
00819 : "+m"(v_)
00820 : "r" (v)
00821 );
00822 ppc_fence_after_store(order);
00823 }
00824
00825 value_type
00826 load(memory_order order = memory_order_seq_cst) const volatile
00827 {
00828 value_type v;
00829 __asm__ __volatile__ (
00830 "lwz %0, %1\n"
00831 "cmpw %0, %0\n"
00832 "bne- 1f\n"
00833 "1:\n"
00834 : "=&r" (v)
00835 : "m" (v_)
00836 );
00837 ppc_fence_after(order);
00838 return v;
00839 }
00840
00841 value_type
00842 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
00843 {
00844 value_type original;
00845 ppc_fence_before(order);
00846 __asm__ (
00847 "1:\n"
00848 "lwarx %0,%y1\n"
00849 "stwcx. %2,%y1\n"
00850 "bne- 1b\n"
00851 : "=&b" (original), "+Z"(v_)
00852 : "b" (v)
00853 : "cr0"
00854 );
00855 ppc_fence_after(order);
00856 return original;
00857 }
00858
00859 bool
00860 compare_exchange_weak(
00861 value_type & expected,
00862 value_type desired,
00863 memory_order success_order,
00864 memory_order failure_order) volatile
00865 {
00866 int success;
00867 ppc_fence_before(success_order);
00868 __asm__(
00869 "lwarx %0,%y2\n"
00870 "cmpw %0, %3\n"
00871 "bne- 2f\n"
00872 "stwcx. %4,%y2\n"
00873 "bne- 2f\n"
00874 "addi %1,0,1\n"
00875 "1:"
00876
00877 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00878 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00879 : "b" (expected), "b" (desired)
00880 : "cr0"
00881 );
00882 if (success)
00883 ppc_fence_after(success_order);
00884 else
00885 ppc_fence_after(failure_order);
00886 return success;
00887 }
00888
00889 bool
00890 compare_exchange_strong(
00891 value_type & expected,
00892 value_type desired,
00893 memory_order success_order,
00894 memory_order failure_order) volatile
00895 {
00896 int success;
00897 ppc_fence_before(success_order);
00898 __asm__(
00899 "0: lwarx %0,%y2\n"
00900 "cmpw %0, %3\n"
00901 "bne- 2f\n"
00902 "stwcx. %4,%y2\n"
00903 "bne- 0b\n"
00904 "addi %1,0,1\n"
00905 "1:"
00906
00907 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
00908 : "=&b" (expected), "=&b" (success), "+Z"(v_)
00909 : "b" (expected), "b" (desired)
00910 : "cr0"
00911 );
00912 if (success)
00913 ppc_fence_after(success_order);
00914 else
00915 ppc_fence_after(failure_order);
00916 return success;
00917 }
00918
00919 value_type
00920 fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
00921 {
00922 value_type original, tmp;
00923 ppc_fence_before(order);
00924 __asm__ (
00925 "1:\n"
00926 "lwarx %0,%y2\n"
00927 "add %1,%0,%3\n"
00928 "rlwinm %1, %1, 0, 0xffff\n"
00929 "stwcx. %1,%y2\n"
00930 "bne- 1b\n"
00931 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00932 : "b" (v)
00933 : "cc");
00934 ppc_fence_after(order);
00935 return original;
00936 }
00937
00938 value_type
00939 fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
00940 {
00941 value_type original, tmp;
00942 ppc_fence_before(order);
00943 __asm__ (
00944 "1:\n"
00945 "lwarx %0,%y2\n"
00946 "sub %1,%0,%3\n"
00947 "rlwinm %1, %1, 0, 0xffff\n"
00948 "stwcx. %1,%y2\n"
00949 "bne- 1b\n"
00950 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00951 : "b" (v)
00952 : "cc");
00953 ppc_fence_after(order);
00954 return original;
00955 }
00956
00957 value_type
00958 fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
00959 {
00960 value_type original, tmp;
00961 ppc_fence_before(order);
00962 __asm__ (
00963 "1:\n"
00964 "lwarx %0,%y2\n"
00965 "and %1,%0,%3\n"
00966 "stwcx. %1,%y2\n"
00967 "bne- 1b\n"
00968 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00969 : "b" (v)
00970 : "cc");
00971 ppc_fence_after(order);
00972 return original;
00973 }
00974
00975 value_type
00976 fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
00977 {
00978 value_type original, tmp;
00979 ppc_fence_before(order);
00980 __asm__ (
00981 "1:\n"
00982 "lwarx %0,%y2\n"
00983 "or %1,%0,%3\n"
00984 "stwcx. %1,%y2\n"
00985 "bne- 1b\n"
00986 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
00987 : "b" (v)
00988 : "cc");
00989 ppc_fence_after(order);
00990 return original;
00991 }
00992
00993 value_type
00994 fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
00995 {
00996 value_type original, tmp;
00997 ppc_fence_before(order);
00998 __asm__ (
00999 "1:\n"
01000 "lwarx %0,%y2\n"
01001 "xor %1,%0,%3\n"
01002 "stwcx. %1,%y2\n"
01003 "bne- 1b\n"
01004 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01005 : "b" (v)
01006 : "cc");
01007 ppc_fence_after(order);
01008 return original;
01009 }
01010
01011 bool
01012 is_lock_free(void) const volatile
01013 {
01014 return true;
01015 }
01016
01017 BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
01018 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
01021 storage_type v_;
01022 };
01023
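/*
  4-byte integers are the native word size: store and load are single
  instructions (plus the dependency branch on load) and the
  read-modify-write operations are plain lwarx/stwcx. loops with no
  truncation step.
*/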
01024 template<typename T, bool Sign>
01025 class base_atomic<T, int, 4, Sign> {
01026 typedef base_atomic this_type;
01027 typedef T value_type;
01028 typedef T difference_type;
01029 public:
01030 explicit base_atomic(value_type v) : v_(v) {}
01031 base_atomic(void) {}
01032
01033 void
01034 store(value_type v, memory_order order = memory_order_seq_cst) volatile
01035 {
01036 ppc_fence_before(order);
01037 const_cast<volatile value_type &>(v_) = v;
01038 ppc_fence_after_store(order);
01039 }
01040
01041 value_type
01042 load(memory_order order = memory_order_seq_cst) const volatile
01043 {
01044 value_type v = const_cast<const volatile value_type &>(v_);
01045 __asm__ __volatile__ (
01046 "cmpw %0, %0\n"
01047 "bne- 1f\n"
01048 "1:\n"
01049 : "+b"(v)
01050 :
01051 : "cr0"
01052 );
01053 ppc_fence_after(order);
01054 return v;
01055 }
01056
01057 value_type
01058 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
01059 {
01060 value_type original;
01061 ppc_fence_before(order);
01062 __asm__ (
01063 "1:\n"
01064 "lwarx %0,%y1\n"
01065 "stwcx. %2,%y1\n"
01066 "bne- 1b\n"
01067 : "=&b" (original), "+Z"(v_)
01068 : "b" (v)
01069 : "cr0"
01070 );
01071 ppc_fence_after(order);
01072 return original;
01073 }
01074
01075 bool
01076 compare_exchange_weak(
01077 value_type & expected,
01078 value_type desired,
01079 memory_order success_order,
01080 memory_order failure_order) volatile
01081 {
01082 int success;
01083 ppc_fence_before(success_order);
01084 __asm__(
01085 "lwarx %0,%y2\n"
01086 "cmpw %0, %3\n"
01087 "bne- 2f\n"
01088 "stwcx. %4,%y2\n"
01089 "bne- 2f\n"
01090 "addi %1,0,1\n"
01091 "1:"
01092
01093 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01094 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01095 : "b" (expected), "b" (desired)
01096 : "cr0"
01097 );
01098 if (success)
01099 ppc_fence_after(success_order);
01100 else
01101 ppc_fence_after(failure_order);
01102 return success;
01103 }
01104
01105 bool
01106 compare_exchange_strong(
01107 value_type & expected,
01108 value_type desired,
01109 memory_order success_order,
01110 memory_order failure_order) volatile
01111 {
01112 int success;
01113 ppc_fence_before(success_order);
01114 __asm__(
01115 "0: lwarx %0,%y2\n"
01116 "cmpw %0, %3\n"
01117 "bne- 2f\n"
01118 "stwcx. %4,%y2\n"
01119 "bne- 0b\n"
01120 "addi %1,0,1\n"
01121 "1:"
01122
01123 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01124 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01125 : "b" (expected), "b" (desired)
01126 : "cr0"
01127 );
01128 if (success)
01129 ppc_fence_after(success_order);
01130 else
01131 ppc_fence_after(failure_order);
01132 return success;
01133 }
01134
01135 value_type
01136 fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
01137 {
01138 value_type original, tmp;
01139 ppc_fence_before(order);
01140 __asm__ (
01141 "1:\n"
01142 "lwarx %0,%y2\n"
01143 "add %1,%0,%3\n"
01144 "stwcx. %1,%y2\n"
01145 "bne- 1b\n"
01146 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01147 : "b" (v)
01148 : "cc");
01149 ppc_fence_after(order);
01150 return original;
01151 }
01152
01153 value_type
01154 fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
01155 {
01156 value_type original, tmp;
01157 ppc_fence_before(order);
01158 __asm__ (
01159 "1:\n"
01160 "lwarx %0,%y2\n"
01161 "sub %1,%0,%3\n"
01162 "stwcx. %1,%y2\n"
01163 "bne- 1b\n"
01164 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01165 : "b" (v)
01166 : "cc");
01167 ppc_fence_after(order);
01168 return original;
01169 }
01170
01171 value_type
01172 fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
01173 {
01174 value_type original, tmp;
01175 ppc_fence_before(order);
01176 __asm__ (
01177 "1:\n"
01178 "lwarx %0,%y2\n"
01179 "and %1,%0,%3\n"
01180 "stwcx. %1,%y2\n"
01181 "bne- 1b\n"
01182 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01183 : "b" (v)
01184 : "cc");
01185 ppc_fence_after(order);
01186 return original;
01187 }
01188
01189 value_type
01190 fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
01191 {
01192 value_type original, tmp;
01193 ppc_fence_before(order);
01194 __asm__ (
01195 "1:\n"
01196 "lwarx %0,%y2\n"
01197 "or %1,%0,%3\n"
01198 "stwcx. %1,%y2\n"
01199 "bne- 1b\n"
01200 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01201 : "b" (v)
01202 : "cc");
01203 ppc_fence_after(order);
01204 return original;
01205 }
01206
01207 value_type
01208 fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
01209 {
01210 value_type original, tmp;
01211 ppc_fence_before(order);
01212 __asm__ (
01213 "1:\n"
01214 "lwarx %0,%y2\n"
01215 "xor %1,%0,%3\n"
01216 "stwcx. %1,%y2\n"
01217 "bne- 1b\n"
01218 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01219 : "b" (v)
01220 : "cc");
01221 ppc_fence_after(order);
01222 return original;
01223 }
01224
01225 bool
01226 is_lock_free(void) const volatile
01227 {
01228 return true;
01229 }
01230
01231 BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
01232 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
01235 value_type v_;
01236 };
01237
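/*
  8-byte integers are lock-free only on powerpc64, using the doubleword
  ldarx/stdcx. and cmpd forms. On 32-bit targets this specialization is
  omitted and 64-bit atomics fall back to the generic locked implementation
  (matching BOOST_ATOMIC_LLONG_LOCK_FREE == 0 above).
*/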
01238 #if defined(__powerpc64__)
01239
01240 template<typename T, bool Sign>
01241 class base_atomic<T, int, 8, Sign> {
01242 typedef base_atomic this_type;
01243 typedef T value_type;
01244 typedef T difference_type;
01245 public:
01246 explicit base_atomic(value_type v) : v_(v) {}
01247 base_atomic(void) {}
01248
01249 void
01250 store(value_type v, memory_order order = memory_order_seq_cst) volatile
01251 {
01252 ppc_fence_before(order);
01253 const_cast<volatile value_type &>(v_) = v;
01254 ppc_fence_after_store(order);
01255 }
01256
01257 value_type
01258 load(memory_order order = memory_order_seq_cst) const volatile
01259 {
01260 value_type v = const_cast<const volatile value_type &>(v_);
01261 __asm__ __volatile__ (
01262 "cmpd %0, %0\n"
01263 "bne- 1f\n"
01264 "1:\n"
01265 : "+b"(v)
01266 :
01267 : "cr0"
01268 );
01269 ppc_fence_after(order);
01270 return v;
01271 }
01272
01273 value_type
01274 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
01275 {
01276 value_type original;
01277 ppc_fence_before(order);
01278 __asm__ (
01279 "1:\n"
01280 "ldarx %0,%y1\n"
01281 "stdcx. %2,%y1\n"
01282 "bne- 1b\n"
01283 : "=&b" (original), "+Z"(v_)
01284 : "b" (v)
01285 : "cr0"
01286 );
01287 ppc_fence_after(order);
01288 return original;
01289 }
01290
01291 bool
01292 compare_exchange_weak(
01293 value_type & expected,
01294 value_type desired,
01295 memory_order success_order,
01296 memory_order failure_order) volatile
01297 {
01298 int success;
01299 ppc_fence_before(success_order);
01300 __asm__(
01301 "ldarx %0,%y2\n"
01302 "cmpd %0, %3\n"
01303 "bne- 2f\n"
01304 "stdcx. %4,%y2\n"
01305 "bne- 2f\n"
01306 "addi %1,0,1\n"
01307 "1:"
01308
01309 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01310 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01311 : "b" (expected), "b" (desired)
01312 : "cr0"
01313 );
01314 if (success)
01315 ppc_fence_after(success_order);
01316 else
01317 ppc_fence_after(failure_order);
01318 return success;
01319 }
01320
01321 bool
01322 compare_exchange_strong(
01323 value_type & expected,
01324 value_type desired,
01325 memory_order success_order,
01326 memory_order failure_order) volatile
01327 {
01328 int success;
01329 ppc_fence_before(success_order);
01330 __asm__(
01331 "0: ldarx %0,%y2\n"
01332 "cmpd %0, %3\n"
01333 "bne- 2f\n"
01334 "stdcx. %4,%y2\n"
01335 "bne- 0b\n"
01336 "addi %1,0,1\n"
01337 "1:"
01338
01339 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01340 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01341 : "b" (expected), "b" (desired)
01342 : "cr0"
01343 );
01344 if (success)
01345 ppc_fence_after(success_order);
01346 else
01347 ppc_fence_after(failure_order);
01348 return success;
01349 }
01350
01351 value_type
01352 fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
01353 {
01354 value_type original, tmp;
01355 ppc_fence_before(order);
01356 __asm__ (
01357 "1:\n"
01358 "ldarx %0,%y2\n"
01359 "add %1,%0,%3\n"
01360 "stdcx. %1,%y2\n"
01361 "bne- 1b\n"
01362 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01363 : "b" (v)
01364 : "cc");
01365 ppc_fence_after(order);
01366 return original;
01367 }
01368
01369 value_type
01370 fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
01371 {
01372 value_type original, tmp;
01373 ppc_fence_before(order);
01374 __asm__ (
01375 "1:\n"
01376 "ldarx %0,%y2\n"
01377 "sub %1,%0,%3\n"
01378 "stdcx. %1,%y2\n"
01379 "bne- 1b\n"
01380 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01381 : "b" (v)
01382 : "cc");
01383 ppc_fence_after(order);
01384 return original;
01385 }
01386
01387 value_type
01388 fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
01389 {
01390 value_type original, tmp;
01391 ppc_fence_before(order);
01392 __asm__ (
01393 "1:\n"
01394 "ldarx %0,%y2\n"
01395 "and %1,%0,%3\n"
01396 "stdcx. %1,%y2\n"
01397 "bne- 1b\n"
01398 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01399 : "b" (v)
01400 : "cc");
01401 ppc_fence_after(order);
01402 return original;
01403 }
01404
01405 value_type
01406 fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
01407 {
01408 value_type original, tmp;
01409 ppc_fence_before(order);
01410 __asm__ (
01411 "1:\n"
01412 "ldarx %0,%y2\n"
01413 "or %1,%0,%3\n"
01414 "stdcx. %1,%y2\n"
01415 "bne- 1b\n"
01416 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01417 : "b" (v)
01418 : "cc");
01419 ppc_fence_after(order);
01420 return original;
01421 }
01422
01423 value_type
01424 fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
01425 {
01426 value_type original, tmp;
01427 ppc_fence_before(order);
01428 __asm__ (
01429 "1:\n"
01430 "ldarx %0,%y2\n"
01431 "xor %1,%0,%3\n"
01432 "stdcx. %1,%y2\n"
01433 "bne- 1b\n"
01434 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01435 : "b" (v)
01436 : "cc");
01437 ppc_fence_after(order);
01438 return original;
01439 }
01440
01441 bool
01442 is_lock_free(void) const volatile
01443 {
01444 return true;
01445 }
01446
01447 BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
01448 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
01451 value_type v_;
01452 };
01453
01454 #endif
01455
01456
01457
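/*
  Pointer specializations: lwarx/stwcx. on 32-bit targets, ldarx/stdcx. on
  powerpc64. For T* the fetch_add/fetch_sub argument is scaled by sizeof(T)
  before the add/sub, so the operations follow ordinary pointer arithmetic;
  the void* specialization provides no arithmetic.
*/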
01458 #if !defined(__powerpc64__)
01459
01460 template<bool Sign>
01461 class base_atomic<void *, void *, 4, Sign> {
01462 typedef base_atomic this_type;
01463 typedef void * value_type;
01464 public:
01465 explicit base_atomic(value_type v) : v_(v) {}
01466 base_atomic(void) {}
01467
01468 void
01469 store(value_type v, memory_order order = memory_order_seq_cst) volatile
01470 {
01471 ppc_fence_before(order);
01472 __asm__ (
01473 "stw %1, %0\n"
01474 : "+m" (v_)
01475 : "r" (v)
01476 );
01477 ppc_fence_after_store(order);
01478 }
01479
01480 value_type
01481 load(memory_order order = memory_order_seq_cst) const volatile
01482 {
01483 value_type v;
01484 __asm__ (
01485 "lwz %0, %1\n"
01486 "cmpw %0, %0\n"
01487 "bne- 1f\n"
01488 "1:\n"
01489 : "=r"(v)
01490 : "m"(v_)
01491 : "cr0"
01492 );
01493 ppc_fence_after(order);
01494 return v;
01495 }
01496
01497 value_type
01498 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
01499 {
01500 value_type original;
01501 ppc_fence_before(order);
01502 __asm__ (
01503 "1:\n"
01504 "lwarx %0,%y1\n"
01505 "stwcx. %2,%y1\n"
01506 "bne- 1b\n"
01507 : "=&b" (original), "+Z"(v_)
01508 : "b" (v)
01509 : "cr0"
01510 );
01511 ppc_fence_after(order);
01512 return original;
01513 }
01514
01515 bool
01516 compare_exchange_weak(
01517 value_type & expected,
01518 value_type desired,
01519 memory_order success_order,
01520 memory_order failure_order) volatile
01521 {
01522 int success;
01523 ppc_fence_before(success_order);
01524 __asm__(
01525 "lwarx %0,%y2\n"
01526 "cmpw %0, %3\n"
01527 "bne- 2f\n"
01528 "stwcx. %4,%y2\n"
01529 "bne- 2f\n"
01530 "addi %1,0,1\n"
01531 "1:"
01532
01533 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01534 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01535 : "b" (expected), "b" (desired)
01536 : "cr0"
01537 );
01538 if (success)
01539 ppc_fence_after(success_order);
01540 else
01541 ppc_fence_after(failure_order);
01542 return success;
01543 }
01544
01545 bool
01546 compare_exchange_strong(
01547 value_type & expected,
01548 value_type desired,
01549 memory_order success_order,
01550 memory_order failure_order) volatile
01551 {
01552 int success;
01553 ppc_fence_before(success_order);
01554 __asm__(
01555 "0: lwarx %0,%y2\n"
01556 "cmpw %0, %3\n"
01557 "bne- 2f\n"
01558 "stwcx. %4,%y2\n"
01559 "bne- 0b\n"
01560 "addi %1,0,1\n"
01561 "1:"
01562
01563 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01564 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01565 : "b" (expected), "b" (desired)
01566 : "cr0"
01567 );
01568 if (success)
01569 ppc_fence_after(success_order);
01570 else
01571 ppc_fence_after(failure_order);
01572 return success;
01573 }
01574
01575 bool
01576 is_lock_free(void) const volatile
01577 {
01578 return true;
01579 }
01580
01581 BOOST_ATOMIC_DECLARE_BASE_OPERATORS
01582 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
01585 value_type v_;
01586 };
01587
01588 template<typename T, bool Sign>
01589 class base_atomic<T *, void *, 4, Sign> {
01590 typedef base_atomic this_type;
01591 typedef T * value_type;
01592 typedef ptrdiff_t difference_type;
01593 public:
01594 explicit base_atomic(value_type v) : v_(v) {}
01595 base_atomic(void) {}
01596
01597 void
01598 store(value_type v, memory_order order = memory_order_seq_cst) volatile
01599 {
01600 ppc_fence_before(order);
01601 __asm__ (
01602 "stw %1, %0\n"
01603 : "+m" (v_)
01604 : "r" (v)
01605 );
01606 ppc_fence_after_store(order);
01607 }
01608
01609 value_type
01610 load(memory_order order = memory_order_seq_cst) const volatile
01611 {
01612 value_type v;
01613 __asm__ (
01614 "lwz %0, %1\n"
01615 "cmpw %0, %0\n"
01616 "bne- 1f\n"
01617 "1:\n"
01618 : "=r"(v)
01619 : "m"(v_)
01620 : "cr0"
01621 );
01622 ppc_fence_after(order);
01623 return v;
01624 }
01625
01626 value_type
01627 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
01628 {
01629 value_type original;
01630 ppc_fence_before(order);
01631 __asm__ (
01632 "1:\n"
01633 "lwarx %0,%y1\n"
01634 "stwcx. %2,%y1\n"
01635 "bne- 1b\n"
01636 : "=&b" (original), "+Z"(v_)
01637 : "b" (v)
01638 : "cr0"
01639 );
01640 ppc_fence_after(order);
01641 return original;
01642 }
01643
01644 bool
01645 compare_exchange_weak(
01646 value_type & expected,
01647 value_type desired,
01648 memory_order success_order,
01649 memory_order failure_order) volatile
01650 {
01651 int success;
01652 ppc_fence_before(success_order);
01653 __asm__(
01654 "lwarx %0,%y2\n"
01655 "cmpw %0, %3\n"
01656 "bne- 2f\n"
01657 "stwcx. %4,%y2\n"
01658 "bne- 2f\n"
01659 "addi %1,0,1\n"
01660 "1:"
01661
01662 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01663 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01664 : "b" (expected), "b" (desired)
01665 : "cr0"
01666 );
01667 if (success)
01668 ppc_fence_after(success_order);
01669 else
01670 ppc_fence_after(failure_order);
01671 return success;
01672 }
01673
01674 bool
01675 compare_exchange_strong(
01676 value_type & expected,
01677 value_type desired,
01678 memory_order success_order,
01679 memory_order failure_order) volatile
01680 {
01681 int success;
01682 ppc_fence_before(success_order);
01683 __asm__(
01684 "0: lwarx %0,%y2\n"
01685 "cmpw %0, %3\n"
01686 "bne- 2f\n"
01687 "stwcx. %4,%y2\n"
01688 "bne- 0b\n"
01689 "addi %1,0,1\n"
01690 "1:"
01691
01692 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01693 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01694 : "b" (expected), "b" (desired)
01695 : "cr0"
01696 );
01697 if (success)
01698 ppc_fence_after(success_order);
01699 else
01700 ppc_fence_after(failure_order);
01701 return success;
01702 }
01703
01704 value_type
01705 fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
01706 {
01707 v = v * sizeof(*v_);
01708 value_type original, tmp;
01709 ppc_fence_before(order);
01710 __asm__ (
01711 "1:\n"
01712 "lwarx %0,%y2\n"
01713 "add %1,%0,%3\n"
01714 "stwcx. %1,%y2\n"
01715 "bne- 1b\n"
01716 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01717 : "b" (v)
01718 : "cc");
01719 ppc_fence_after(order);
01720 return original;
01721 }
01722
01723 value_type
01724 fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
01725 {
01726 v = v * sizeof(*v_);
01727 value_type original, tmp;
01728 ppc_fence_before(order);
01729 __asm__ (
01730 "1:\n"
01731 "lwarx %0,%y2\n"
01732 "sub %1,%0,%3\n"
01733 "stwcx. %1,%y2\n"
01734 "bne- 1b\n"
01735 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
01736 : "b" (v)
01737 : "cc");
01738 ppc_fence_after(order);
01739 return original;
01740 }
01741
01742 bool
01743 is_lock_free(void) const volatile
01744 {
01745 return true;
01746 }
01747
01748 BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
01749 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
01752 value_type v_;
01753 };
01754
01755 #else
01756
01757 template<bool Sign>
01758 class base_atomic<void *, void *, 8, Sign> {
01759 typedef base_atomic this_type;
01760 typedef void * value_type;
01761 public:
01762 explicit base_atomic(value_type v) : v_(v) {}
01763 base_atomic(void) {}
01764
01765 void
01766 store(value_type v, memory_order order = memory_order_seq_cst) volatile
01767 {
01768 ppc_fence_before(order);
01769 __asm__ (
01770 "std %1, %0\n"
01771 : "+m" (v_)
01772 : "r" (v)
01773 );
01774 ppc_fence_after_store(order);
01775 }
01776
01777 value_type
01778 load(memory_order order = memory_order_seq_cst) const volatile
01779 {
01780 value_type v;
01781 __asm__ (
01782 "ld %0, %1\n"
01783 "cmpd %0, %0\n"
01784 "bne- 1f\n"
01785 "1:\n"
01786 : "=r"(v)
01787 : "m"(v_)
01788 : "cr0"
01789 );
01790 ppc_fence_after(order);
01791 return v;
01792 }
01793
01794 value_type
01795 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
01796 {
01797 value_type original;
01798 ppc_fence_before(order);
01799 __asm__ (
01800 "1:\n"
01801 "ldarx %0,%y1\n"
01802 "stdcx. %2,%y1\n"
01803 "bne- 1b\n"
01804 : "=&b" (original), "+Z"(v_)
01805 : "b" (v)
01806 : "cr0"
01807 );
01808 ppc_fence_after(order);
01809 return original;
01810 }
01811
01812 bool
01813 compare_exchange_weak(
01814 value_type & expected,
01815 value_type desired,
01816 memory_order success_order,
01817 memory_order failure_order) volatile
01818 {
01819 int success;
01820 ppc_fence_before(success_order);
01821 __asm__(
01822 "ldarx %0,%y2\n"
01823 "cmpd %0, %3\n"
01824 "bne- 2f\n"
01825 "stdcx. %4,%y2\n"
01826 "bne- 2f\n"
01827 "addi %1,0,1\n"
01828 "1:"
01829
01830 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01831 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01832 : "b" (expected), "b" (desired)
01833 : "cr0"
01834 );
01835 if (success)
01836 ppc_fence_after(success_order);
01837 else
01838 ppc_fence_after(failure_order);
01839 return success;
01840 }
01841
01842 bool
01843 compare_exchange_strong(
01844 value_type & expected,
01845 value_type desired,
01846 memory_order success_order,
01847 memory_order failure_order) volatile
01848 {
01849 int success;
01850 ppc_fence_before(success_order);
01851 __asm__(
01852 "0: ldarx %0,%y2\n"
01853 "cmpd %0, %3\n"
01854 "bne- 2f\n"
01855 "stdcx. %4,%y2\n"
01856 "bne- 0b\n"
01857 "addi %1,0,1\n"
01858 "1:"
01859
01860 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01861 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01862 : "b" (expected), "b" (desired)
01863 : "cr0"
01864 );
01865 if (success)
01866 ppc_fence_after(success_order);
01867 else
01868 ppc_fence_after(failure_order);
01869 return success;
01870 }
01871
01872 bool
01873 is_lock_free(void) const volatile
01874 {
01875 return true;
01876 }
01877
01878 BOOST_ATOMIC_DECLARE_BASE_OPERATORS
01879 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
01882 value_type v_;
01883 };
01884
01885 template<typename T, bool Sign>
01886 class base_atomic<T *, void *, 8, Sign> {
01887 typedef base_atomic this_type;
01888 typedef T * value_type;
01889 typedef ptrdiff_t difference_type;
01890 public:
01891 explicit base_atomic(value_type v) : v_(v) {}
01892 base_atomic(void) {}
01893
01894 void
01895 store(value_type v, memory_order order = memory_order_seq_cst) volatile
01896 {
01897 ppc_fence_before(order);
01898 __asm__ (
01899 "std %1, %0\n"
01900 : "+m" (v_)
01901 : "r" (v)
01902 );
01903 ppc_fence_after_store(order);
01904 }
01905
01906 value_type
01907 load(memory_order order = memory_order_seq_cst) const volatile
01908 {
01909 value_type v;
01910 __asm__ (
01911 "ld %0, %1\n"
01912 "cmpd %0, %0\n"
01913 "bne- 1f\n"
01914 "1:\n"
01915 : "=r"(v)
01916 : "m"(v_)
01917 : "cr0"
01918 );
01919 ppc_fence_after(order);
01920 return v;
01921 }
01922
01923 value_type
01924 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
01925 {
01926 value_type original;
01927 ppc_fence_before(order);
01928 __asm__ (
01929 "1:\n"
01930 "ldarx %0,%y1\n"
01931 "stdcx. %2,%y1\n"
01932 "bne- 1b\n"
01933 : "=&b" (original), "+Z"(v_)
01934 : "b" (v)
01935 : "cr0"
01936 );
01937 ppc_fence_after(order);
01938 return original;
01939 }
01940
01941 bool
01942 compare_exchange_weak(
01943 value_type & expected,
01944 value_type desired,
01945 memory_order success_order,
01946 memory_order failure_order) volatile
01947 {
01948 int success;
01949 ppc_fence_before(success_order);
01950 __asm__(
01951 "ldarx %0,%y2\n"
01952 "cmpd %0, %3\n"
01953 "bne- 2f\n"
01954 "stdcx. %4,%y2\n"
01955 "bne- 2f\n"
01956 "addi %1,0,1\n"
01957 "1:"
01958
01959 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01960 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01961 : "b" (expected), "b" (desired)
01962 : "cr0"
01963 );
01964 if (success)
01965 ppc_fence_after(success_order);
01966 else
01967 ppc_fence_after(failure_order);
01968 return success;
01969 }
01970
01971 bool
01972 compare_exchange_strong(
01973 value_type & expected,
01974 value_type desired,
01975 memory_order success_order,
01976 memory_order failure_order) volatile
01977 {
01978 int success;
01979 ppc_fence_before(success_order);
01980 __asm__(
01981 "0: ldarx %0,%y2\n"
01982 "cmpd %0, %3\n"
01983 "bne- 2f\n"
01984 "stdcx. %4,%y2\n"
01985 "bne- 0b\n"
01986 "addi %1,0,1\n"
01987 "1:"
01988
01989 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
01990 : "=&b" (expected), "=&b" (success), "+Z"(v_)
01991 : "b" (expected), "b" (desired)
01992 : "cr0"
01993 );
01994 if (success)
01995 ppc_fence_after(success_order);
01996 else
01997 ppc_fence_after(failure_order);
01998 return success;
01999 }
02000
02001 value_type
02002 fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
02003 {
02004 v = v * sizeof(*v_);
02005 value_type original, tmp;
02006 ppc_fence_before(order);
02007 __asm__ (
02008 "1:\n"
02009 "ldarx %0,%y2\n"
02010 "add %1,%0,%3\n"
02011 "stdcx. %1,%y2\n"
02012 "bne- 1b\n"
02013 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
02014 : "b" (v)
02015 : "cc");
02016 ppc_fence_after(order);
02017 return original;
02018 }
02019
02020 value_type
02021 fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
02022 {
02023 v = v * sizeof(*v_);
02024 value_type original, tmp;
02025 ppc_fence_before(order);
02026 __asm__ (
02027 "1:\n"
02028 "ldarx %0,%y2\n"
02029 "sub %1,%0,%3\n"
02030 "stdcx. %1,%y2\n"
02031 "bne- 1b\n"
02032 : "=&b" (original), "=&b" (tmp), "+Z"(v_)
02033 : "b" (v)
02034 : "cc");
02035 ppc_fence_after(order);
02036 return original;
02037 }
02038
02039 bool
02040 is_lock_free(void) const volatile
02041 {
02042 return true;
02043 }
02044
02045 BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
02046 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
02049 value_type v_;
02050 };
02051
02052 #endif
02053
02054
02055
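/*
  Generic specializations for small trivially-copyable types: the value is
  memcpy'd into an integer storage cell (zero-padded for the sub-word
  sizes, 64-bit on powerpc64) and then loaded, stored, exchanged and
  compare-exchanged with the same word-sized sequences as the integral
  types. No arithmetic operations are provided.
*/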
02056 template<typename T, bool Sign>
02057 class base_atomic<T, void, 1, Sign> {
02058 typedef base_atomic this_type;
02059 typedef T value_type;
02060 typedef uint32_t storage_type;
02061 public:
02062 explicit base_atomic(value_type v) : v_(0)
02063 {
02064 memcpy(&v_, &v, sizeof(value_type));
02065 }
02066 base_atomic(void) : v_(0) {}
02067
02068 void
02069 store(value_type v, memory_order order = memory_order_seq_cst) volatile
02070 {
02071 storage_type tmp = 0;
02072 memcpy(&tmp, &v, sizeof(value_type));
02073 ppc_fence_before(order);
02074 __asm__ (
02075 "stw %1, %0\n"
02076 : "+m" (v_)
02077 : "r" (tmp)
02078 );
02079 ppc_fence_after_store(order);
02080 }
02081
02082 value_type
02083 load(memory_order order = memory_order_seq_cst) const volatile
02084 {
02085 storage_type tmp;
02086 __asm__ __volatile__ (
02087 "lwz %0, %1\n"
02088 "cmpw %0, %0\n"
02089 "bne- 1f\n"
02090 "1:\n"
02091 : "=r"(tmp)
02092 : "m"(v_)
02093 : "cr0"
02094 );
02095 ppc_fence_after(order);
02096
02097 value_type v;
02098 memcpy(&v, &tmp, sizeof(value_type));
02099 return v;
02100 }
02101
02102 value_type
02103 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
02104 {
02105 storage_type tmp = 0, original;
02106 memcpy(&tmp, &v, sizeof(value_type));
02107 ppc_fence_before(order);
02108 __asm__ (
02109 "1:\n"
02110 "lwarx %0,%y1\n"
02111 "stwcx. %2,%y1\n"
02112 "bne- 1b\n"
02113 : "=&b" (original), "+Z"(v_)
02114 : "b" (tmp)
02115 : "cr0"
02116 );
02117 ppc_fence_after(order);
02118 memcpy(&v, &original, sizeof(value_type));
02119 return v;
02120 }
02121
02122 bool
02123 compare_exchange_weak(
02124 value_type & expected,
02125 value_type desired,
02126 memory_order success_order,
02127 memory_order failure_order) volatile
02128 {
02129 storage_type expected_s = 0, desired_s = 0;
02130 memcpy(&expected_s, &expected, sizeof(value_type));
02131 memcpy(&desired_s, &desired, sizeof(value_type));
02132
02133 int success;
02134 ppc_fence_before(success_order);
02135 __asm__(
02136 "lwarx %0,%y2\n"
02137 "cmpw %0, %3\n"
02138 "bne- 2f\n"
02139 "stwcx. %4,%y2\n"
02140 "bne- 2f\n"
02141 "addi %1,0,1\n"
02142 "1:"
02143
02144 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02145 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02146 : "b" (expected_s), "b" (desired_s)
02147 : "cr0"
02148 );
02149 if (success)
02150 ppc_fence_after(success_order);
02151 else
02152 ppc_fence_after(failure_order);
02153 memcpy(&expected, &expected_s, sizeof(value_type));
02154 return success;
02155 }
02156
02157 bool
02158 compare_exchange_strong(
02159 value_type & expected,
02160 value_type desired,
02161 memory_order success_order,
02162 memory_order failure_order) volatile
02163 {
02164 storage_type expected_s = 0, desired_s = 0;
02165 memcpy(&expected_s, &expected, sizeof(value_type));
02166 memcpy(&desired_s, &desired, sizeof(value_type));
02167
02168 int success;
02169 ppc_fence_before(success_order);
02170 __asm__(
02171 "0: lwarx %0,%y2\n"
02172 "cmpw %0, %3\n"
02173 "bne- 2f\n"
02174 "stwcx. %4,%y2\n"
02175 "bne- 0b\n"
02176 "addi %1,0,1\n"
02177 "1:"
02178
02179 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02180 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02181 : "b" (expected_s), "b" (desired_s)
02182 : "cr0"
02183 );
02184 if (success)
02185 ppc_fence_after(success_order);
02186 else
02187 ppc_fence_after(failure_order);
02188 memcpy(&expected, &expected_s, sizeof(value_type));
02189 return success;
02190 }
02191
02192 bool
02193 is_lock_free(void) const volatile
02194 {
02195 return true;
02196 }
02197
02198 BOOST_ATOMIC_DECLARE_BASE_OPERATORS
02199 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
02202 storage_type v_;
02203 };
02204
02205 template<typename T, bool Sign>
02206 class base_atomic<T, void, 2, Sign> {
02207 typedef base_atomic this_type;
02208 typedef T value_type;
02209 typedef uint32_t storage_type;
02210 public:
02211 explicit base_atomic(value_type v) : v_(0)
02212 {
02213 memcpy(&v_, &v, sizeof(value_type));
02214 }
02215 base_atomic(void) : v_(0) {}
02216
02217 void
02218 store(value_type v, memory_order order = memory_order_seq_cst) volatile
02219 {
02220 storage_type tmp = 0;
02221 memcpy(&tmp, &v, sizeof(value_type));
02222 ppc_fence_before(order);
02223 __asm__ (
02224 "stw %1, %0\n"
02225 : "+m" (v_)
02226 : "r" (tmp)
02227 );
02228 ppc_fence_after_store(order);
02229 }
02230
02231 value_type
02232 load(memory_order order = memory_order_seq_cst) const volatile
02233 {
02234 storage_type tmp;
02235 __asm__ __volatile__ (
02236 "lwz %0, %1\n"
02237 "cmpw %0, %0\n"
02238 "bne- 1f\n"
02239 "1:\n"
02240 : "=r"(tmp)
02241 : "m"(v_)
02242 : "cr0"
02243 );
02244 ppc_fence_after(order);
02245
02246 value_type v;
02247 memcpy(&v, &tmp, sizeof(value_type));
02248 return v;
02249 }
02250
02251 value_type
02252 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
02253 {
02254 storage_type tmp = 0, original;
02255 memcpy(&tmp, &v, sizeof(value_type));
02256 ppc_fence_before(order);
02257 __asm__ (
02258 "1:\n"
02259 "lwarx %0,%y1\n"
02260 "stwcx. %2,%y1\n"
02261 "bne- 1b\n"
02262 : "=&b" (original), "+Z"(v_)
02263 : "b" (tmp)
02264 : "cr0"
02265 );
02266 ppc_fence_after(order);
02267 memcpy(&v, &original, sizeof(value_type));
02268 return v;
02269 }
02270
02271 bool
02272 compare_exchange_weak(
02273 value_type & expected,
02274 value_type desired,
02275 memory_order success_order,
02276 memory_order failure_order) volatile
02277 {
02278 storage_type expected_s = 0, desired_s = 0;
02279 memcpy(&expected_s, &expected, sizeof(value_type));
02280 memcpy(&desired_s, &desired, sizeof(value_type));
02281
02282 int success;
02283 ppc_fence_before(success_order);
02284 __asm__(
02285 "lwarx %0,%y2\n"
02286 "cmpw %0, %3\n"
02287 "bne- 2f\n"
02288 "stwcx. %4,%y2\n"
02289 "bne- 2f\n"
02290 "addi %1,0,1\n"
02291 "1:"
02292
02293 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02294 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02295 : "b" (expected_s), "b" (desired_s)
02296 : "cr0"
02297 );
02298 if (success)
02299 ppc_fence_after(success_order);
02300 else
02301 ppc_fence_after(failure_order);
02302 memcpy(&expected, &expected_s, sizeof(value_type));
02303 return success;
02304 }
02305
02306 bool
02307 compare_exchange_strong(
02308 value_type & expected,
02309 value_type desired,
02310 memory_order success_order,
02311 memory_order failure_order) volatile
02312 {
02313 storage_type expected_s = 0, desired_s = 0;
02314 memcpy(&expected_s, &expected, sizeof(value_type));
02315 memcpy(&desired_s, &desired, sizeof(value_type));
02316
02317 int success;
02318 ppc_fence_before(success_order);
02319 __asm__(
02320 "0: lwarx %0,%y2\n"
02321 "cmpw %0, %3\n"
02322 "bne- 2f\n"
02323 "stwcx. %4,%y2\n"
02324 "bne- 0b\n"
02325 "addi %1,0,1\n"
02326 "1:"
02327
02328 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02329 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02330 : "b" (expected_s), "b" (desired_s)
02331 : "cr0"
02332 );
02333 if (success)
02334 ppc_fence_after(success_order);
02335 else
02336 ppc_fence_after(failure_order);
02337 memcpy(&expected, &expected_s, sizeof(value_type));
02338 return success;
02339 }
02340
02341 bool
02342 is_lock_free(void) const volatile
02343 {
02344 return true;
02345 }
02346
02347 BOOST_ATOMIC_DECLARE_BASE_OPERATORS
02348 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
02351 storage_type v_;
02352 };
02353
02354 template<typename T, bool Sign>
02355 class base_atomic<T, void, 4, Sign> {
02356 typedef base_atomic this_type;
02357 typedef T value_type;
02358 typedef uint32_t storage_type;
02359 public:
02360 explicit base_atomic(value_type v) : v_(0)
02361 {
02362 memcpy(&v_, &v, sizeof(value_type));
02363 }
02364 base_atomic(void) : v_(0) {}
02365
02366 void
02367 store(value_type v, memory_order order = memory_order_seq_cst) volatile
02368 {
02369 storage_type tmp = 0;
02370 memcpy(&tmp, &v, sizeof(value_type));
02371 ppc_fence_before(order);
02372 __asm__ (
02373 "stw %1, %0\n"
02374 : "+m" (v_)
02375 : "r" (tmp)
02376 );
02377 ppc_fence_after_store(order);
02378 }
02379
02380 value_type
02381 load(memory_order order = memory_order_seq_cst) const volatile
02382 {
02383 storage_type tmp;
02384 __asm__ __volatile__ (
02385 "lwz %0, %1\n"
02386 "cmpw %0, %0\n"
02387 "bne- 1f\n"
02388 "1:\n"
02389 : "=r"(tmp)
02390 : "m"(v_)
02391 : "cr0"
02392 );
02393 ppc_fence_after(order);
02394
02395 value_type v;
02396 memcpy(&v, &tmp, sizeof(value_type));
02397 return v;
02398 }
02399
02400 value_type
02401 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
02402 {
02403 storage_type tmp = 0, original;
02404 memcpy(&tmp, &v, sizeof(value_type));
02405 ppc_fence_before(order);
02406 __asm__ (
02407 "1:\n"
02408 "lwarx %0,%y1\n"
02409 "stwcx. %2,%y1\n"
02410 "bne- 1b\n"
02411 : "=&b" (original), "+Z"(v_)
02412 : "b" (tmp)
02413 : "cr0"
02414 );
02415 ppc_fence_after(order);
02416 memcpy(&v, &original, sizeof(value_type));
02417 return v;
02418 }
02419
02420 bool
02421 compare_exchange_weak(
02422 value_type & expected,
02423 value_type desired,
02424 memory_order success_order,
02425 memory_order failure_order) volatile
02426 {
02427 storage_type expected_s = 0, desired_s = 0;
02428 memcpy(&expected_s, &expected, sizeof(value_type));
02429 memcpy(&desired_s, &desired, sizeof(value_type));
02430
02431 int success;
02432 ppc_fence_before(success_order);
02433 __asm__(
02434 "lwarx %0,%y2\n"
02435 "cmpw %0, %3\n"
02436 "bne- 2f\n"
02437 "stwcx. %4,%y2\n"
02438 "bne- 2f\n"
02439 "addi %1,0,1\n"
02440 "1:"
02441
02442 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02443 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02444 : "b" (expected_s), "b" (desired_s)
02445 : "cr0"
02446 );
02447 if (success)
02448 ppc_fence_after(success_order);
02449 else
02450 ppc_fence_after(failure_order);
02451 memcpy(&expected, &expected_s, sizeof(value_type));
02452 return success;
02453 }
02454
02455 bool
02456 compare_exchange_strong(
02457 value_type & expected,
02458 value_type desired,
02459 memory_order success_order,
02460 memory_order failure_order) volatile
02461 {
02462 storage_type expected_s = 0, desired_s = 0;
02463 memcpy(&expected_s, &expected, sizeof(value_type));
02464 memcpy(&desired_s, &desired, sizeof(value_type));
02465
02466 int success;
02467 ppc_fence_before(success_order);
02468 __asm__(
02469 "0: lwarx %0,%y2\n"
02470 "cmpw %0, %3\n"
02471 "bne- 2f\n"
02472 "stwcx. %4,%y2\n"
02473 "bne- 0b\n"
02474 "addi %1,0,1\n"
02475 "1:"
02476
02477 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02478 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02479 : "b" (expected_s), "b" (desired_s)
02480 : "cr0"
02481 );
02482 if (success)
02483 ppc_fence_after(success_order);
02484 else
02485 ppc_fence_after(failure_order);
02486 memcpy(&expected, &expected_s, sizeof(value_type));
02487 return success;
02488 }
02489
02490 bool
02491 is_lock_free(void) const volatile
02492 {
02493 return true;
02494 }
02495
02496 BOOST_ATOMIC_DECLARE_BASE_OPERATORS
02497 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
02500 storage_type v_;
02501 };
02502
02503 #if defined(__powerpc64__)
02504
02505 template<typename T, bool Sign>
02506 class base_atomic<T, void, 8, Sign> {
02507 typedef base_atomic this_type;
02508 typedef T value_type;
02509 typedef uint64_t storage_type;
02510 public:
02511 explicit base_atomic(value_type v)
02512 {
02513 memcpy(&v_, &v, sizeof(value_type));
02514 }
02515 base_atomic(void) {}
02516
02517 void
02518 store(value_type v, memory_order order = memory_order_seq_cst) volatile
02519 {
02520 storage_type tmp;
02521 memcpy(&tmp, &v, sizeof(value_type));
02522 ppc_fence_before(order);
02523 __asm__ (
02524 "std %1, %0\n"
02525 : "+m" (v_)
02526 : "r" (tmp)
02527 );
02528 ppc_fence_after_store(order);
02529 }
02530
02531 value_type
02532 load(memory_order order = memory_order_seq_cst) const volatile
02533 {
02534 storage_type tmp;
02535 __asm__ __volatile__ (
02536 "ld %0, %1\n"
02537 "cmpd %0, %0\n"
02538 "bne- 1f\n"
02539 "1:\n"
02540 : "=r"(tmp)
02541 : "m"(v_)
02542 : "cr0"
02543 );
02544 ppc_fence_after(order);
02545
02546 value_type v;
02547 memcpy(&v, &tmp, sizeof(value_type));
02548 return v;
02549 }
02550
02551 value_type
02552 exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
02553 {
02554 storage_type tmp = 0, original;
02555 memcpy(&tmp, &v, sizeof(value_type));
02556 ppc_fence_before(order);
02557 __asm__ (
02558 "1:\n"
02559 "ldarx %0,%y1\n"
02560 "stdcx. %2,%y1\n"
02561 "bne- 1b\n"
02562 : "=&b" (original), "+Z"(v_)
02563 : "b" (tmp)
02564 : "cr0"
02565 );
02566 ppc_fence_after(order);
02567 memcpy(&v, &original, sizeof(value_type));
02568 return v;
02569 }
02570
02571 bool
02572 compare_exchange_weak(
02573 value_type & expected,
02574 value_type desired,
02575 memory_order success_order,
02576 memory_order failure_order) volatile
02577 {
02578 storage_type expected_s, desired_s;
02579 memcpy(&expected_s, &expected, sizeof(value_type));
02580 memcpy(&desired_s, &desired, sizeof(value_type));
02581
02582 int success;
02583 ppc_fence_before(success_order);
02584 __asm__(
02585 "ldarx %0,%y2\n"
02586 "cmpd %0, %3\n"
02587 "bne- 2f\n"
02588 "stdcx. %4,%y2\n"
02589 "bne- 2f\n"
02590 "addi %1,0,1\n"
02591 "1:"
02592
02593 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02594 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02595 : "b" (expected_s), "b" (desired_s)
02596 : "cr0"
02597 );
02598 if (success)
02599 ppc_fence_after(success_order);
02600 else
02601 ppc_fence_after(failure_order);
02602 memcpy(&expected, &expected_s, sizeof(value_type));
02603 return success;
02604 }
02605
02606 bool
02607 compare_exchange_strong(
02608 value_type & expected,
02609 value_type desired,
02610 memory_order success_order,
02611 memory_order failure_order) volatile
02612 {
02613 storage_type expected_s, desired_s;
02614 memcpy(&expected_s, &expected, sizeof(value_type));
02615 memcpy(&desired_s, &desired, sizeof(value_type));
02616
02617 int success;
02618 ppc_fence_before(success_order);
02619 __asm__(
02620 "0: ldarx %0,%y2\n"
02621 "cmpd %0, %3\n"
02622 "bne- 2f\n"
02623 "stdcx. %4,%y2\n"
02624 "bne- 0b\n"
02625 "addi %1,0,1\n"
02626 "1:"
02627
02628 BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
02629 : "=&b" (expected_s), "=&b" (success), "+Z"(v_)
02630 : "b" (expected_s), "b" (desired_s)
02631 : "cr0"
02632 );
02633 if (success)
02634 ppc_fence_after(success_order);
02635 else
02636 ppc_fence_after(failure_order);
02637 memcpy(&expected, &expected_s, sizeof(value_type));
02638 return success;
02639 }
02640
02641 bool
02642 is_lock_free(void) const volatile
02643 {
02644 return true;
02645 }
02646
02647 BOOST_ATOMIC_DECLARE_BASE_OPERATORS
02648 private:
base_atomic(const base_atomic &);
void operator=(const base_atomic &);
02651 storage_type v_;
02652 };
02653 #endif

}
}

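/*
  Stand-alone fences: acquire maps to "isync", release to "lwsync" on
  powerpc64 (a full "sync" on 32-bit builds), and acq_rel/seq_cst to
  "sync". A signal fence only needs to stop compiler reordering, so it is
  a plain memory clobber.
*/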
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
    switch(order) {
    case memory_order_acquire:
        __asm__ __volatile__ ("isync" ::: "memory");
        break;
    case memory_order_release:
#if defined(__powerpc64__)
        __asm__ __volatile__ ("lwsync" ::: "memory");
        break;
#endif
    case memory_order_acq_rel:
    case memory_order_seq_cst:
        __asm__ __volatile__ ("sync" ::: "memory");
    default:;
    }
}

#define BOOST_ATOMIC_SIGNAL_FENCE 2
inline void
atomic_signal_fence(memory_order order)
{
    switch(order) {
    case memory_order_acquire:
    case memory_order_release:
    case memory_order_acq_rel:
    case memory_order_seq_cst:
        __asm__ __volatile__ ("" ::: "memory");
        break;
    default:;
    }
}

}

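/*
  Usage sketch (illustrative only, not part of this header). It assumes the
  public boost::atomic<> front end from <boost/atomic.hpp>, which dispatches
  to the specializations above; the names producer/consumer/fetch_max are
  made up for the example.

    #include <boost/atomic.hpp>

    boost::atomic<int> guard(0);
    int payload = 0;

    void producer()
    {
        payload = 42;                                 // plain store
        guard.store(1, boost::memory_order_release);  // lwsync (or sync) + stw
    }

    void consumer()
    {
        while (guard.load(boost::memory_order_acquire) == 0)  // lwz + dependent branch + isync
            ;
        // payload is guaranteed to read as 42 here
    }

    // compare_exchange_weak maps onto a single lwarx/stwcx. attempt,
    // so it belongs in a retry loop:
    int fetch_max(boost::atomic<int> & a, int v)
    {
        int old = a.load(boost::memory_order_relaxed);
        while (old < v &&
               !a.compare_exchange_weak(old, v,
                                        boost::memory_order_acq_rel,
                                        boost::memory_order_relaxed))
            ;  // on failure, 'old' has been reloaded with the current value
        return old;
    }
*/
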
#endif