#ifndef BOOST_DETAIL_ATOMIC_GCC_X86_HPP
#define BOOST_DETAIL_ATOMIC_GCC_X86_HPP

// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2

#if defined(__x86_64__)
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 1
#endif

#define BOOST_ATOMIC_ADDRESS_LOCK_FREE 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2

namespace boost {

#if defined(__x86_64__)
# define BOOST_ATOMIC_X86_FENCE_INSTR "mfence\n"
#else
# define BOOST_ATOMIC_X86_FENCE_INSTR "lock ; addl $0, (%%esp)\n"
#endif

#define BOOST_ATOMIC_THREAD_FENCE 2
static inline void
atomic_thread_fence(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
            break;
        case memory_order_release:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_acquire:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_consume:
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory");
            break;
        default:;
    }
}

#define BOOST_ATOMIC_SIGNAL_FENCE 2
static inline void
atomic_signal_fence(memory_order)
{
    __asm__ __volatile__ ("" ::: "memory");
}

namespace detail {
namespace atomic {

static inline void
platform_fence_before(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
        case memory_order_acquire:
        case memory_order_consume:
            break;
        case memory_order_release:
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            /* release */
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ ("" ::: "memory");
            /* seq */
            break;
    }
}

static inline void
platform_fence_after(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
        case memory_order_release:
            break;
        case memory_order_acquire:
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            /* acquire */
            break;
        case memory_order_consume:
            /* consume */
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ ("" ::: "memory");
            /* seq */
            break;
        default:;
    }
}

static inline void
platform_fence_after_load(memory_order order)
{
    switch(order) {
        case memory_order_relaxed:
        case memory_order_release:
            break;
        case memory_order_acquire:
        case memory_order_acq_rel:
            __asm__ __volatile__ ("" ::: "memory");
            break;
        case memory_order_consume:
            break;
        case memory_order_seq_cst:
            __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory");
            break;
        default:;
    }
}
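/* Illustrative note, not part of the original header: of the fences above,
   only memory_order_seq_cst costs an actual instruction on x86; the weaker
   orderings merely restrain the compiler (an empty asm with a "memory"
   clobber). Assuming the boost::atomic front-end from <boost/atomic.hpp>
   dispatches to these helpers:

       boost::atomic_thread_fence(boost::memory_order_release);
       // compiler barrier only, no instruction emitted

       boost::atomic_thread_fence(boost::memory_order_seq_cst);
       // "mfence" on x86_64, "lock ; addl $0, (%esp)" on 32-bit x86
*/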
template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddb %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgb %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgb %2, %1"
            : "+a" (previous), "+m" (v_)
            : "q" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
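/* Illustrative note, not part of the original header: in store() above, a
   seq_cst store is routed through exchange() because a plain mov is not
   sequentially consistent on x86, whereas xchg with a memory operand is
   implicitly locked and acts as a full barrier. A hypothetical caller,
   assuming the boost::atomic front-end:

       boost::atomic<char> flag(0);
       flag.store(1);                               // becomes xchgb
       flag.store(1, boost::memory_order_release);  // plain movb + compiler barrier
*/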
template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddw %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgw %0, %1"
            : "+q" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgw %2, %1"
            : "+a" (previous), "+m" (v_)
            : "q" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
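/* Illustrative note, not part of the original header: the 1- and 2-byte
   variants above pass their register operand with the "q" constraint, which
   on 32-bit x86 restricts it to eax/ebx/ecx/edx, the registers whose low
   bytes are addressable, as byte instructions such as xaddb require; the
   4- and 8-byte variants below can accept any general-purpose register:

       __asm__ ("lock ; xaddb %0, %1" : "+q" (v), "+m" (m));  // v in al/bl/cl/dl
       __asm__ ("lock ; xaddl %0, %1" : "+r" (v), "+m" (m));  // v in any GP register
*/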
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
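/* Illustrative note, not part of the original header: x86 has no
   fetch-and-and/or/xor instruction that returns the previous value, so
   fetch_and, fetch_or and fetch_xor above are synthesized from a
   compare-exchange loop. Unrolled, the pattern for fetch_or reads:

       value_type tmp = load(memory_order_relaxed);   // initial guess
       while (!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)) {
           // on failure, tmp has been refreshed with the current value
       }
       return tmp;   // the value observed immediately before the update
*/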
#if defined(__x86_64__)

template<typename T, bool Sign>
class base_atomic<T, int, 8, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
        return tmp;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type tmp = load(memory_order_relaxed);
        do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
        return tmp;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#endif
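/* Illustrative note, not part of the original header: the 8-byte integral
   specialization above is compiled only on x86_64, which is why
   BOOST_ATOMIC_LLONG_LOCK_FREE is 2 ("always lock-free") there but only 1
   ("sometimes lock-free") on 32-bit targets, where 64-bit atomics fall back
   to the cmpxchg8b-based code near the end of this header. Assuming the
   boost::atomic front-end, a caller can test at runtime:

       boost::atomic<long long> x(0);
       bool lock_free = x.is_lock_free();
*/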
/* pointers */

#if !defined(__x86_64__)

template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef void * value_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool compare_exchange_strong(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        v = v * sizeof(*v_);
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddl %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return reinterpret_cast<value_type>(v);
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#else

template<bool Sign>
class base_atomic<void *, void *, 8, Sign> {
    typedef base_atomic this_type;
    typedef void * value_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool compare_exchange_strong(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

template<typename T, bool Sign>
class base_atomic<T *, void *, 8, Sign> {
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            platform_fence_before(order);
            const_cast<volatile value_type &>(v_) = v;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        value_type previous = expected;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous), "+m" (v_)
            : "r" (desired)
        );
        bool success = (previous == expected);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        expected = previous;
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        v = v * sizeof(*v_);
        platform_fence_before(order);
        __asm__ (
            "lock ; xaddq %0, %1"
            : "+r" (v), "+m" (v_)
        );
        platform_fence_after(order);
        return reinterpret_cast<value_type>(v);
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        return fetch_add(-v, order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};

#endif
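/* Illustrative note, not part of the original header: the pointer
   specializations above scale fetch_add/fetch_sub by sizeof(*v_), so
   pointer arithmetic behaves like the built-in operators; the
   specializations that follow cover arbitrary trivially copyable 1-, 2-,
   4- and (on x86_64) 8-byte types. Values are shuttled through an unsigned
   integer storage_type with memcpy, so the same lock-free instructions
   apply without type-punning through casts. A hypothetical 4-byte struct:

       struct rgba { unsigned char r, g, b, a; };  // 4 bytes, trivially copyable
       boost::atomic<rgba> pixel;                  // handled via uint32_t storage
*/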
template<typename T, bool Sign>
class base_atomic<T, void, 1, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint8_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgb %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgb %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint16_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgw %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgw %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint32_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

#if defined(__x86_64__)
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint64_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
#endif

#if defined(__i686__)

template<typename T>
bool
platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr)
{
    int scratch;
    T prev = expected;
    /* Make sure ebx is saved and restored properly in case
       this object is compiled as "position independent". Since
       programmers on x86 tend to forget specifying -DPIC or
       similar, always assume PIC.

       To make this work uniformly even in the non-PIC case, set up
       the register constraints so that ebx cannot be used by accident,
       e.g. as the base address for the variable to be modified.
       Accessing "scratch" is always okay, as it can only be placed
       on the stack (and is therefore accessed through ebp or esp only).

       In theory, one could push/pop ebx onto/off the stack, but moves
       to a prepared stack slot turn out to be faster. */
    __asm__ __volatile__ (
        "movl %%ebx, %1\n"
        "movl %2, %%ebx\n"
        "lock; cmpxchg8b 0(%4)\n"
        "movl %1, %%ebx\n"
        : "=A" (prev), "=m" (scratch)
        : "D" ((int)desired), "c" ((int)(desired >> 32)), "S" (ptr), "0" (prev)
        : "memory");
    bool success = (prev == expected);
    expected = prev;
    return success;
}

#endif
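/* Illustrative note, not part of the original header: on 32-bit targets,
   platform_cmpxchg64_strong above is the only native 64-bit primitive;
   cas64strong.hpp, included at the bottom of this file, builds the
   remaining 64-bit operations on top of it (as its name suggests). The ebx
   shuffling is required because i386 PIC code reserves ebx for the GOT
   pointer, while cmpxchg8b insists on the desired value living in ebx (low
   half) and ecx (high half). A sketch of a CAS loop over the primitive:

       volatile int64_t v = 0;
       int64_t expected = 0;
       while (!platform_cmpxchg64_strong(expected, expected + 1, &v)) {
           // expected now holds the freshly observed value; retry
       }
*/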
}
}
}

/* pull in 64-bit atomic type using cmpxchg8b above */
#if defined(__i686__)
#include <boost/atomic/detail/cas64strong.hpp>
#endif

#endif