gcc-alpha.hpp
#ifndef BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP
#define BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP

//  Copyright (c) 2009 Helge Bahmann
//
//  Distributed under the Boost Software License, Version 1.0.
//  (See accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)

#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/builder.hpp>
/*
  Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
  (HP OpenVMS systems documentation) and the Alpha Architecture
  Reference Manual.
 */

/*
	NB: The most natural way to write the increment/decrement
	operators would be along the following lines:

	__asm__ __volatile__(
		"1: ldl_l %0,%1 \n"
		"addl %0,1,%0 \n"
		"stl_c %0,%1 \n"
		"beq %0,1b\n"
		: "=&r" (tmp)
		: "m" (value)
		: "cc"
	);

	However, according to the comments on the HP website and matching
	comments in the Linux kernel sources, this defeats static branch
	prediction: the CPU assumes backward branches are always taken, so
	on the common path (store-conditional succeeded, no retry needed)
	the final branch would be mispredicted. The code below therefore
	copies the trick from the Linux kernel and branches forward to an
	out-of-line retry stub that branches back, as sketched in the
	comment that follows.

	I have, however, had a hard time measuring the difference between
	the two versions in microbenchmarks -- I am leaving the trick in
	nevertheless, as it apparently does not hurt either.
*/
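
/*
	For illustration, a sketch of the transformed loop described above
	(hypothetical variable names; the member functions below use the
	same pattern). The retry branch is moved into a rarely-executed
	out-of-line section via .subsection, so the branch left on the hot
	path is a forward branch, which static prediction assumes to be
	not taken:

	__asm__ __volatile__(
		"1: ldl_l %0,%1 \n"
		"addl %0,1,%0 \n"
		"stl_c %0,%1 \n"
		"beq %0,2f \n"

		".subsection 2\n"
		"2: br 1b\n"
		".previous\n"

		: "=&r" (tmp)
		: "m" (value)
		: "cc"
	);
*/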

namespace boost {
namespace detail {
namespace atomic {

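/*
	fence_before/fence_after bracket a memory access with the Alpha "mb"
	(memory barrier) instruction as required by the requested ordering:
	fence_before supplies the release half, fence_after the acquire
	half. Alpha does not even preserve data-dependency ordering, so
	memory_order_consume also needs a real barrier on the load side.
*/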
static inline void fence_before(memory_order order)
{
	switch(order) {
		case memory_order_release:
		case memory_order_acq_rel:
		case memory_order_seq_cst:
			__asm__ __volatile__ ("mb" ::: "memory");
		default:;
	}
}

static inline void fence_after(memory_order order)
{
	switch(order) {
		case memory_order_consume:
		case memory_order_acquire:
		case memory_order_acq_rel:
		case memory_order_seq_cst:
			__asm__ __volatile__ ("mb" ::: "memory");
		default:;
	}
}

template<>
inline void platform_atomic_thread_fence(memory_order order)
{
	switch(order) {
		case memory_order_acquire:
		case memory_order_consume:
		case memory_order_release:
		case memory_order_acq_rel:
		case memory_order_seq_cst:
			__asm__ __volatile__ ("mb" ::: "memory");
		default:;
	}
}

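/*
	32-bit atomics built on the ldl_l/stl_c load-locked/store-conditional
	pair. Aligned 32-bit loads and stores are naturally atomic on Alpha,
	so load() and store() only add the required fences around plain
	volatile accesses.
*/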
template<typename T>
class atomic_alpha_32 {
public:
	typedef T integral_type;
	explicit atomic_alpha_32(T v) : i(v) {}
	atomic_alpha_32() {}
	T load(memory_order order=memory_order_seq_cst) const volatile
	{
		T v=*reinterpret_cast<volatile const int *>(&i);
		fence_after(order);
		return v;
	}
	void store(T v, memory_order order=memory_order_seq_cst) volatile
	{
		fence_before(order);
		*reinterpret_cast<volatile int *>(&i)=(int)v;
	}
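	/*
		Weak CAS via LL/SC: ldl_l loads the current value, cmpeq compares
		it with "expected", and stl_c attempts the conditional store. On
		a mismatch, control branches to an out-of-line stub that zeroes
		the result. On exit, "desired" holds the success flag (the stl_c
		status, or zero on mismatch) and "expected" holds the value that
		was read. A spurious stl_c failure is simply reported as a failed
		CAS, which is what makes this the weak variant.
	*/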
	bool compare_exchange_weak(
		T &expected,
		T desired,
		memory_order success_order,
		memory_order failure_order) volatile
	{
		fence_before(success_order);
		int current, success;
		__asm__ __volatile__(
			"1: ldl_l %2, %4\n"
			"cmpeq %2, %0, %3\n"
			"mov %2, %0\n"
			"beq %3, 3f\n"
			"stl_c %1, %4\n"
			"2:\n"

			".subsection 2\n"
			"3: mov %3, %1\n"
			"br 2b\n"
			".previous\n"

			: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
			: "m" (i)
			: "memory"
		);
		if (desired) fence_after(success_order);
		else fence_after(failure_order);
		return desired;
	}

	bool is_lock_free(void) const volatile {return true;}
protected:
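	/*
		LL/SC read-modify-write loops. If the store-conditional fails
		(stl_c writes 0 to its source register), control branches forward
		to an out-of-line stub in .subsection 2 that retries, keeping the
		hot path free of taken branches (see the file comment above).
	*/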
	inline T fetch_add_var(T c, memory_order order) volatile
	{
		fence_before(order);
		T original, modified;
		__asm__ __volatile__(
			"1: ldl_l %0, %2\n"
			"addl %0, %3, %1\n"
			"stl_c %1, %2\n"
			"beq %1, 2f\n"

			".subsection 2\n"
			"2: br 1b\n"
			".previous\n"

			: "=&r" (original), "=&r" (modified)
			: "m" (i), "r" (c)
			: "memory"
		);
		fence_after(order);
		return original;
	}
	inline T fetch_inc(memory_order order) volatile
	{
		fence_before(order);
		int original, modified;
		__asm__ __volatile__(
			"1: ldl_l %0, %2\n"
			"addl %0, 1, %1\n"
			"stl_c %1, %2\n"
			"beq %1, 2f\n"

			".subsection 2\n"
			"2: br 1b\n"
			".previous\n"

			: "=&r" (original), "=&r" (modified)
			: "m" (i)
			: "memory"
		);
		fence_after(order);
		return original;
	}
	inline T fetch_dec(memory_order order) volatile
	{
		fence_before(order);
		int original, modified;
		__asm__ __volatile__(
			"1: ldl_l %0, %2\n"
			"subl %0, 1, %1\n"
			"stl_c %1, %2\n"
			"beq %1, 2f\n"

			".subsection 2\n"
			"2: br 1b\n"
			".previous\n"

			: "=&r" (original), "=&r" (modified)
			: "m" (i)
			: "memory"
		);
		fence_after(order);
		return original;
	}
private:
	T i;
};

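/*
	64-bit variant: identical in structure to atomic_alpha_32, but built
	on the quadword instructions ldq_l/stq_c/addq/subq.
*/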
template<typename T>
class atomic_alpha_64 {
public:
	typedef T integral_type;
	explicit atomic_alpha_64(T v) : i(v) {}
	atomic_alpha_64() {}
	T load(memory_order order=memory_order_seq_cst) const volatile
	{
		T v=*reinterpret_cast<volatile const T *>(&i);
		fence_after(order);
		return v;
	}
	void store(T v, memory_order order=memory_order_seq_cst) volatile
	{
		fence_before(order);
		*reinterpret_cast<volatile T *>(&i)=v;
	}
	bool compare_exchange_weak(
		T &expected,
		T desired,
		memory_order success_order,
		memory_order failure_order) volatile
	{
		fence_before(success_order);
		T current, success;
		__asm__ __volatile__(
			"1: ldq_l %2, %4\n"
			"cmpeq %2, %0, %3\n"
			"mov %2, %0\n"
			"beq %3, 3f\n"
			"stq_c %1, %4\n"
			"2:\n"

			".subsection 2\n"
			"3: mov %3, %1\n"
			"br 2b\n"
			".previous\n"

			: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
			: "m" (i)
			: "memory"
		);
		if (desired) fence_after(success_order);
		else fence_after(failure_order);
		return desired;
	}

	bool is_lock_free(void) const volatile {return true;}
protected:
	inline T fetch_add_var(T c, memory_order order) volatile
	{
		fence_before(order);
		T original, modified;
		__asm__ __volatile__(
			"1: ldq_l %0, %2\n"
			"addq %0, %3, %1\n"
			"stq_c %1, %2\n"
			"beq %1, 2f\n"

			".subsection 2\n"
			"2: br 1b\n"
			".previous\n"

			: "=&r" (original), "=&r" (modified)
			: "m" (i), "r" (c)
			: "memory"
		);
		fence_after(order);
		return original;
	}
	inline T fetch_inc(memory_order order) volatile
	{
		fence_before(order);
		T original, modified;
		__asm__ __volatile__(
			"1: ldq_l %0, %2\n"
			"addq %0, 1, %1\n"
			"stq_c %1, %2\n"
			"beq %1, 2f\n"

			".subsection 2\n"
			"2: br 1b\n"
			".previous\n"

			: "=&r" (original), "=&r" (modified)
			: "m" (i)
			: "memory"
		);
		fence_after(order);
		return original;
	}
	inline T fetch_dec(memory_order order) volatile
	{
		fence_before(order);
		T original, modified;
		__asm__ __volatile__(
			"1: ldq_l %0, %2\n"
			"subq %0, 1, %1\n"
			"stq_c %1, %2\n"
			"beq %1, 2f\n"

			".subsection 2\n"
			"2: br 1b\n"
			".previous\n"

			: "=&r" (original), "=&r" (modified)
			: "m" (i)
			: "memory"
		);
		fence_after(order);
		return original;
	}
private:
	T i;
};

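/*
	Map integral types onto the implementations above by size. Alpha
	load-locked/store-conditional exists only at 32- and 64-bit
	granularity, so 1- and 2-byte atomics are emulated on top of the
	32-bit primitive via build_atomic_from_larger_type.
*/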
template<typename T>
class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > {
public:
	typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
	explicit platform_atomic_integral(T v) : super(v) {}
	platform_atomic_integral(void) {}
};

template<typename T>
class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > {
public:
	typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
	explicit platform_atomic_integral(T v) : super(v) {}
	platform_atomic_integral(void) {}
};

template<typename T>
class platform_atomic_integral<T, 1> : public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
	typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;

	explicit platform_atomic_integral(T v) : super(v) {}
	platform_atomic_integral(void) {}
};

template<typename T>
class platform_atomic_integral<T, 2> : public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
	typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;

	explicit platform_atomic_integral(T v) : super(v) {}
	platform_atomic_integral(void) {}
};

}
}
}

#endif