gcc-alpha.hpp
#ifndef BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP
#define BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP

// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/builder.hpp>

/*
  Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
  (HP OpenVMS systems documentation) and the Alpha reference manual.
 */

/*
  NB: The most natural thing would be to write the increment/decrement
  operators along the following lines:

  __asm__ __volatile__(
      "1: ldl_l %0,%1 \n"
      "addl %0,1,%0 \n"
      "stl_c %0,%1 \n"
      "beq %0,1b\n"
      : "=&b" (tmp)
      : "m" (value)
      : "cc"
  );

  However, according to the comments on the HP website and matching
  comments in the Linux kernel sources, this defeats branch prediction,
  since the CPU statically predicts backward branches as taken and would
  therefore mispredict the (rarely taken) retry branch on every pass.
  So instead we copy the trick from the Linux kernel: introduce a
  forward branch to an out-of-line retry path that branches back again.

  I have, however, had a hard time measuring the difference between
  the two versions in microbenchmarks -- I am leaving it in nevertheless
  as it apparently does not hurt either.
*/

namespace boost {
namespace detail {
namespace atomic {

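/* Alpha provides only full memory barriers ("mb", plus the write-only
   "wmb", which is not used here), so every ordering constraint stronger
   than relaxed is mapped to a full barrier issued before and/or after
   the access. */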
static inline void fence_before(memory_order order)
{
    switch(order) {
        case memory_order_consume:
        case memory_order_release:
        case memory_order_acq_rel:
        case memory_order_seq_cst:
            __asm__ __volatile__ ("mb" ::: "memory");
        default:;
    }
}

static inline void fence_after(memory_order order)
{
    switch(order) {
        case memory_order_acquire:
        case memory_order_acq_rel:
        case memory_order_seq_cst:
            __asm__ __volatile__ ("mb" ::: "memory");
        default:;
    }
}

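/* A stand-alone thread fence: any ordering other than relaxed requires
   a full barrier on Alpha. */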
template<>
inline void platform_atomic_thread_fence(memory_order order)
{
    switch(order) {
        case memory_order_acquire:
        case memory_order_consume:
        case memory_order_release:
        case memory_order_acq_rel:
        case memory_order_seq_cst:
            __asm__ __volatile__ ("mb" ::: "memory");
        default:;
    }
}

template<typename T>
class atomic_alpha_32 {
public:
    typedef T integral_type;
    explicit atomic_alpha_32(T v) : i(v) {}
    atomic_alpha_32() {}
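    /* Loads and stores of aligned 32-bit words are atomic by themselves
       on Alpha; a plain load followed by fence_after() yields acquire
       semantics, and fence_before() followed by a plain store yields
       release semantics. */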
    T load(memory_order order=memory_order_seq_cst) const volatile
    {
        T v=*reinterpret_cast<volatile const int *>(&i);
        fence_after(order);
        return v;
    }
    void store(T v, memory_order order=memory_order_seq_cst) volatile
    {
        fence_before(order);
        *reinterpret_cast<volatile int *>(&i)=(int)v;
    }
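    /* Register map for the asm below: %0 = expected, %1 = desired,
       %2 = current, %3 = success, %4 = the atomic object.  On a compare
       mismatch, the out-of-line path at label 3 copies the (zero)
       comparison result into %1, so after the asm "desired" doubles as
       the success flag: nonzero if the store-conditional succeeded,
       zero on a mismatch or on a spurious stl_c failure (which is
       permitted for the weak variant).  "expected" is updated with the
       observed value in either case. */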
    bool compare_exchange_weak(
        T &expected,
        T desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        fence_before(success_order);
        int current, success;
        __asm__ __volatile__(
            "1: ldl_l %2, %4\n"
            "cmpeq %2, %0, %3\n"
            "mov %2, %0\n"
            "beq %3, 3f\n"
            "stl_c %1, %4\n"
            "2:\n"

            ".subsection 2\n"
            "3: mov %3, %1\n"
            "br 2b\n"
            ".previous\n"

            : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
            : "m" (i)
            :
        );
        if (desired) fence_after(success_order);
        else fence_after(failure_order);
        return desired;
    }

    bool is_lock_free(void) const volatile {return true;}
protected:
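    /* The read-modify-write loops below all follow the same
       load-locked/store-conditional pattern: ldl_l loads the word and
       sets the lock flag, stl_c succeeds only if the location was not
       written in between, and on failure the forward branch "beq" jumps
       to a retry stub placed in .subsection 2, out of the straight-line
       code path (the branch-prediction trick described at the top of
       this file). */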
    inline T fetch_add_var(T c, memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldl_l %0, %2\n"
            "addl %0, %3, %1\n"
            "stl_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i), "r" (c)
            :
        );
        fence_after(order);
        return original;
    }
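    /* fetch_inc()/fetch_dec() are the same loop with the addend
       hard-coded as an immediate, saving the register that would
       otherwise hold the constant. */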
    inline T fetch_inc(memory_order order) volatile
    {
        fence_before(order);
        int original, modified;
        __asm__ __volatile__(
            "1: ldl_l %0, %2\n"
            "addl %0, 1, %1\n"
            "stl_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i)
            :
        );
        fence_after(order);
        return original;
    }
    inline T fetch_dec(memory_order order) volatile
    {
        fence_before(order);
        int original, modified;
        __asm__ __volatile__(
            "1: ldl_l %0, %2\n"
            "subl %0, 1, %1\n"
            "stl_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i)
            :
        );
        fence_after(order);
        return original;
    }
private:
    T i;
};

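/* The 64-bit variant is identical in structure to atomic_alpha_32, but
   uses the quadword instructions ldq_l/stq_c/addq/subq. */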
template<typename T>
class atomic_alpha_64 {
public:
    typedef T integral_type;
    explicit atomic_alpha_64(T v) : i(v) {}
    atomic_alpha_64() {}
    T load(memory_order order=memory_order_seq_cst) const volatile
    {
        T v=*reinterpret_cast<volatile const T *>(&i);
        fence_after(order);
        return v;
    }
    void store(T v, memory_order order=memory_order_seq_cst) volatile
    {
        fence_before(order);
        *reinterpret_cast<volatile T *>(&i)=v;
    }
    bool compare_exchange_weak(
        T &expected,
        T desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        fence_before(success_order);
        int current, success;
        __asm__ __volatile__(
            "1: ldq_l %2, %4\n"
            "cmpeq %2, %0, %3\n"
            "mov %2, %0\n"
            "beq %3, 3f\n"
            "stq_c %1, %4\n"
            "2:\n"

            ".subsection 2\n"
            "3: mov %3, %1\n"
            "br 2b\n"
            ".previous\n"

            : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
            : "m" (i)
            :
        );
        if (desired) fence_after(success_order);
        else fence_after(failure_order);
        return desired;
    }

    bool is_lock_free(void) const volatile {return true;}
protected:
    inline T fetch_add_var(T c, memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldq_l %0, %2\n"
            "addq %0, %3, %1\n"
            "stq_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i), "r" (c)
            :
        );
        fence_after(order);
        return original;
    }
    inline T fetch_inc(memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldq_l %0, %2\n"
            "addq %0, 1, %1\n"
            "stq_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i)
            :
        );
        fence_after(order);
        return original;
    }
    inline T fetch_dec(memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldq_l %0, %2\n"
            "subq %0, 1, %1\n"
            "stq_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i)
            :
        );
        fence_after(order);
        return original;
    }
private:
    T i;
};

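/* Hook the primitives above into the generic framework from
   builder.hpp: build_exchange presumably derives exchange() from
   compare_exchange_weak(), and build_atomic_from_typical fills in the
   remaining operations from the typical member set provided here. */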
template<typename T>
class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > {
public:
    typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};

template<typename T>
class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > {
public:
    typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};

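/* Alpha has load-locked/store-conditional only for 32- and 64-bit
   quantities, so 8- and 16-bit atomics are emulated on top of the
   32-bit primitive via build_atomic_from_larger_type. */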
template<typename T>
class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
    typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;

    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};

template<typename T>
class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
    typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;

    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};

}
}
}

#endif