atomic_impl.h
1 /*
2  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
3  * University Research and Technology
4  * Corporation. All rights reserved.
5  * Copyright (c) 2004-2006 The University of Tennessee and The University
6  * of Tennessee Research Foundation. All rights
7  * reserved.
8  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9  * University of Stuttgart. All rights reserved.
10  * Copyright (c) 2004-2005 The Regents of the University of California.
11  * All rights reserved.
12  * Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
13  * $COPYRIGHT$
14  *
15  * Additional copyrights may follow
16  *
17  * $HEADER$
18  */
19 
20 /* Inline C implementation of the functions defined in atomic.h */
21 
22 #ifdef HAVE_STDLIB_H
23 #include <stdlib.h>
24 #endif
25 
26 /**********************************************************************
27  *
28  * Atomic math operations
29  *
30  * All the architectures provide a compare_and_set atomic operation. If
31  * they don't provide atomic additions and/or subtractions, then we can
32  * define these operations using the atomic compare_and_set.
33  *
34  * Some architectures do not provide support for the 64-bit
35  * atomic operations. Until we find a better solution, let's just
36  * undefine all those functions if there is no 64-bit cmpset.
37  *
38  *********************************************************************/
39 #if OPAL_HAVE_ATOMIC_CMPSET_32
40 
41 #if !defined(OPAL_HAVE_ATOMIC_SWAP_32)
42 #define OPAL_HAVE_ATOMIC_SWAP_32 1
43 static inline int32_t opal_atomic_swap_32(volatile int32_t *addr,
44  int32_t newval)
45 {
46  int32_t old;
47  do {
48  old = *addr;
49  } while (0 == opal_atomic_cmpset_32(addr, old, newval));
50 
51  return old;
52 }
53 #endif /* OPAL_HAVE_ATOMIC_SWAP_32 */
54 
55 #if !defined(OPAL_HAVE_ATOMIC_ADD_32)
56 #define OPAL_HAVE_ATOMIC_ADD_32 1
57 static inline int32_t
58 opal_atomic_add_32(volatile int32_t *addr, int delta)
59 {
60  int32_t oldval;
61 
62  do {
63  oldval = *addr;
64  } while (0 == opal_atomic_cmpset_32(addr, oldval, oldval + delta));
65  return (oldval + delta);
66 }
67 #endif /* OPAL_HAVE_ATOMIC_ADD_32 */
68 
69 
70 #if !defined(OPAL_HAVE_ATOMIC_SUB_32)
71 #define OPAL_HAVE_ATOMIC_SUB_32 1
72 static inline int32_t
73 opal_atomic_sub_32(volatile int32_t *addr, int delta)
74 {
75  int32_t oldval;
76 
77  do {
78  oldval = *addr;
79  } while (0 == opal_atomic_cmpset_32(addr, oldval, oldval - delta));
80  return (oldval - delta);
81 }
82 #endif /* OPAL_HAVE_ATOMIC_SUB_32 */
83 
84 #endif /* OPAL_HAVE_ATOMIC_CMPSET_32 */
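
As a caller-side illustration (not part of the original header), a reference count built on the 32-bit operations above might look like the following sketch; the refcount, obj_retain, and obj_release names are invented, and it assumes this header and <stdint.h> are already included:

static volatile int32_t refcount = 1;

static void obj_retain(void)
{
    /* opal_atomic_add_32() returns the value after the addition */
    (void) opal_atomic_add_32(&refcount, 1);
}

static int obj_release(void)
{
    /* Returns non-zero when the last reference was dropped */
    return (0 == opal_atomic_sub_32(&refcount, 1));
}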
85 
86 
87 #if OPAL_HAVE_ATOMIC_CMPSET_64
88 
89 #if !defined(OPAL_HAVE_ATOMIC_SWAP_64)
90 #define OPAL_HAVE_ATOMIC_SWAP_64 1
91 static inline int64_t opal_atomic_swap_64(volatile int64_t *addr,
92  int64_t newval)
93 {
94  int64_t old;
95  do {
96  old = *addr;
97  } while (0 == opal_atomic_cmpset_64(addr, old, newval));
98  return old;
99 }
100 #endif /* OPAL_HAVE_ATOMIC_SWAP_64 */
101 
102 #if !defined(OPAL_HAVE_ATOMIC_ADD_64)
103 #define OPAL_HAVE_ATOMIC_ADD_64 1
104 static inline int64_t
105 opal_atomic_add_64(volatile int64_t *addr, int64_t delta)
106 {
107  int64_t oldval;
108 
109  do {
110  oldval = *addr;
111  } while (0 == opal_atomic_cmpset_64(addr, oldval, oldval + delta));
112  return (oldval + delta);
113 }
114 #endif /* OPAL_HAVE_ATOMIC_ADD_64 */
115 
116 
117 #if !defined(OPAL_HAVE_ATOMIC_SUB_64)
118 #define OPAL_HAVE_ATOMIC_SUB_64 1
119 static inline int64_t
120 opal_atomic_sub_64(volatile int64_t *addr, int64_t delta)
121 {
122  int64_t oldval;
123 
124  do {
125  oldval = *addr;
126  } while (0 == opal_atomic_cmpset_64(addr, oldval, oldval - delta));
127  return (oldval - delta);
128 }
129 #endif /* OPAL_HAVE_ATOMIC_SUB_64 */
130 
131 #else
132 
133 #if !defined(OPAL_HAVE_ATOMIC_ADD_64)
134 #define OPAL_HAVE_ATOMIC_ADD_64 0
135 #endif
136 
137 #if !defined(OPAL_HAVE_ATOMIC_SUB_64)
138 #define OPAL_HAVE_ATOMIC_SUB_64 0
139 #endif
140 
141 #endif /* OPAL_HAVE_ATOMIC_CMPSET_64 */
142 
143 
144 #if (OPAL_HAVE_ATOMIC_CMPSET_32 || OPAL_HAVE_ATOMIC_CMPSET_64)
145 
146 static inline int
147 opal_atomic_cmpset_xx(volatile void* addr, int64_t oldval,
148  int64_t newval, size_t length)
149 {
150  switch( length ) {
151 #if OPAL_HAVE_ATOMIC_CMPSET_32
152  case 4:
153  return opal_atomic_cmpset_32( (volatile int32_t*)addr,
154  (int32_t)oldval, (int32_t)newval );
155 #endif /* OPAL_HAVE_ATOMIC_CMPSET_32 */
156 
157 #if OPAL_HAVE_ATOMIC_CMPSET_64
158  case 8:
159  return opal_atomic_cmpset_64( (volatile int64_t*)addr,
160  (int64_t)oldval, (int64_t)newval );
161 #endif /* OPAL_HAVE_ATOMIC_CMPSET_64 */
162  default:
163  /* This should never happen, so deliberately abort (hopefully
164  leaving a core file for analysis) */
165  abort();
166  }
167  return 0; /* always fail */
168 }
169 
170 
171 static inline int
172 opal_atomic_cmpset_acq_xx(volatile void* addr, int64_t oldval,
173  int64_t newval, size_t length)
174 {
175  switch( length ) {
176 #if OPAL_HAVE_ATOMIC_CMPSET_32
177  case 4:
178  return opal_atomic_cmpset_acq_32( (volatile int32_t*)addr,
179  (int32_t)oldval, (int32_t)newval );
180 #endif /* OPAL_HAVE_ATOMIC_CMPSET_32 */
181 
182 #if OPAL_HAVE_ATOMIC_CMPSET_64
183  case 8:
184  return opal_atomic_cmpset_acq_64( (volatile int64_t*)addr,
185  (int64_t)oldval, (int64_t)newval );
186 #endif /* OPAL_HAVE_ATOMIC_CMPSET_64 */
187  default:
188  /* This should never happen, so deliberately abort (hopefully
189  leaving a core file for analysis) */
190  abort();
191  }
192  return 0; /* always fail */
193 }
194 
195 
196 static inline int
197 opal_atomic_cmpset_rel_xx(volatile void* addr, int64_t oldval,
198  int64_t newval, size_t length)
199 {
200  switch( length ) {
201 #if OPAL_HAVE_ATOMIC_CMPSET_32
202  case 4:
203  return opal_atomic_cmpset_rel_32( (volatile int32_t*)addr,
204  (int32_t)oldval, (int32_t)newval );
205 #endif /* OPAL_HAVE_ATOMIC_CMPSET_32 */
206 
207 #if OPAL_HAVE_ATOMIC_CMPSET_64
208  case 8:
209  return opal_atomic_cmpset_rel_64( (volatile int64_t*)addr,
210  (int64_t)oldval, (int64_t)newval );
211 #endif /* OPAL_HAVE_ATOMIC_CMPSET_64 */
212  default:
213  /* This should never happen, so deliberately abort (hopefully
214  leaving a core file for analysis) */
215  abort();
216  }
217  return 0; /* always fail */
218 }
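
atomic.h typically wraps these _xx dispatchers in size-generic macros; a minimal sketch of such a wrapper is shown below (the MY_ATOMIC_CMPSET name and the two flag variables are invented for illustration, and the exact macro shipped in atomic.h may differ):

#define MY_ATOMIC_CMPSET(ADDR, OLDVAL, NEWVAL)                            \
    opal_atomic_cmpset_xx((volatile void *) (ADDR), (int64_t) (OLDVAL),   \
                          (int64_t) (NEWVAL), sizeof(*(ADDR)))

static volatile int32_t flag32 = 0;
static volatile int64_t flag64 = 0;

static void cmpset_example(void)
{
    /* sizeof(*(ADDR)) selects the 4- or 8-byte case at compile time */
    (void) MY_ATOMIC_CMPSET(&flag32, 0, 1);   /* -> opal_atomic_cmpset_32 */
    (void) MY_ATOMIC_CMPSET(&flag64, 0, 1);   /* -> opal_atomic_cmpset_64 */
}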
219 
220 
221 static inline int
222 opal_atomic_cmpset_ptr(volatile void* addr,
223  void* oldval,
224  void* newval)
225 {
226 #if SIZEOF_VOID_P == 4 && OPAL_HAVE_ATOMIC_CMPSET_32
227  return opal_atomic_cmpset_32((int32_t*) addr, (unsigned long) oldval,
228  (unsigned long) newval);
229 #elif SIZEOF_VOID_P == 8 && OPAL_HAVE_ATOMIC_CMPSET_64
230  return opal_atomic_cmpset_64((int64_t*) addr, (unsigned long) oldval,
231  (unsigned long) newval);
232 #else
233  abort();
234  return 0;
235 #endif
236 }
237 
238 
239 static inline int
240 opal_atomic_cmpset_acq_ptr(volatile void* addr,
241  void* oldval,
242  void* newval)
243 {
244 #if SIZEOF_VOID_P == 4 && OPAL_HAVE_ATOMIC_CMPSET_32
245  return opal_atomic_cmpset_acq_32((int32_t*) addr, (unsigned long) oldval,
246  (unsigned long) newval);
247 #elif SIZEOF_VOID_P == 8 && OPAL_HAVE_ATOMIC_CMPSET_64
248  return opal_atomic_cmpset_acq_64((int64_t*) addr, (unsigned long) oldval,
249  (unsigned long) newval);
250 #else
251  abort();
252  return 0;
253 #endif
254 }
255 
256 
257 static inline int opal_atomic_cmpset_rel_ptr(volatile void* addr,
258  void* oldval,
259  void* newval)
260 {
261 #if SIZEOF_VOID_P == 4 && OPAL_HAVE_ATOMIC_CMPSET_32
262  return opal_atomic_cmpset_rel_32((int32_t*) addr, (unsigned long) oldval,
263  (unsigned long) newval);
264 #elif SIZEOF_VOID_P == 8 && OPAL_HAVE_ATOMIC_CMPSET_64
265  return opal_atomic_cmpset_rel_64((int64_t*) addr, (unsigned long) oldval,
266  (unsigned long) newval);
267 #else
268  abort();
269  return 0;
270 #endif
271 }
272 
273 #endif /* (OPAL_HAVE_ATOMIC_CMPSET_32 || OPAL_HAVE_ATOMIC_CMPSET_64) */
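
One common use of the pointer-width compare-and-set is a retry loop that publishes a new list head; the sketch below is hypothetical (the my_node_t type and list_push function are invented) and relies only on opal_atomic_cmpset_ptr() returning non-zero on success:

typedef struct my_node_t {
    struct my_node_t *next;
    int payload;
} my_node_t;

static my_node_t *volatile list_head = NULL;

static void list_push(my_node_t *node)
{
    my_node_t *old_head;
    do {
        old_head = list_head;
        node->next = old_head;
        /* Retry if another thread replaced the head since we read it */
    } while (0 == opal_atomic_cmpset_ptr((volatile void *) &list_head,
                                         (void *) old_head, (void *) node));
}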
274 
275 #if (OPAL_HAVE_ATOMIC_SWAP_32 || OPAL_HAVE_ATOMIC_SWAP_64)
276 
277 #if SIZEOF_VOID_P == 4 && OPAL_HAVE_ATOMIC_SWAP_32
278 #define opal_atomic_swap_ptr(addr, value) opal_atomic_swap_32((int32_t *) addr, value)
279 #elif SIZEOF_VOID_P == 8 && OPAL_HAVE_ATOMIC_SWAP_64
280 #define opal_atomic_swap_ptr(addr, value) opal_atomic_swap_64((int64_t *) addr, value)
281 #endif
282 
283 #endif /* (OPAL_HAVE_ATOMIC_SWAP_32 || OPAL_HAVE_ATOMIC_SWAP_64) */
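
A hypothetical use of opal_atomic_swap_ptr(): install a new pointer and receive the previous one in a single atomic step. The current_cb and install_cb names are invented, and the cast through intptr_t (from <stdint.h>) reflects the macro expanding to the integer-typed 32- or 64-bit swap:

static void *volatile current_cb = NULL;

static void *install_cb(void *new_cb)
{
    /* Returns the pointer that was stored before the swap */
    return (void *)(intptr_t) opal_atomic_swap_ptr(&current_cb, (intptr_t) new_cb);
}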
284 
285 #if OPAL_HAVE_ATOMIC_MATH_32 || OPAL_HAVE_ATOMIC_MATH_64
286 
287 
288 static inline void
289 opal_atomic_add_xx(volatile void* addr, int32_t value, size_t length)
290 {
291  switch( length ) {
292 #if OPAL_HAVE_ATOMIC_ADD_32
293  case 4:
294  opal_atomic_add_32( (volatile int32_t*)addr, (int32_t)value );
295  break;
296 #endif /* OPAL_HAVE_ATOMIC_ADD_32 */
297 
298 #if OPAL_HAVE_ATOMIC_ADD_64
299  case 8:
300  opal_atomic_add_64( (volatile int64_t*)addr, (int64_t)value );
301  break;
302 #endif /* OPAL_HAVE_ATOMIC_ADD_64 */
303  default:
304  /* This should never happen, so deliberately abort (hopefully
305  leaving a core file for analysis) */
306  abort();
307  }
308 }
309 
310 
311 static inline void
312 opal_atomic_sub_xx(volatile void* addr, int32_t value, size_t length)
313 {
314  switch( length ) {
315 #if OPAL_HAVE_ATOMIC_SUB_32
316  case 4:
317  opal_atomic_sub_32( (volatile int32_t*)addr, (int32_t)value );
318  break;
319 #endif /* OPAL_HAVE_ATOMIC_SUB_32 */
320 
321 #if OPAL_HAVE_ATOMIC_SUB_64
322  case 8:
323  opal_atomic_sub_64( (volatile int64_t*)addr, (int64_t)value );
324  break;
325 #endif /* OPAL_HAVE_ATOMIC_SUB_64 */
326  default:
327  /* This should never happen, so deliberately abort (hopefully
328  leaving a core file for analysis) */
329  abort();
330  }
331 }
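
As with the compare-and-set dispatchers, these add/sub dispatchers lend themselves to a size-generic wrapper; the sketch below is illustrative (MY_ATOMIC_ADD and the two counters are invented names), and note that the delta itself is only an int32_t even for an 8-byte target:

#define MY_ATOMIC_ADD(ADDR, VALUE)                                  \
    opal_atomic_add_xx((volatile void *) (ADDR), (int32_t) (VALUE), \
                       sizeof(*(ADDR)))

static volatile int32_t hits  = 0;
static volatile int64_t bytes = 0;

static void record(int32_t nbytes)
{
    MY_ATOMIC_ADD(&hits, 1);        /* 4-byte target -> opal_atomic_add_32 */
    MY_ATOMIC_ADD(&bytes, nbytes);  /* 8-byte target -> opal_atomic_add_64 */
}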
332 
333 #if SIZEOF_VOID_P == 4 && OPAL_HAVE_ATOMIC_ADD_32
334 static inline int32_t opal_atomic_add_ptr( volatile void* addr,
335  void* delta )
336 {
337  return opal_atomic_add_32((int32_t*) addr, (unsigned long) delta);
338 }
339 #elif SIZEOF_VOID_P == 8 && OPAL_HAVE_ATOMIC_ADD_64
340 static inline int64_t opal_atomic_add_ptr( volatile void* addr,
341  void* delta )
342 {
343  return opal_atomic_add_64((int64_t*) addr, (unsigned long) delta);
344 }
345 #else
346 static inline int32_t opal_atomic_add_ptr( volatile void* addr,
347  void* delta )
348 {
349  abort();
350  return 0;
351 }
352 #endif
353 
354 #if SIZEOF_VOID_P == 4 && OPAL_HAVE_ATOMIC_SUB_32
355 static inline int32_t opal_atomic_sub_ptr( volatile void* addr,
356  void* delta )
357 {
358  return opal_atomic_sub_32((int32_t*) addr, (unsigned long) delta);
359 }
360 #elif SIZEOF_VOID_P == 8 && OPAL_HAVE_ATOMIC_SUB_64
361 static inline int64_t opal_atomic_sub_ptr( volatile void* addr,
362  void* delta )
363 {
364  return opal_atomic_sub_64((int64_t*) addr, (unsigned long) delta);
365 }
366 #else
367 static inline int32_t opal_atomic_sub_ptr( volatile void* addr,
368  void* delta )
369 {
370  abort();
371  return 0;
372 }
373 #endif
374 
375 #endif /* OPAL_HAVE_ATOMIC_MATH_32 || OPAL_HAVE_ATOMIC_MATH_64 */
376 
377 /**********************************************************************
378  *
379  * Atomic spinlocks
380  *
381  *********************************************************************/
382 #ifdef OPAL_NEED_INLINE_ATOMIC_SPINLOCKS
383 
384 /*
385  * Lock initialization function. It sets the lock to the given value (normally UNLOCKED).
386  */
387 static inline void
388 opal_atomic_init( opal_atomic_lock_t* lock, int32_t value )
389 {
390  lock->u.lock = value;
391 }
392 
393 
394 static inline int
395 opal_atomic_trylock( opal_atomic_lock_t* lock )
396 {
397  int ret = opal_atomic_cmpset_acq_32( &(lock->u.lock),
398  OPAL_ATOMIC_UNLOCKED, OPAL_ATOMIC_LOCKED);
399  return (ret == 0) ? 1 : 0;
400 }
401 
402 
403 static inline void
404 opal_atomic_lock( opal_atomic_lock_t* lock )
405 {
406  while( !opal_atomic_cmpset_acq_32( &(lock->u.lock),
407  OPAL_ATOMIC_UNLOCKED, OPAL_ATOMIC_LOCKED) ) {
408  while (lock->u.lock == OPAL_ATOMIC_LOCKED) {
409  /* spin */ ;
410  }
411  }
412 }
413 
414 
415 static inline void
416 opal_atomic_unlock( opal_atomic_lock_t* lock )
417 {
418  opal_atomic_wmb();
419  lock->u.lock=OPAL_ATOMIC_UNLOCKED;
420 }
421 
422 #endif /* OPAL_NEED_INLINE_ATOMIC_SPINLOCKS */
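
A minimal caller-side sketch of the spinlock API above (the counter and function names are invented; OPAL_ATOMIC_UNLOCKED, OPAL_ATOMIC_LOCKED, and opal_atomic_lock_t come from atomic.h). Note that opal_atomic_trylock() returns 0 when the lock was acquired:

static opal_atomic_lock_t counter_lock;
static int counter = 0;

static void counter_setup(void)
{
    opal_atomic_init(&counter_lock, OPAL_ATOMIC_UNLOCKED);
}

static void counter_bump(void)
{
    opal_atomic_lock(&counter_lock);    /* spins until the lock is acquired */
    ++counter;                          /* critical section */
    opal_atomic_unlock(&counter_lock);  /* write barrier, then store UNLOCKED */
}

static int counter_try_bump(void)
{
    if (0 == opal_atomic_trylock(&counter_lock)) {   /* 0 means "acquired" */
        ++counter;
        opal_atomic_unlock(&counter_lock);
        return 1;
    }
    return 0;
}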