OpenMPI  0.1.1
atomic.h
1 /*
2  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
3  * University Research and Technology
4  * Corporation. All rights reserved.
5  * Copyright (c) 2004-2010 The University of Tennessee and The University
6  * of Tennessee Research Foundation. All rights
7  * reserved.
8  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9  * University of Stuttgart. All rights reserved.
10  * Copyright (c) 2004-2005 The Regents of the University of California.
11  * All rights reserved.
12  * Copyright (c) 2007-2010 Oracle and/or its affiliates. All rights reserved.
13  * $COPYRIGHT$
14  *
15  * Additional copyrights may follow
16  *
17  * $HEADER$
18  */
19 
20 #ifndef OMPI_SYS_ARCH_ATOMIC_H
21 #define OMPI_SYS_ARCH_ATOMIC_H 1
22 
23 /*
24  * On ia32, we use cmpxchg.
25  */
26 
#if OPAL_WANT_SMP_LOCKS
/* SMP build: prefix read-modify-write instructions with "lock" so they
   are atomic across processors; MB() is a compiler-only barrier (it
   emits no instruction, only prevents the compiler from reordering
   memory accesses across it). */
#define SMPLOCK "lock; "
#define MB() __asm__ __volatile__("": : :"memory")
#else
/* Uniprocessor build: no lock prefix and no barrier are needed. */
#define SMPLOCK
#define MB()
#endif
34 
35 
36 /**********************************************************************
37  *
38  * Define constants for IA32
39  *
40  *********************************************************************/
/* Memory barriers are provided inline (see opal_atomic_mb and friends). */
#define OPAL_HAVE_ATOMIC_MEM_BARRIER 1

/* 32-bit compare-and-set is provided (via cmpxchgl). */
#define OPAL_HAVE_ATOMIC_CMPSET_32 1

/* 32-bit atomic add/subtract are provided (via xaddl). */
#define OPAL_HAVE_ATOMIC_MATH_32 1
#define OPAL_HAVE_ATOMIC_ADD_32 1
#define OPAL_HAVE_ATOMIC_SUB_32 1

/* 64-bit compare-and-set is available, but NOT as an inline function:
   the inline cmpxchg8b version is disabled below (#if 0), so a
   non-inline implementation must be supplied elsewhere. */
#define OPAL_HAVE_ATOMIC_CMPSET_64 1

#undef OPAL_HAVE_INLINE_ATOMIC_CMPSET_64
#define OPAL_HAVE_INLINE_ATOMIC_CMPSET_64 0
53 
54 /**********************************************************************
55  *
56  * Memory Barriers
57  *
58  *********************************************************************/
#if OMPI_GCC_INLINE_ASSEMBLY

/**
 * Full memory barrier.
 *
 * Expands to a compiler barrier when OPAL_WANT_SMP_LOCKS is set and to
 * nothing otherwise (see the MB() macro above).
 */
static inline void opal_atomic_mb(void)
{
    MB();
}


/**
 * Read memory barrier (same implementation as the full barrier here).
 */
static inline void opal_atomic_rmb(void)
{
    MB();
}


/**
 * Write memory barrier (same implementation as the full barrier here).
 */
static inline void opal_atomic_wmb(void)
{
    MB();
}

#endif /* OMPI_GCC_INLINE_ASSEMBLY */
79 
80 
81 /**********************************************************************
82  *
83  * Atomic math operations
84  *
85  *********************************************************************/
#if OMPI_GCC_INLINE_ASSEMBLY

/**
 * Atomic 32-bit compare-and-set.
 *
 * If *addr equals oldval, atomically store newval into *addr;
 * otherwise leave *addr unchanged.  Implemented with (lock) cmpxchgl:
 * the instruction compares EAX (holding oldval, the "+a" operand) with
 * *addr and sets ZF on success; sete then captures ZF into ret.
 *
 * @param addr    location to conditionally update
 * @param oldval  value *addr is expected to contain
 * @param newval  value to store when the comparison succeeds
 * @return non-zero if the swap took place, 0 otherwise
 */
static inline int opal_atomic_cmpset_32(volatile int32_t *addr,
                                        int32_t oldval,
                                        int32_t newval)
{
    unsigned char ret;
    __asm__ __volatile__ (
                          SMPLOCK "cmpxchgl %3,%2 \n\t"
                                  "sete %0 \n\t"
                          : "=qm" (ret), "+a" (oldval), "+m" (*addr)
                          : "q"(newval)
                          : "memory", "cc");

    return (int)ret;
}

#endif /* OMPI_GCC_INLINE_ASSEMBLY */

/* The plain cmpset is used for both the acquire and release variants
   on this architecture. */
#define opal_atomic_cmpset_acq_32 opal_atomic_cmpset_32
#define opal_atomic_cmpset_rel_32 opal_atomic_cmpset_32
107 
#if OMPI_GCC_INLINE_ASSEMBLY

#if 0

/* DISABLED: some versions of GCC won't let you use ebx period (even
   though they should be able to save / restore for the life of the
   inline assembly).  For the beta, just use the non-inline version
   (OPAL_HAVE_INLINE_ATOMIC_CMPSET_64 is forced to 0 above). */

#ifndef ll_low /* GLIBC provides these somewhere, so protect */
/* Access the low/high 32-bit halves of a 64-bit value in place.
   NOTE(review): this type-puns through unsigned int* and assumes
   little-endian layout — acceptable here because the file is ia32-only. */
#define ll_low(x) *(((unsigned int*)&(x))+0)
#define ll_high(x) *(((unsigned int*)&(x))+1)
#endif

/* On Linux the EBX register is used by the shared libraries
 * to keep the global offset.  At the same time this register is
 * required by the cmpxchg8b instruction (as an input parameter).
 * This conflict forces us to save the EBX before the cmpxchg8b
 * and to restore it afterward.
 */
static inline int opal_atomic_cmpset_64(volatile int64_t *addr,
                                        int64_t oldval,
                                        int64_t newval)
{
    /*
     * Compare EDX:EAX with m64.  If equal, set ZF and load ECX:EBX into
     * m64.  Else, clear ZF and load m64 into EDX:EAX.
     */
    unsigned char ret;

    __asm__ __volatile__(
                    "push %%ebx \n\t"
                    "movl %4, %%ebx \n\t"
                    SMPLOCK "cmpxchg8b (%1) \n\t"
                    "sete %0 \n\t"
                    "pop %%ebx \n\t"
                    : "=qm"(ret)
                    : "D"(addr), "a"(ll_low(oldval)), "d"(ll_high(oldval)),
                      "r"(ll_low(newval)), "c"(ll_high(newval))
                    : "cc", "memory", "ebx");
    return (int) ret;
}
#endif /* if 0 */

#endif /* OMPI_GCC_INLINE_ASSEMBLY */

/* As with the 32-bit version, the plain cmpset serves as both the
   acquire and release variant. */
#define opal_atomic_cmpset_acq_64 opal_atomic_cmpset_64
#define opal_atomic_cmpset_rel_64 opal_atomic_cmpset_64
155 
156 #if OMPI_GCC_INLINE_ASSEMBLY
157 
158 /**
159  * atomic_add - add integer to atomic variable
160  * @i: integer value to add
161  * @v: pointer of type int
162  *
163  * Atomically adds @i to @v.
164  */
165 static inline int32_t opal_atomic_add_32(volatile int32_t* v, int i)
166 {
167  int ret = i;
168  __asm__ __volatile__(
169  SMPLOCK "xaddl %1,%0"
170  :"=m" (*v), "+r" (ret)
171  :"m" (*v)
172  :"memory", "cc"
173  );
174  return (ret+i);
175 }
176 
177 
178 /**
179  * atomic_sub - subtract the atomic variable
180  * @i: integer value to subtract
181  * @v: pointer of type int
182  *
183  * Atomically subtracts @i from @v.
184  */
185 static inline int32_t opal_atomic_sub_32(volatile int32_t* v, int i)
186 {
187  int ret = -i;
188  __asm__ __volatile__(
189  SMPLOCK "xaddl %1,%0"
190  :"=m" (*v), "+r" (ret)
191  :"m" (*v)
192  :"memory", "cc"
193  );
194  return (ret-i);
195 }
196 
197 #endif /* OMPI_GCC_INLINE_ASSEMBLY */
198 
199 #endif /* ! OMPI_SYS_ARCH_ATOMIC_H */
void opal_atomic_rmb(void)
Read memory barrier.
void opal_atomic_mb(void)
Memory barrier.
void opal_atomic_wmb(void)
Write memory barrier.