/* OpenMPI 0.1.1 — atomic.h (ia64 atomic operations) */
1 /*
2  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
3  * University Research and Technology
4  * Corporation. All rights reserved.
5  * Copyright (c) 2004-2005 The University of Tennessee and The University
6  * of Tennessee Research Foundation. All rights
7  * reserved.
8  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9  * University of Stuttgart. All rights reserved.
10  * Copyright (c) 2004-2005 The Regents of the University of California.
11  * All rights reserved.
12  * $COPYRIGHT$
13  *
14  * Additional copyrights may follow
15  *
16  * $HEADER$
17  */
18 
#ifndef OMPI_SYS_ARCH_ATOMIC_H
#define OMPI_SYS_ARCH_ATOMIC_H 1

/*
 * On ia64, we use cmpxchg, which supports acquire/release semantics natively.
 */


/* Full memory fence: the ia64 "mf" instruction orders all earlier memory
 * accesses before all later ones; the "memory" clobber additionally stops
 * the compiler from moving memory accesses across the barrier. */
#define MB() __asm__ __volatile__("mf": : :"memory")


/**********************************************************************
 *
 * Define constants for IA64
 *
 *********************************************************************/
/* Capabilities provided by this file: memory barriers plus 32- and
 * 64-bit compare-and-set. */
#define OPAL_HAVE_ATOMIC_MEM_BARRIER 1

#define OPAL_HAVE_ATOMIC_CMPSET_32 1
#define OPAL_HAVE_ATOMIC_CMPSET_64 1
40 /**********************************************************************
41  *
42  * Memory Barriers
43  *
44  *********************************************************************/
45 #if OMPI_GCC_INLINE_ASSEMBLY
46 
/* Full memory barrier. */
static inline void opal_atomic_mb(void)
{
    /* Expand the "mf" fence inline (same instruction the MB() macro
     * emits): orders all earlier loads/stores before all later ones. */
    __asm__ __volatile__ ("mf" : : : "memory");
}
51 
52 
/* Read memory barrier.  ia64 has no lighter-weight read-only fence in
 * this implementation, so a full "mf" is issued. */
static inline void opal_atomic_rmb(void)
{
    __asm__ __volatile__ ("mf" : : : "memory");
}
57 
58 
/* Write memory barrier.  Implemented as a full "mf" fence, identical to
 * opal_atomic_mb(); ia64 "mf" orders writes as well as reads. */
static inline void opal_atomic_wmb(void)
{
    __asm__ __volatile__ ("mf" : : : "memory");
}
63 
64 
65 #endif /* OMPI_GCC_INLINE_ASSEMBLY */
66 
67 
68 /**********************************************************************
69  *
70  * Atomic math operations
71  *
72  *********************************************************************/
73 #if OMPI_GCC_INLINE_ASSEMBLY
74 
/*
 * Linux-kernel-style cmpxchg helper: atomically compare the 32-bit value
 * at `ptr` against `old` and, if equal, store `new`, with acquire
 * semantics.  Evaluates to the previous contents of *ptr, zero-extended
 * to 64 bits.
 *
 * BUG FIXED: the previous version declared the result variable but
 * performed no atomic operation at all, so the macro evaluated to an
 * uninitialized value (undefined behavior).  It also used the
 * kernel-private type __u64; uint64_t is used instead.
 */
#define ia64_cmpxchg4_acq(ptr, new, old)                                  \
({                                                                        \
    uint64_t ia64_intri_res;                                              \
    /* cmpxchg compares against the ar.ccv application register. */       \
    __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"((uint64_t)(old)));    \
    __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv":               \
                          "=r"(ia64_intri_res) : "r"(ptr), "r"(new)       \
                          : "memory");                                    \
    ia64_intri_res;                                                       \
})
80 
/**
 * 32-bit compare-and-set with acquire semantics.
 *
 * Stores newval to *addr iff *addr currently equals oldval.
 * Returns non-zero when the swap happened, zero otherwise.
 */
static inline int opal_atomic_cmpset_acq_32( volatile int32_t *addr,
                                             int32_t oldval, int32_t newval)
{
    int64_t prev;

    /* cmpxchg4 compares memory against the ar.ccv application register,
     * so the expected value must be loaded there first. */
    __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(oldval));
    /* 4-byte compare-and-exchange, acquire form; prev receives the
     * previous contents of *addr (zero-extended to 64 bits). */
    __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv":
                          "=r"(prev) : "r"(addr), "r"(newval) : "memory");

    /* The swap succeeded iff memory held the expected value. */
    return ((int32_t) prev == oldval);
}
92 
93 
/**
 * 32-bit compare-and-set with release semantics.
 *
 * Identical contract to opal_atomic_cmpset_acq_32() but uses the
 * release form of cmpxchg4.
 */
static inline int opal_atomic_cmpset_rel_32( volatile int32_t *addr,
                                             int32_t oldval, int32_t newval)
{
    int64_t prev;

    /* Load the expected value into ar.ccv for the comparison. */
    __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(oldval));
    /* 4-byte compare-and-exchange, release form. */
    __asm__ __volatile__ ("cmpxchg4.rel %0=[%1],%2,ar.ccv":
                          "=r"(prev) : "r"(addr), "r"(newval) : "memory");

    return ((int32_t) prev == oldval);
}
105 
106 #endif /* OMPI_GCC_INLINE_ASSEMBLY */
107 
108 
/* The plain (unordered) 32-bit cmpset defaults to the acquire variant. */
#define opal_atomic_cmpset_32 opal_atomic_cmpset_acq_32
110 
111 #if OMPI_GCC_INLINE_ASSEMBLY
112 
/**
 * 64-bit compare-and-set with acquire semantics.
 *
 * Stores newval to *addr iff *addr currently equals oldval.
 * Returns non-zero when the swap happened, zero otherwise.
 *
 * BUG FIXED: the result was previously narrowed with (int32_t) before
 * the comparison, so a mismatch confined to the upper 32 bits of a
 * 64-bit value was misreported.  Compare the full 64-bit value.
 */
static inline int opal_atomic_cmpset_acq_64( volatile int64_t *addr,
                                             int64_t oldval, int64_t newval)
{
    int64_t ret;

    /* cmpxchg compares memory against the ar.ccv register; load it first. */
    __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(oldval));
    /* 8-byte compare-and-exchange, acquire form; ret receives the
     * previous contents of *addr. */
    __asm__ __volatile__ ("cmpxchg8.acq %0=[%1],%2,ar.ccv":
                          "=r"(ret) : "r"(addr), "r"(newval) : "memory");

    return (ret == oldval);
}
124 
125 
/**
 * 64-bit compare-and-set with release semantics.
 *
 * Identical contract to opal_atomic_cmpset_acq_64() but uses the
 * release form of cmpxchg8.
 *
 * BUG FIXED: as in the acquire variant, the previous (int32_t) cast
 * truncated the 64-bit result before comparison; compare all 64 bits.
 */
static inline int opal_atomic_cmpset_rel_64( volatile int64_t *addr,
                                             int64_t oldval, int64_t newval)
{
    int64_t ret;

    /* Load the expected value into ar.ccv for the comparison. */
    __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(oldval));
    /* 8-byte compare-and-exchange, release form. */
    __asm__ __volatile__ ("cmpxchg8.rel %0=[%1],%2,ar.ccv":
                          "=r"(ret) : "r"(addr), "r"(newval) : "memory");

    return (ret == oldval);
}
137 
138 #endif /* OMPI_GCC_INLINE_ASSEMBLY */
139 
/* The plain (unordered) 64-bit cmpset defaults to the acquire variant. */
#define opal_atomic_cmpset_64 opal_atomic_cmpset_acq_64
141 
142 #endif /* ! OMPI_SYS_ARCH_ATOMIC_H */
/*
 * API summary:
 *   void opal_atomic_mb(void)  — full memory barrier.
 *   void opal_atomic_rmb(void) — read memory barrier.
 *   void opal_atomic_wmb(void) — write memory barrier.
 */