@@ -17,79 +17,231 @@
 
 #include "CLibMongoC_bson-atomic.h"
 
+#ifdef BSON_OS_UNIX
+/* For sched_yield() */
+#include <sched.h>
+#endif
 
-/*
- * We should only ever hit these on non-Windows systems, for which we require
- * pthread support. Therefore, we will avoid making a threading portability
- * layer for threads here and just use pthreads directly.
- */
+int32_t
+bson_atomic_int_add (volatile int32_t *p, int32_t n)
+{
+   return n + bson_atomic_int32_fetch_add (p, n, bson_memory_order_seq_cst);
+}
 
+int64_t
+bson_atomic_int64_add (volatile int64_t *p, int64_t n)
+{
+   return n + bson_atomic_int64_fetch_add (p, n, bson_memory_order_seq_cst);
+}
+
+void
+bson_thrd_yield (void)
+{
+   BSON_IF_WINDOWS (SwitchToThread ();)
+   BSON_IF_POSIX (sched_yield ();)
+}
 
-#ifdef __BSON_NEED_BARRIER
-#include <pthread.h>
-static pthread_mutex_t gBarrier = PTHREAD_MUTEX_INITIALIZER;
 void
 bson_memory_barrier (void)
 {
-   pthread_mutex_lock (&gBarrier);
-   pthread_mutex_unlock (&gBarrier);
+   bson_atomic_thread_fence ();
 }
-#endif
 
+/**
+ * Some platforms do not support compiler intrinsics for atomic operations.
+ * We emulate that here using a spin lock and regular arithmetic operations.
+ */
+static int8_t gEmulAtomicLock = 0;
 
-#ifdef __BSON_NEED_ATOMIC_32
-#include <pthread.h>
-static pthread_mutex_t gSync32 = PTHREAD_MUTEX_INITIALIZER;
-int32_t
-bson_atomic_int_add (volatile int32_t *p, int32_t n)
+static void
+_lock_emul_atomic (void)
 {
-   int ret;
+   int i;
+   if (bson_atomic_int8_compare_exchange_weak (
+          &gEmulAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
+      /* Successfully took the spinlock */
+      return;
+   }
+   /* Failed. Try taking ten more times, then begin yielding. */
+   for (i = 0; i < 10; ++i) {
+      if (bson_atomic_int8_compare_exchange_weak (
+             &gEmulAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
+         /* Succeeded in taking the lock */
+         return;
+      }
+   }
+   /* Still don't have the lock. Spin and yield. */
+   while (bson_atomic_int8_compare_exchange_weak (
+             &gEmulAtomicLock, 0, 1, bson_memory_order_acquire) != 0) {
+      bson_thrd_yield ();
+   }
+}
 
-   pthread_mutex_lock (&gSync32);
-   *p += n;
-   ret = *p;
-   pthread_mutex_unlock (&gSync32);
+static void
+_unlock_emul_atomic (void)
+{
+   int64_t rv = bson_atomic_int8_exchange (
+      &gEmulAtomicLock, 0, bson_memory_order_release);
+   BSON_ASSERT (rv == 1 && "Released atomic lock while not holding it");
+}
 
+int64_t
+_bson_emul_atomic_int64_fetch_add (volatile int64_t *p,
+                                   int64_t n,
+                                   enum bson_memory_order _unused)
+{
+   int64_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p += n;
+   _unlock_emul_atomic ();
    return ret;
 }
-#endif
 
+int64_t
+_bson_emul_atomic_int64_exchange (volatile int64_t *p,
+                                  int64_t n,
+                                  enum bson_memory_order _unused)
+{
+   int64_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p = n;
+   _unlock_emul_atomic ();
+   return ret;
+}
 
-#ifdef __BSON_NEED_ATOMIC_64
-#include <pthread.h>
-static pthread_mutex_t gSync64 = PTHREAD_MUTEX_INITIALIZER;
 int64_t
-bson_atomic_int64_add (volatile int64_t *p, int64_t n)
+_bson_emul_atomic_int64_compare_exchange_strong (volatile int64_t *p,
+                                                 int64_t expect_value,
+                                                 int64_t new_value,
+                                                 enum bson_memory_order _unused)
 {
    int64_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   if (ret == expect_value) {
+      *p = new_value;
+   }
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int64_t
+_bson_emul_atomic_int64_compare_exchange_weak (volatile int64_t *p,
+                                               int64_t expect_value,
+                                               int64_t new_value,
+                                               enum bson_memory_order order)
+{
+   /* We're emulating. We can't do a weak version. */
+   return _bson_emul_atomic_int64_compare_exchange_strong (
+      p, expect_value, new_value, order);
+}
 
-   pthread_mutex_lock (&gSync64);
+
+int32_t
+_bson_emul_atomic_int32_fetch_add (volatile int32_t *p,
+                                   int32_t n,
+                                   enum bson_memory_order _unused)
+{
+   int32_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
    *p += n;
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int32_t
+_bson_emul_atomic_int32_exchange (volatile int32_t *p,
+                                  int32_t n,
+                                  enum bson_memory_order _unused)
+{
+   int32_t ret;
+   _lock_emul_atomic ();
    ret = *p;
-   pthread_mutex_unlock (&gSync64);
+   *p = n;
+   _unlock_emul_atomic ();
+   return ret;
+}
 
+int32_t
+_bson_emul_atomic_int32_compare_exchange_strong (volatile int32_t *p,
+                                                 int32_t expect_value,
+                                                 int32_t new_value,
+                                                 enum bson_memory_order _unused)
+{
+   int32_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   if (ret == expect_value) {
+      *p = new_value;
+   }
+   _unlock_emul_atomic ();
    return ret;
 }
-#endif
 
+int32_t
+_bson_emul_atomic_int32_compare_exchange_weak (volatile int32_t *p,
+                                               int32_t expect_value,
+                                               int32_t new_value,
+                                               enum bson_memory_order order)
+{
+   /* We're emulating. We can't do a weak version. */
+   return _bson_emul_atomic_int32_compare_exchange_strong (
+      p, expect_value, new_value, order);
+}
 
-/*
- * The logic in the header is such that __BSON_NEED_ATOMIC_WINDOWS should only
- * be defined if neither __BSON_NEED_ATOMIC_32 nor __BSON_NEED_ATOMIC_64 are.
- */
 
+int
+_bson_emul_atomic_int_fetch_add (volatile int *p,
+                                 int n,
+                                 enum bson_memory_order _unused)
+{
+   int ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p += n;
+   _unlock_emul_atomic ();
+   return ret;
+}
 
-#ifdef __BSON_NEED_ATOMIC_WINDOWS
-int32_t
-bson_atomic_int_add (volatile int32_t *p, int32_t n)
+int
+_bson_emul_atomic_int_exchange (volatile int *p,
+                                int n,
+                                enum bson_memory_order _unused)
 {
-   return InterlockedExchangeAdd (p, n) + n;
+   int ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p = n;
+   _unlock_emul_atomic ();
+   return ret;
 }
 
+int
+_bson_emul_atomic_int_compare_exchange_strong (volatile int *p,
+                                               int expect_value,
+                                               int new_value,
+                                               enum bson_memory_order _unused)
+{
+   int ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   if (ret == expect_value) {
+      *p = new_value;
+   }
+   _unlock_emul_atomic ();
+   return ret;
+}
 
-int64_t
-bson_atomic_int64_add (volatile int64_t *p, int64_t n)
+int
+_bson_emul_atomic_int_compare_exchange_weak (volatile int *p,
+                                             int expect_value,
+                                             int new_value,
+                                             enum bson_memory_order order)
 {
-   return InterlockedExchangeAdd (p, n) + n;
+   /* We're emulating. We can't do a weak version. */
+   return _bson_emul_atomic_int_compare_exchange_strong (
+      p, expect_value, new_value, order);
 }
-#endif
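
A note on the two legacy wrappers kept at the top of this change: `bson_atomic_int_add` and `bson_atomic_int64_add` preserve their historical contract of returning the value *after* the addition, which is why they add `n` to the old value handed back by the new C11-style `fetch_add` primitives. A minimal sketch of the difference, using only functions visible in this diff (the `demo_add_contract` helper is invented for illustration):

```c
#include "CLibMongoC_bson-atomic.h"

/* Hypothetical demo, not part of the change: the legacy wrapper returns
 * the new value, while the fetch_add primitive returns the value it
 * replaced. */
static void
demo_add_contract (void)
{
   volatile int32_t counter = 0;
   int32_t after = bson_atomic_int_add (&counter, 5);
   /* after == 5, counter == 5 */
   int32_t before =
      bson_atomic_int32_fetch_add (&counter, 5, bson_memory_order_seq_cst);
   /* before == 5, counter == 10 */
   BSON_ASSERT (after == 5 && before == 5 && counter == 10);
}
```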
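The emulation path funnels every `_bson_emul_*` operation through the single `gEmulAtomicLock` byte: acquire the spinlock, perform plain arithmetic, release. The sketch below isolates that spin-then-yield acquisition pattern, rebuilt with C11 `<stdatomic.h>` so it compiles standalone. It is illustrative only: all `demo_*` names are invented, and libbson itself builds on its own `bson_atomic_*` wrappers rather than `<stdatomic.h>`.

```c
/* Standalone sketch of the spin-then-yield lock used by the emulated
 * atomics above. Requires C11 atomics and POSIX sched_yield(). */
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

static atomic_char demo_lock = 0; /* 0 = unlocked, 1 = locked */

static void
demo_lock_acquire (void)
{
   int i;
   /* Retry eagerly a bounded number of times, mirroring the ten eager
    * attempts in _lock_emul_atomic. */
   for (i = 0; i < 10; ++i) {
      char expect = 0;
      if (atomic_compare_exchange_weak_explicit (&demo_lock,
                                                 &expect,
                                                 1,
                                                 memory_order_acquire,
                                                 memory_order_relaxed)) {
         return; /* took the lock */
      }
   }
   /* Contended: yield between attempts so the holder can run and release. */
   for (;;) {
      char expect = 0;
      if (atomic_compare_exchange_weak_explicit (&demo_lock,
                                                 &expect,
                                                 1,
                                                 memory_order_acquire,
                                                 memory_order_relaxed)) {
         return;
      }
      sched_yield ();
   }
}

static void
demo_lock_release (void)
{
   atomic_store_explicit (&demo_lock, 0, memory_order_release);
}

/* An emulated fetch-add is then plain arithmetic under the lock: */
static int64_t
demo_fetch_add (volatile int64_t *p, int64_t n)
{
   int64_t ret;
   demo_lock_acquire ();
   ret = *p;
   *p += n;
   demo_lock_release ();
   return ret;
}
```

Yielding after a burst of eager retries matters on single-core targets, where pure spinning would otherwise starve the thread that holds the lock.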
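On the weak/strong forwarding at the bottom of the diff: a weak compare-exchange is permitted to fail spuriously, so any correct caller already wraps it in a retry loop; routing the emulated weak variants through the strong implementation can therefore only cost performance, never correctness. Here is a sketch of the canonical caller-side loop, assuming the public `bson_atomic_int64_compare_exchange_weak` mirrors the old-value-return semantics of the `int8` variant used by `_lock_emul_atomic` above; the `demo_fetch_max` helper is hypothetical, and a fetch-add of zero stands in for a plain atomic read.

```c
#include "CLibMongoC_bson-atomic.h"

/* Hypothetical helper: atomically raise *p to at least floor_val and
 * return the resulting value. Success of the compare-exchange is
 * detected by getting `expect` back; a spurious weak failure (possible
 * on real hardware) or a lost race just repeats the loop. */
static int64_t
demo_fetch_max (volatile int64_t *p, int64_t floor_val)
{
   for (;;) {
      /* Adding zero is a pure atomic read of the current value. */
      int64_t expect =
         bson_atomic_int64_fetch_add (p, 0, bson_memory_order_seq_cst);
      if (expect >= floor_val) {
         return expect; /* already large enough; nothing to write */
      }
      if (bson_atomic_int64_compare_exchange_weak (
             p, expect, floor_val, bson_memory_order_seq_cst) == expect) {
         return floor_val; /* our write landed */
      }
   }
}
```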