1 /* 2 * Copyright (C) 2008 The Android Open Source Project 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the 13 * distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 // Memory order requirements for POSIX semaphores appear unclear and are 30 // currently interpreted inconsistently. 31 // We conservatively prefer sequentially consistent operations for now. 32 // CAUTION: This is more conservative than some other major implementations, 33 // and may change if and when the issue is resolved. 
#include <semaphore.h>
#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/time.h>
#include <time.h>

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"

// In this implementation, a semaphore contains a
// 31-bit signed value and a 1-bit 'shared' flag
// (for process-sharing purpose).
//
// We use the value -1 to indicate contention on the
// semaphore, 0 or more to indicate uncontended state,
// any value lower than -2 is invalid at runtime.
//
// State diagram:
//
// post(1)  ==> 2
// post(0)  ==> 1
// post(-1) ==> 1, then wake all waiters
//
// wait(2)  ==> 1
// wait(1)  ==> 0
// wait(0)  ==> -1 then wait for a wake up + loop
// wait(-1) ==> -1 then wait for a wake up + loop

// Use the upper 31-bits for the counter, and the lower one
// for the shared flag.
#define SEMCOUNT_SHARED_MASK 0x00000001
#define SEMCOUNT_VALUE_MASK 0xfffffffe
#define SEMCOUNT_VALUE_SHIFT 1

// Convert a value into the corresponding sem->count bit pattern.
// The mask clears the shared bit, so the result always encodes
// "not shared"; callers OR the flag back in where needed.
#define SEMCOUNT_FROM_VALUE(val) (((val) << SEMCOUNT_VALUE_SHIFT) & SEMCOUNT_VALUE_MASK)

// Convert a sem->count bit pattern into the corresponding signed value.
// NOTE(review): this relies on right-shifting a negative int being an
// arithmetic (sign-extending) shift, which is implementation-defined in
// C/C++ — holds for the toolchains bionic targets, confirm if porting.
static inline int SEMCOUNT_TO_VALUE(unsigned int sval) {
  return (static_cast<int>(sval) >> SEMCOUNT_VALUE_SHIFT);
}

// The value +1 as a sem->count bit-pattern.
#define SEMCOUNT_ONE SEMCOUNT_FROM_VALUE(1)

// The value -1 as a sem->count bit-pattern.
#define SEMCOUNT_MINUS_ONE SEMCOUNT_FROM_VALUE(-1)

// Step the 31-bit counter field by one. The mask keeps the shared bit
// cleared, so callers must OR it back into the result.
#define SEMCOUNT_DECREMENT(sval) (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
#define SEMCOUNT_INCREMENT(sval) (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)

// View sem->count as an atomic_uint so the <stdatomic.h> operations
// below can be applied to it.
static inline atomic_uint* SEM_TO_ATOMIC_POINTER(sem_t* sem) {
  static_assert(sizeof(atomic_uint) == sizeof(sem->count),
                "sem->count should actually be atomic_uint in implementation.");

  // We prefer casting to atomic_uint instead of declaring sem->count to be atomic_uint directly.
  // Because using the second method pollutes semaphore.h.
  return reinterpret_cast<atomic_uint*>(&sem->count);
}

// Return the shared bitflag from a semaphore counter.
static inline unsigned int SEM_GET_SHARED(atomic_uint* sem_count_ptr) {
  // memory_order_relaxed is used as SHARED flag will not be changed after init.
  return (atomic_load_explicit(sem_count_ptr, memory_order_relaxed) & SEMCOUNT_SHARED_MASK);
}

// Initialize the semaphore to 'value'. A non-zero 'pshared' sets the
// shared bit so subsequent waits/posts use process-shared futexes.
// Returns 0 on success, or -1 with errno=EINVAL if 'value' is too large.
int sem_init(sem_t* sem, int pshared, unsigned int value) {
  // Ensure that 'value' can be stored in the semaphore.
  if (value > SEM_VALUE_MAX) {
    errno = EINVAL;
    return -1;
  }

  unsigned int count = SEMCOUNT_FROM_VALUE(value);
  if (pshared != 0) {
    count |= SEMCOUNT_SHARED_MASK;
  }

  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  atomic_init(sem_count_ptr, count);
  return 0;
}

// The semaphore owns no resources outside the sem_t itself, so there is
// nothing to release; always succeeds.
int sem_destroy(sem_t*) {
  return 0;
}

// Named semaphores are not implemented: sem_open/sem_close/sem_unlink
// all fail with ENOSYS.
sem_t* sem_open(const char*, int, ...) {
  errno = ENOSYS;
  return SEM_FAILED;
}

int sem_close(sem_t*) {
  errno = ENOSYS;
  return -1;
}

int sem_unlink(const char*) {
  errno = ENOSYS;
  return -1;
}

// Decrement a semaphore's value atomically,
// and return the old one. As a special case,
// this returns immediately if the value is
// negative (i.e. -1).
// Note that decrementing 0 produces -1, i.e. marks the
// semaphore as contended (see the state diagram above).
static int __sem_dec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in atomic_compare_exchange operation to ensure all
  // memory access made by other threads can be seen in current thread.
  // An acquire fence may be sufficient, but it is still in discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    // Already contended: leave the count alone (so existing waiters keep
    // sleeping) and report the old value so the caller waits too.
    if (SEMCOUNT_TO_VALUE(old_value) < 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
                                         SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}

// Same as __sem_dec, but will not touch anything if the
// value is already negative *or* 0. Returns the old value.
// Used on the "try" paths so a failed attempt never flips the
// semaphore into the contended (-1) state.
static int __sem_trydec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in atomic_compare_exchange operation to ensure all
  // memory access made by other threads can be seen in current thread.
  // An acquire fence may be sufficient, but it is still in discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    if (SEMCOUNT_TO_VALUE(old_value) <= 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
                                         SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}

// "Increment" the value of a semaphore atomically and
// return its old value. Note that this implements
// the special case of "incrementing" any negative
// value to +1 directly.
185 // 186 // NOTE: The value will _not_ wrap above SEM_VALUE_MAX 187 static int __sem_inc(atomic_uint* sem_count_ptr) { 188 unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed); 189 unsigned int shared = old_value & SEMCOUNT_SHARED_MASK; 190 unsigned int new_value; 191 192 // Use memory_order_seq_cst in atomic_compare_exchange operation to ensure all 193 // memory access made before can be seen in other threads. 194 // A release fence may be sufficient, but it is still in discussion whether 195 // POSIX semaphores should provide sequential consistency. 196 do { 197 // Can't go higher than SEM_VALUE_MAX. 198 if (SEMCOUNT_TO_VALUE(old_value) == SEM_VALUE_MAX) { 199 break; 200 } 201 202 // If the counter is negative, go directly to one, otherwise just increment. 203 if (SEMCOUNT_TO_VALUE(old_value) < 0) { 204 new_value = SEMCOUNT_ONE | shared; 205 } else { 206 new_value = SEMCOUNT_INCREMENT(old_value) | shared; 207 } 208 } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value, 209 new_value)); 210 211 return SEMCOUNT_TO_VALUE(old_value); 212 } 213 214 int sem_wait(sem_t* sem) { 215 atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem); 216 unsigned int shared = SEM_GET_SHARED(sem_count_ptr); 217 218 while (true) { 219 if (__sem_dec(sem_count_ptr) > 0) { 220 return 0; 221 } 222 223 __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, NULL); 224 } 225 } 226 227 int sem_timedwait(sem_t* sem, const timespec* abs_timeout) { 228 atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem); 229 230 // POSIX says we need to try to decrement the semaphore 231 // before checking the timeout value. Note that if the 232 // value is currently 0, __sem_trydec() does nothing. 233 if (__sem_trydec(sem_count_ptr) > 0) { 234 return 0; 235 } 236 237 // Check it as per POSIX. 
238 if (abs_timeout == NULL || abs_timeout->tv_sec < 0 || abs_timeout->tv_nsec < 0 || abs_timeout->tv_nsec >= NS_PER_S) { 239 errno = EINVAL; 240 return -1; 241 } 242 243 unsigned int shared = SEM_GET_SHARED(sem_count_ptr); 244 245 while (true) { 246 // POSIX mandates CLOCK_REALTIME here. 247 timespec ts; 248 if (!timespec_from_absolute_timespec(ts, *abs_timeout, CLOCK_REALTIME)) { 249 errno = ETIMEDOUT; 250 return -1; 251 } 252 253 // Try to grab the semaphore. If the value was 0, this will also change it to -1. 254 if (__sem_dec(sem_count_ptr) > 0) { 255 break; 256 } 257 258 // Contention detected. Wait for a wakeup event. 259 int ret = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, &ts); 260 261 // Return in case of timeout or interrupt. 262 if (ret == -ETIMEDOUT || ret == -EINTR) { 263 errno = -ret; 264 return -1; 265 } 266 } 267 return 0; 268 } 269 270 int sem_post(sem_t* sem) { 271 atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem); 272 unsigned int shared = SEM_GET_SHARED(sem_count_ptr); 273 274 int old_value = __sem_inc(sem_count_ptr); 275 if (old_value < 0) { 276 // Contention on the semaphore. Wake up all waiters. 277 __futex_wake_ex(sem_count_ptr, shared, INT_MAX); 278 } else if (old_value == SEM_VALUE_MAX) { 279 // Overflow detected. 280 errno = EOVERFLOW; 281 return -1; 282 } 283 284 return 0; 285 } 286 287 int sem_trywait(sem_t* sem) { 288 atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem); 289 if (__sem_trydec(sem_count_ptr) > 0) { 290 return 0; 291 } else { 292 errno = EAGAIN; 293 return -1; 294 } 295 } 296 297 int sem_getvalue(sem_t* sem, int* sval) { 298 atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem); 299 300 // Use memory_order_seq_cst in atomic_load operation. 301 // memory_order_relaxed may be fine here, but it is still in discussion 302 // whether POSIX semaphores should provide sequential consistency. 
303 int val = SEMCOUNT_TO_VALUE(atomic_load(sem_count_ptr)); 304 if (val < 0) { 305 val = 0; 306 } 307 308 *sval = val; 309 return 0; 310 } 311