1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 // Memory order requirements for POSIX semaphores appear unclear and are
30 // currently interpreted inconsistently.
31 // We conservatively prefer sequentially consistent operations for now.
32 // CAUTION: This is more conservative than some other major implementations,
33 // and may change if and when the issue is resolved.
34 
35 #include <semaphore.h>
36 #include <errno.h>
37 #include <limits.h>
38 #include <stdatomic.h>
39 #include <sys/time.h>
40 #include <time.h>
41 
42 #include "private/bionic_constants.h"
43 #include "private/bionic_futex.h"
44 #include "private/bionic_time_conversions.h"
45 
46 // In this implementation, a semaphore contains a
47 // 31-bit signed value and a 1-bit 'shared' flag
48 // (for process-sharing purpose).
49 //
50 // We use the value -1 to indicate contention on the
51 // semaphore, 0 or more to indicate uncontended state,
52 // any value lower than -2 is invalid at runtime.
53 //
54 // State diagram:
55 //
56 // post(1)  ==> 2
57 // post(0)  ==> 1
58 // post(-1) ==> 1, then wake all waiters
59 //
60 // wait(2)  ==> 1
61 // wait(1)  ==> 0
62 // wait(0)  ==> -1 then wait for a wake up + loop
63 // wait(-1) ==> -1 then wait for a wake up + loop
64 
65 // Use the upper 31-bits for the counter, and the lower one
66 // for the shared flag.
67 #define SEMCOUNT_SHARED_MASK      0x00000001
68 #define SEMCOUNT_VALUE_MASK       0xfffffffe
69 #define SEMCOUNT_VALUE_SHIFT      1
70 
71 // Convert a value into the corresponding sem->count bit pattern.
72 #define SEMCOUNT_FROM_VALUE(val)    (((val) << SEMCOUNT_VALUE_SHIFT) & SEMCOUNT_VALUE_MASK)
73 
74 // Convert a sem->count bit pattern into the corresponding signed value.
SEMCOUNT_TO_VALUE(unsigned int sval)75 static inline int SEMCOUNT_TO_VALUE(unsigned int sval) {
76   return (static_cast<int>(sval) >> SEMCOUNT_VALUE_SHIFT);
77 }
78 
79 // The value +1 as a sem->count bit-pattern.
80 #define SEMCOUNT_ONE              SEMCOUNT_FROM_VALUE(1)
81 
82 // The value -1 as a sem->count bit-pattern.
83 #define SEMCOUNT_MINUS_ONE        SEMCOUNT_FROM_VALUE(~0U)
84 
85 #define SEMCOUNT_DECREMENT(sval)    (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
86 #define SEMCOUNT_INCREMENT(sval)    (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
87 
// View the public sem_t's 'count' field as an atomic_uint for internal use.
static inline atomic_uint* SEM_TO_ATOMIC_POINTER(sem_t* sem) {
  // The cast below is only sound if the sizes (and, presumably, layout) match.
  static_assert(sizeof(atomic_uint) == sizeof(sem->count),
                "sem->count should actually be atomic_uint in implementation.");

  // We prefer casting to atomic_uint instead of declaring sem->count to be atomic_uint directly.
  // Because using the second method pollutes semaphore.h.
  return reinterpret_cast<atomic_uint*>(&sem->count);
}
96 
97 // Return the shared bitflag from a semaphore counter.
static inline unsigned int SEM_GET_SHARED(atomic_uint* sem_count_ptr) {
  // The shared flag is set once in sem_init() and never changed afterwards,
  // so a relaxed load is sufficient here.
  unsigned int count = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  return count & SEMCOUNT_SHARED_MASK;
}
102 
sem_init(sem_t * sem,int pshared,unsigned int value)103 int sem_init(sem_t* sem, int pshared, unsigned int value) {
104   // Ensure that 'value' can be stored in the semaphore.
105   if (value > SEM_VALUE_MAX) {
106     errno = EINVAL;
107     return -1;
108   }
109 
110   unsigned int count = SEMCOUNT_FROM_VALUE(value);
111   if (pshared != 0) {
112     count |= SEMCOUNT_SHARED_MASK;
113   }
114 
115   atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
116   atomic_init(sem_count_ptr, count);
117   return 0;
118 }
119 
// Nothing to release: the semaphore's entire state lives inside the sem_t
// itself, so destruction is a no-op that always succeeds.
int sem_destroy(sem_t* sem) {
  (void)sem;  // Intentionally unused.
  return 0;
}
123 
// Named semaphores are not supported by this implementation.
sem_t* sem_open(const char* name, int flags, ...) {
  (void)name;
  (void)flags;
  errno = ENOSYS;
  return SEM_FAILED;
}
128 
// Named semaphores are not supported by this implementation.
int sem_close(sem_t* sem) {
  (void)sem;
  errno = ENOSYS;
  return -1;
}
133 
// Named semaphores are not supported by this implementation.
int sem_unlink(const char* name) {
  (void)name;
  errno = ENOSYS;
  return -1;
}
138 
139 // Decrement a semaphore's value atomically,
140 // and return the old one. As a special case,
141 // this returns immediately if the value is
142 // negative (i.e. -1)
__sem_dec(atomic_uint * sem_count_ptr)143 static int __sem_dec(atomic_uint* sem_count_ptr) {
144   unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
145   unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
146 
147   // Use memory_order_seq_cst in atomic_compare_exchange operation to ensure all
148   // memory access made by other threads can be seen in current thread.
149   // An acquire fence may be sufficient, but it is still in discussion whether
150   // POSIX semaphores should provide sequential consistency.
151   do {
152     if (SEMCOUNT_TO_VALUE(old_value) < 0) {
153       break;
154     }
155   } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
156            SEMCOUNT_DECREMENT(old_value) | shared));
157 
158   return SEMCOUNT_TO_VALUE(old_value);
159 }
160 
161 // Same as __sem_dec, but will not touch anything if the
162 // value is already negative *or* 0. Returns the old value.
__sem_trydec(atomic_uint * sem_count_ptr)163 static int __sem_trydec(atomic_uint* sem_count_ptr) {
164   unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
165   unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
166 
167   // Use memory_order_seq_cst in atomic_compare_exchange operation to ensure all
168   // memory access made by other threads can be seen in current thread.
169   // An acquire fence may be sufficient, but it is still in discussion whether
170   // POSIX semaphores should provide sequential consistency.
171   do {
172     if (SEMCOUNT_TO_VALUE(old_value) <= 0) {
173       break;
174     }
175   } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
176            SEMCOUNT_DECREMENT(old_value) | shared));
177 
178   return SEMCOUNT_TO_VALUE(old_value);
179 }
180 
181 // "Increment" the value of a semaphore atomically and
182 // return its old value. Note that this implements
183 // the special case of "incrementing" any negative
184 // value to +1 directly.
185 //
186 // NOTE: The value will _not_ wrap above SEM_VALUE_MAX
__sem_inc(atomic_uint * sem_count_ptr)187 static int __sem_inc(atomic_uint* sem_count_ptr) {
188   unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
189   unsigned int shared = old_value  & SEMCOUNT_SHARED_MASK;
190   unsigned int new_value;
191 
192   // Use memory_order_seq_cst in atomic_compare_exchange operation to ensure all
193   // memory access made before can be seen in other threads.
194   // A release fence may be sufficient, but it is still in discussion whether
195   // POSIX semaphores should provide sequential consistency.
196   do {
197     // Can't go higher than SEM_VALUE_MAX.
198     if (SEMCOUNT_TO_VALUE(old_value) == SEM_VALUE_MAX) {
199       break;
200     }
201 
202     // If the counter is negative, go directly to one, otherwise just increment.
203     if (SEMCOUNT_TO_VALUE(old_value) < 0) {
204       new_value = SEMCOUNT_ONE | shared;
205     } else {
206       new_value = SEMCOUNT_INCREMENT(old_value) | shared;
207     }
208   } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
209            new_value));
210 
211   return SEMCOUNT_TO_VALUE(old_value);
212 }
213 
// Block until a unit of the semaphore can be acquired.
// For apps targeting SDK >= 24, a signal interruption fails with EINTR as
// POSIX allows; older targets keep the legacy behavior of silently retrying.
int sem_wait(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    // Fast path: the value was positive and we successfully decremented it.
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

    // Contended: __sem_dec left the count at -1 (see the state diagram
    // above), so sleep until sem_post wakes all waiters.
    int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, false, nullptr);
    if (android_get_application_target_sdk_version() >= 24) {
      if (result ==-EINTR) {
        errno = EINTR;
        return -1;
      }
    }
  }
}
232 
// Common implementation for the absolute-timeout waits. 'abs_timeout' is an
// absolute deadline measured against CLOCK_REALTIME when use_realtime_clock
// is true, CLOCK_MONOTONIC otherwise. Returns 0 on success, or -1 with errno
// set to ETIMEDOUT, EINTR, or the timespec-validation error.
static int __sem_timedwait(sem_t* sem, const timespec* abs_timeout, bool use_realtime_clock) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);

  // POSIX says we need to try to decrement the semaphore
  // before checking the timeout value. Note that if the
  // value is currently 0, __sem_trydec() does nothing.
  if (__sem_trydec(sem_count_ptr) > 0) {
    return 0;
  }

  // Check it as per POSIX.
  int result = check_timespec(abs_timeout, false);
  if (result != 0) {
    errno = result;
    return -1;
  }

  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    // Try to grab the semaphore. If the value was 0, this will also change it to -1.
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

    // Contention detected. Wait for a wakeup event.
    int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE,
                                 use_realtime_clock, abs_timeout);

    // Return in case of timeout or interrupt. Other futex results (e.g. a
    // stale expected value) just mean we should retry the decrement.
    if (result == -ETIMEDOUT || result == -EINTR) {
      errno = -result;
      return -1;
    }
  }
}
269 
// Wait for the semaphore, giving up when CLOCK_REALTIME reaches abs_timeout.
int sem_timedwait(sem_t* sem, const timespec* abs_timeout) {
  return __sem_timedwait(sem, abs_timeout, true);
}
273 
// Like sem_timedwait(), but abs_timeout is measured against CLOCK_MONOTONIC
// (non-portable extension, hence the _np suffix).
int sem_timedwait_monotonic_np(sem_t* sem, const timespec* abs_timeout) {
  return __sem_timedwait(sem, abs_timeout, false);
}
277 
sem_clockwait(sem_t * sem,clockid_t clock,const timespec * abs_timeout)278 int sem_clockwait(sem_t* sem, clockid_t clock, const timespec* abs_timeout) {
279   switch (clock) {
280     case CLOCK_MONOTONIC:
281       return sem_timedwait_monotonic_np(sem, abs_timeout);
282     case CLOCK_REALTIME:
283       return sem_timedwait(sem, abs_timeout);
284     default:
285       return EINVAL;
286   }
287 }
288 
sem_post(sem_t * sem)289 int sem_post(sem_t* sem) {
290   atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
291   unsigned int shared = SEM_GET_SHARED(sem_count_ptr);
292 
293   int old_value = __sem_inc(sem_count_ptr);
294   if (old_value < 0) {
295     // Contention on the semaphore. Wake up all waiters.
296     __futex_wake_ex(sem_count_ptr, shared, INT_MAX);
297   } else if (old_value == SEM_VALUE_MAX) {
298     // Overflow detected.
299     errno = EOVERFLOW;
300     return -1;
301   }
302 
303   return 0;
304 }
305 
sem_trywait(sem_t * sem)306 int sem_trywait(sem_t* sem) {
307   atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
308   if (__sem_trydec(sem_count_ptr) > 0) {
309     return 0;
310   } else {
311     errno = EAGAIN;
312     return -1;
313   }
314 }
315 
sem_getvalue(sem_t * sem,int * sval)316 int sem_getvalue(sem_t* sem, int* sval) {
317   atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
318 
319   // Use memory_order_seq_cst in atomic_load operation.
320   // memory_order_relaxed may be fine here, but it is still in discussion
321   // whether POSIX semaphores should provide sequential consistency.
322   int val = SEMCOUNT_TO_VALUE(atomic_load(sem_count_ptr));
323   if (val < 0) {
324     val = 0;
325   }
326 
327   *sval = val;
328   return 0;
329 }
330