/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

// XXX *technically* there is a race condition that could allow
// XXX a signal to be missed.  If thread A is preempted in _wait()
// XXX after unlocking the mutex and before waiting, and if other
// XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
// XXX before thread A is scheduled again and calls futex_wait(),
// XXX then the signal will be lost.

// We use one bit in pthread_condattr_t (long) values as the 'shared' flag
// and one bit for the clock type (CLOCK_REALTIME is 0 and
// CLOCK_MONOTONIC is 1). The rest of the bits are a counter.
//
// The 'value' field in pthread_cond_t has the same layout.

#define COND_SHARED_MASK 0x0001
#define COND_CLOCK_MASK 0x0002
#define COND_COUNTER_STEP 0x0004
#define COND_FLAGS_MASK (COND_SHARED_MASK | COND_CLOCK_MASK)
#define COND_COUNTER_MASK (~COND_FLAGS_MASK)

#define COND_IS_SHARED(c) (((c) & COND_SHARED_MASK) != 0)
#define COND_GET_CLOCK(c) (((c) & COND_CLOCK_MASK) >> 1)
#define COND_SET_CLOCK(attr, c) ((attr) | (c << 1))
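
// A minimal compile-time illustration of the layout described above (added as a
// sketch; the values follow directly from the masks defined here). For example,
// a process-shared condition variable using CLOCK_MONOTONIC stores its flags as
// 0x1 | 0x2 = 0x3, and each signal/broadcast bumps the counter by COND_COUNTER_STEP,
// which never touches the two flag bits.
static_assert((COND_SHARED_MASK | COND_CLOCK_MASK) == 0x3,
              "the shared and clock flags occupy the two low bits");
static_assert((COND_COUNTER_STEP & COND_FLAGS_MASK) == 0,
              "counter increments do not overlap the flag bits");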

int pthread_condattr_init(pthread_condattr_t* attr) {
  *attr = 0;
  *attr |= PTHREAD_PROCESS_PRIVATE;
  *attr |= (CLOCK_REALTIME << 1);
  return 0;
}

int pthread_condattr_getpshared(const pthread_condattr_t* attr, int* pshared) {
  *pshared = static_cast<int>(COND_IS_SHARED(*attr));
  return 0;
}

int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared) {
  if (pshared != PTHREAD_PROCESS_SHARED && pshared != PTHREAD_PROCESS_PRIVATE) {
    return EINVAL;
  }

  *attr |= pshared;
  return 0;
}

int pthread_condattr_getclock(const pthread_condattr_t* attr, clockid_t* clock) {
  *clock = COND_GET_CLOCK(*attr);
  return 0;
}

int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock) {
  if (clock != CLOCK_MONOTONIC && clock != CLOCK_REALTIME) {
    return EINVAL;
  }

  *attr = COND_SET_CLOCK(*attr, clock);
  return 0;
}

int pthread_condattr_destroy(pthread_condattr_t* attr) {
  *attr = 0xdeada11d;
  return 0;
}
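
// Usage sketch (illustrative, not part of the implementation): a caller that
// wants a monotonic-clock condition variable typically drives the attribute
// functions above like this:
//
//   pthread_condattr_t attr;
//   pthread_cond_t cond;
//   pthread_condattr_init(&attr);                       // private, CLOCK_REALTIME
//   pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);  // flip the clock bit
//   pthread_cond_init(&cond, &attr);                    // flags copied into cond's state
//   pthread_condattr_destroy(&attr);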

struct pthread_cond_internal_t {
  atomic_uint state;

  bool process_shared() {
    return COND_IS_SHARED(atomic_load_explicit(&state, memory_order_relaxed));
  }

  bool use_realtime_clock() {
    return COND_GET_CLOCK(atomic_load_explicit(&state, memory_order_relaxed)) == CLOCK_REALTIME;
  }

#if defined(__LP64__)
  atomic_uint waiters;
  char __reserved[40];
#endif
};

static_assert(sizeof(pthread_cond_t) == sizeof(pthread_cond_internal_t),
              "pthread_cond_t should actually be pthread_cond_internal_t in implementation.");

// For binary compatibility with old versions of pthread_cond_t, we can't require
// stricter than 4-byte alignment.
static_assert(alignof(pthread_cond_t) == 4,
              "pthread_cond_t should fulfill the alignment requirement of pthread_cond_internal_t.");

static pthread_cond_internal_t* __get_internal_cond(pthread_cond_t* cond_interface) {
  return reinterpret_cast<pthread_cond_internal_t*>(cond_interface);
}

int pthread_cond_init(pthread_cond_t* cond_interface, const pthread_condattr_t* attr) {
  pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);

  unsigned int init_state = 0;
  if (attr != nullptr) {
    init_state = (*attr & COND_FLAGS_MASK);
  }
  atomic_init(&cond->state, init_state);

#if defined(__LP64__)
  atomic_init(&cond->waiters, 0);
#endif

  return 0;
}

int pthread_cond_destroy(pthread_cond_t* cond_interface) {
  pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
  atomic_store_explicit(&cond->state, 0xdeadc04d, memory_order_relaxed);
  return 0;
}

// This function is used by pthread_cond_broadcast and
// pthread_cond_signal to atomically increment the counter
// then wake up thread_count threads.
static int __pthread_cond_pulse(pthread_cond_internal_t* cond, int thread_count) {
  // We don't use a release/seq_cst fence here, because pthread_cond_wait/signal can't be
  // used as a memory synchronization mechanism by itself: it must always be used with
  // pthread mutexes. Note that spurious wakeups from pthread_cond_wait/timedwait may occur,
  // so when using condition variables there is always a boolean predicate, involving shared
  // variables, associated with each condition wait that is true if the thread should proceed.
  // If the predicate is already seen to be true before a condition wait, pthread_cond_wait/
  // timedwait is not called at all. That's why a pthread_cond_wait/signal pair can't be used
  // for memory synchronization, and why adding a fence here wouldn't help.
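  //
  // A caller-side sketch of that pattern (illustrative only): the predicate, not
  // the wakeup itself, carries the information, and the mutex provides the memory
  // synchronization:
  //
  //   pthread_mutex_lock(&m);
  //   while (!ready) {                 // re-check: spurious wakeups are allowed
  //     pthread_cond_wait(&cond, &m);
  //   }
  //   // ... consume the shared state guarded by m ...
  //   pthread_mutex_unlock(&m);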

#if defined(__LP64__)
  if (atomic_load_explicit(&cond->waiters, memory_order_relaxed) == 0) {
    return 0;
  }
#endif

  // Incrementing the counter must leave the flag bits alone, even when the counter overflows.
  atomic_fetch_add_explicit(&cond->state, COND_COUNTER_STEP, memory_order_relaxed);

  __futex_wake_ex(&cond->state, cond->process_shared(), thread_count);
  return 0;
}

static int __pthread_cond_timedwait(pthread_cond_internal_t* cond, pthread_mutex_t* mutex,
                                    bool use_realtime_clock, const timespec* abs_timeout_or_null) {
  int result = check_timespec(abs_timeout_or_null, true);
  if (result != 0) {
    return result;
  }

  unsigned int old_state = atomic_load_explicit(&cond->state, memory_order_relaxed);

#if defined(__LP64__)
  atomic_fetch_add_explicit(&cond->waiters, 1, memory_order_relaxed);
#endif

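  // __futex_wait_ex only sleeps if cond->state still equals old_state, so a
  // signal/broadcast that bumps the counter between the unlock below and the
  // futex syscall makes the wait return immediately rather than miss the wakeup.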
  pthread_mutex_unlock(mutex);
  int status = __futex_wait_ex(&cond->state, cond->process_shared(), old_state,
                               use_realtime_clock, abs_timeout_or_null);

#if defined(__LP64__)
  atomic_fetch_sub_explicit(&cond->waiters, 1, memory_order_relaxed);
#endif

  pthread_mutex_lock(mutex);

  if (status == -ETIMEDOUT) {
    return ETIMEDOUT;
  }
  return 0;
}

int pthread_cond_broadcast(pthread_cond_t* cond_interface) {
  return __pthread_cond_pulse(__get_internal_cond(cond_interface), INT_MAX);
}

int pthread_cond_signal(pthread_cond_t* cond_interface) {
  return __pthread_cond_pulse(__get_internal_cond(cond_interface), 1);
}

int pthread_cond_wait(pthread_cond_t* cond_interface, pthread_mutex_t* mutex) {
  pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
  return __pthread_cond_timedwait(cond, mutex, false, nullptr);
}

int pthread_cond_timedwait(pthread_cond_t* cond_interface, pthread_mutex_t* mutex,
                           const timespec* abstime) {
  pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
  return __pthread_cond_timedwait(cond, mutex, cond->use_realtime_clock(), abstime);
}
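
// Caller-side sketch (illustrative only, not part of the implementation):
// pthread_cond_timedwait takes an absolute deadline measured against the
// condvar's clock (CLOCK_REALTIME unless the attr said otherwise), so a
// "wait at most 5 seconds" call typically looks like:
//
//   timespec deadline;
//   clock_gettime(CLOCK_REALTIME, &deadline);
//   deadline.tv_sec += 5;
//   pthread_mutex_lock(&m);
//   while (!ready) {
//     if (pthread_cond_timedwait(&cond, &m, &deadline) == ETIMEDOUT) break;
//   }
//   pthread_mutex_unlock(&m);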

extern "C" int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond_interface,
                                                   pthread_mutex_t* mutex,
                                                   const timespec* abs_timeout) {
  return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, false, abs_timeout);
}

int pthread_cond_clockwait(pthread_cond_t* cond_interface, pthread_mutex_t* mutex, clockid_t clock,
                           const struct timespec* abs_timeout) {
  switch (clock) {
    case CLOCK_MONOTONIC:
      return pthread_cond_timedwait_monotonic_np(cond_interface, mutex, abs_timeout);
    case CLOCK_REALTIME:
      return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, true, abs_timeout);
    default:
      return EINVAL;
  }
}
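
// Caller-side sketch (illustrative only): pthread_cond_clockwait lets the caller
// pick the clock per call, so a deadline that must not be disturbed by wall-clock
// adjustments can be built against CLOCK_MONOTONIC even if the condvar was
// initialized with default attributes:
//
//   timespec deadline;
//   clock_gettime(CLOCK_MONOTONIC, &deadline);
//   deadline.tv_sec += 1;
//   int rc = pthread_cond_clockwait(&cond, &m, CLOCK_MONOTONIC, &deadline);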

#if !defined(__LP64__)
// TODO: this exists only for backward binary compatibility on 32 bit platforms.
extern "C" int pthread_cond_timedwait_monotonic(pthread_cond_t* cond_interface,
                                                pthread_mutex_t* mutex,
                                                const timespec* abs_timeout) {
  return pthread_cond_timedwait_monotonic_np(cond_interface, mutex, abs_timeout);
}

// Force CLOCK_MONOTONIC here, because this function has always historically
// used CLOCK_MONOTONIC.
extern "C" int pthread_cond_timedwait_relative_np(pthread_cond_t* cond_interface,
                                                  pthread_mutex_t* mutex,
                                                  const timespec* rel_timeout) {
  timespec ts;
  timespec* abs_timeout = nullptr;
  if (rel_timeout != nullptr) {
    absolute_timespec_from_timespec(ts, *rel_timeout, CLOCK_MONOTONIC);
    abs_timeout = &ts;
  }
  return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, false, abs_timeout);
}

extern "C" int pthread_cond_timeout_np(pthread_cond_t* cond_interface,
                                       pthread_mutex_t* mutex, unsigned ms) {
  timespec ts;
  timespec_from_ms(ts, ms);
  return pthread_cond_timedwait_relative_np(cond_interface, mutex, &ts);
}
#endif // !defined(__LP64__)