/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <future>
#include <vector>

#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/scopeguard.h>
#include <android-base/silent_death_test.h>
#include <android-base/strings.h>
#include <android-base/test_utils.h>

#include "private/bionic_constants.h"
#include "SignalUtils.h"
#include "utils.h"

using pthread_DeathTest = SilentDeathTest;

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, nullptr);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns nullptr.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

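// Run a thread on a stack whose memory has been deliberately scribbled over:
// pthread_getspecific for a never-set key must still return nullptr rather
// than stale bytes left behind on the stack.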
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));

  size_t stack_size = 640 * 1024;
  void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* FnWithStackFrame(void*) {
  int x;
  *const_cast<volatile int*>(&x) = 1;
  return nullptr;
}

TEST(pthread, pthread_heap_allocated_stack) {
  SKIP_WITH_HWASAN; // TODO(b/148982147): Re-enable when fixed.

  size_t stack_size = 640 * 1024;
  std::unique_ptr<char[]> stack(new (std::align_val_t(getpagesize())) char[stack_size]);
  memset(stack.get(), '\xff', stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack.get(), stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, FnWithStackFrame, nullptr));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So this tests whether the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

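// Keeps a thread alive in a busy loop until UnSpin() is called (or the helper
// goes out of scope), so a test can operate on a thread that is guaranteed to
// still be running.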
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter that spin_flag_ is shared between tests, because it is
// always set to false at the end of each test. Any thread still looping on
// spin_flag_ will therefore see it become false eventually.
std::atomic<bool> SpinFunctionHelper::spin_flag_;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), nullptr));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

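// Creates a thread and immediately joins it, leaving the caller holding a
// pthread_t for a thread that no longer exists -- a stale handle for the
// "no such thread" death tests below.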
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

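// Request the largest page-aligned stack size representable in a size_t.
// The allocation can't succeed, so pthread_create should fail cleanly with
// EAGAIN rather than crash.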
TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
}

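// Scenario for http://b/37410: a helper thread calls pthread_join on the main
// thread while the main thread is exiting via pthread_exit. The mutex is just
// a handshake to make sure the helper is running before the main thread exits.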
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(nullptr);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Unlocking data->mutex will cause the main thread to exit, invalidating *data. Save the handle.
    pthread_t main_thread = data->main_thread;

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(main_thread, nullptr);

    return nullptr;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset64_t wait_set;
  sigfillset64(&wait_set);
  return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
}

TEST(pthread, pthread_sigmask64_SIGRTMIN) {
  // Check that SIGRTMIN isn't blocked.
  sigset64_t original_set;
  sigemptyset64(&original_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));

  // Block SIGRTMIN.
  sigset64_t set;
  sigemptyset64(&set);
  sigaddset64(&set, SIGRTMIN);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));

  // Check that SIGRTMIN is blocked.
  sigset64_t final_set;
  sigemptyset64(&final_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
  // ...and that sigprocmask64 agrees with pthread_sigmask64.
  sigemptyset64(&final_set);
  ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));

  // Spawn a thread that calls sigwait64 and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGRTMIN.
  pthread_kill(signal_thread, SIGRTMIN);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGRTMIN, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
}

static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}

TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}

TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"),
               "invalid pthread_t (.*) passed to pthread_setname_np");
}

TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)),
               "invalid pthread_t (.*) passed to pthread_getname_np");
}

TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_kill__exited_thread) {
  static std::promise<pid_t> tid_promise;
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              [](void*) -> void* {
                                tid_promise.set_value(gettid());
                                return nullptr;
                              },
                              nullptr));

  pid_t tid = tid_promise.get_future().get();
  while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
    continue;
  }
  ASSERT_ERRNO(ESRCH);

  ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
}

TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread),
               "invalid pthread_t (.*) passed to pthread_detach");
}

TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c),
               "invalid pthread_t (.*) passed to pthread_getcpuclockid");
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param),
               "invalid pthread_t (.*) passed to pthread_getschedparam");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param),
               "invalid pthread_t (.*) passed to pthread_setschedparam");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setschedprio(dead_thread, 123),
               "invalid pthread_t (.*) passed to pthread_setschedprio");
}

TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, nullptr),
               "invalid pthread_t (.*) passed to pthread_join");
}

TEST_F(pthread_DeathTest, pthread_join__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0),
               "invalid pthread_t (.*) passed to pthread_kill");
}

TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

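// These helpers spawn a thread with the given attributes and use
// pthread_getattr_np to read back the guard/stack size the system actually
// gave it, so tests can compare requested values against reality.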
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

TEST(pthread, pthread_attr_setguardsize_tiny) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(static_cast<unsigned long>(getpagesize()), GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_reasonable) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);
  ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough but not a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
  ASSERT_EQ(roundup(32 * 1024U + 1, getpagesize()), GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_enormous) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Larger than the stack itself. (Historically we mistakenly carved
  // the guard out of the stack itself, rather than adding it after the
  // end.)
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024*1024U, guard_size);
  ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

#if !defined(ANDROID_HOST_MUSL)
  // musl doesn't have pthread_rwlockattr_setkind_np
  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }
#endif

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

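// Shared state for the rwlock wakeup tests. The test thread holds the lock,
// and the helper advances progress from LOCK_INITIALIZED to LOCK_WAITING
// before blocking; the test then sets LOCK_RELEASED and unlocks, and the
// helper finishes with LOCK_ACCESSED (or LOCK_TIMEDOUT in the timeout tests).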
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  clockid_t clock;
};

static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedwrlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockwrlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, &ts);
  });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedrdlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, &ts);
  });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

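// Exercises the timeout paths while another thread holds the lock: an
// already-expired timeout or a negative tv_sec should give ETIMEDOUT, and a
// tv_nsec outside [0, NS_PER_S) should give EINVAL.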
static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}

static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
}

TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedrdlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockrdlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
}

TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedwrlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockwrlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

#if !defined(ANDROID_HOST_MUSL)
// musl doesn't have pthread_rwlockattr_setkind_np
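// Wraps an rwlock created with the given kind (reader or writer preference)
// and spawns reader/writer threads against it, so the tests below can observe
// which waiter gets in while the main thread holds a read lock.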
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};
#endif
1338 
TEST(pthread,pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP)1339 TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
1340 #if !defined(ANDROID_HOST_MUSL)
1341   RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
1342   ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));
1343 
1344   pthread_t writer_thread;
1345   std::atomic<pid_t> writer_tid;
1346   helper.CreateWriterThread(writer_thread, writer_tid);
1347   WaitUntilThreadSleep(writer_tid);
1348 
1349   pthread_t reader_thread;
1350   std::atomic<pid_t> reader_tid;
1351   helper.CreateReaderThread(reader_thread, reader_tid);
1352   ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
1353 
1354   ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
1355   ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
1356 #else
1357   GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
1358 #endif
1359 }
1360 
TEST(pthread,pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)1361 TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
1362 #if !defined(ANDROID_HOST_MUSL)
1363   RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
1364   ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));
1365 
1366   pthread_t writer_thread;
1367   std::atomic<pid_t> writer_tid;
1368   helper.CreateWriterThread(writer_thread, writer_tid);
1369   WaitUntilThreadSleep(writer_tid);
1370 
1371   pthread_t reader_thread;
1372   std::atomic<pid_t> reader_tid;
1373   helper.CreateReaderThread(reader_thread, reader_tid);
1374   WaitUntilThreadSleep(reader_tid);
1375 
1376   ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
1377   ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
1378   ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
1379 #else
1380   GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
1381 #endif
1382 }
1383 
1384 static int g_once_fn_call_count = 0;
1385 static void OnceFn() {
1386   ++g_once_fn_call_count;
1387 }
1388 
1389 TEST(pthread, pthread_once_smoke) {
1390   pthread_once_t once_control = PTHREAD_ONCE_INIT;
1391   ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
1392   ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
1393   ASSERT_EQ(1, g_once_fn_call_count);
1394 }
1395 
1396 static std::string pthread_once_1934122_result = "";
1397 
1398 static void Routine2() {
1399   pthread_once_1934122_result += "2";
1400 }
1401 
1402 static void Routine1() {
1403   pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
1404   pthread_once_1934122_result += "1";
1405   pthread_once(&once_control_2, &Routine2);
1406 }
1407 
1408 TEST(pthread, pthread_once_1934122) {
1409   // Very old versions of Android couldn't call pthread_once from a
1410   // pthread_once init routine. http://b/1934122.
1411   pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
1412   ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
1413   ASSERT_EQ("12", pthread_once_1934122_result);
1414 }
1415 
1416 static int g_atfork_prepare_calls = 0;
1417 static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
1418 static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
1419 static int g_atfork_parent_calls = 0;
1420 static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
1421 static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
1422 static int g_atfork_child_calls = 0;
1423 static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
1424 static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
1425 
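// Each handler appends its digit to a decimal counter, so the counter encodes
// the call order: 12 means handler 1 ran before handler 2, and 21 the reverse.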
1426 TEST(pthread, pthread_atfork_smoke_fork) {
1427   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1428   ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
1429 
1430   g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
1431   pid_t pid = fork();
1432   ASSERT_NE(-1, pid) << strerror(errno);
1433 
1434   // Child and parent calls are made in the order they were registered.
1435   if (pid == 0) {
1436     ASSERT_EQ(12, g_atfork_child_calls);
1437     _exit(0);
1438   }
1439   ASSERT_EQ(12, g_atfork_parent_calls);
1440 
1441   // Prepare calls are made in the reverse order.
1442   ASSERT_EQ(21, g_atfork_prepare_calls);
1443   AssertChildExited(pid, 0);
1444 }
1445 
1446 TEST(pthread, pthread_atfork_smoke_vfork) {
1447   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1448   ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
1449 
1450   g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
1451   pid_t pid = vfork();
1452   ASSERT_NE(-1, pid) << strerror(errno);
1453 
1454   // atfork handlers are not called.
1455   if (pid == 0) {
1456     ASSERT_EQ(0, g_atfork_child_calls);
1457     _exit(0);
1458   }
1459   ASSERT_EQ(0, g_atfork_parent_calls);
1460   ASSERT_EQ(0, g_atfork_prepare_calls);
1461   AssertChildExited(pid, 0);
1462 }
1463 
1464 TEST(pthread, pthread_atfork_smoke__Fork) {
1465 #if defined(__BIONIC__)
1466   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1467   ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
1468 
1469   g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
1470   pid_t pid = _Fork();
1471   ASSERT_NE(-1, pid) << strerror(errno);
1472 
1473   // atfork handlers are not called.
1474   if (pid == 0) {
1475     ASSERT_EQ(0, g_atfork_child_calls);
1476     _exit(0);
1477   }
1478   ASSERT_EQ(0, g_atfork_parent_calls);
1479   ASSERT_EQ(0, g_atfork_prepare_calls);
1480   AssertChildExited(pid, 0);
1481 #endif
1482 }
1483 
1484 TEST(pthread, pthread_attr_getscope) {
1485   pthread_attr_t attr;
1486   ASSERT_EQ(0, pthread_attr_init(&attr));
1487 
1488   int scope;
1489   ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
1490   ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
1491 }
1492 
1493 TEST(pthread, pthread_condattr_init) {
1494   pthread_condattr_t attr;
1495   pthread_condattr_init(&attr);
1496 
1497   clockid_t clock;
1498   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1499   ASSERT_EQ(CLOCK_REALTIME, clock);
1500 
1501   int pshared;
1502   ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1503   ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
1504 }
1505 
1506 TEST(pthread, pthread_condattr_setclock) {
1507   pthread_condattr_t attr;
1508   pthread_condattr_init(&attr);
1509 
1510   ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
1511   clockid_t clock;
1512   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1513   ASSERT_EQ(CLOCK_REALTIME, clock);
1514 
1515   ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1516   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1517   ASSERT_EQ(CLOCK_MONOTONIC, clock);
1518 
1519   ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
1520 }
1521 
1522 TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
1523 #if defined(__BIONIC__)
1524   pthread_condattr_t attr;
1525   pthread_condattr_init(&attr);
1526 
1527   ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1528   ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
1529 
1530   pthread_cond_t cond_var;
1531   ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));
1532 
1533   ASSERT_EQ(0, pthread_cond_signal(&cond_var));
1534   ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));
1535 
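  // Reach into bionic's condition variable internals: the first 32-bit word of
  // __private holds the condattr flags, so reading it back checks that
  // signal/broadcast didn't clobber the clock and pshared settings. This is an
  // implementation detail, which is why the test is bionic-only.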
1536   attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
1537   clockid_t clock;
1538   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1539   ASSERT_EQ(CLOCK_MONOTONIC, clock);
1540   int pshared;
1541   ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1542   ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
1543 #else  // !defined(__BIONIC__)
1544   GTEST_SKIP() << "bionic-only test";
1545 #endif  // !defined(__BIONIC__)
1546 }
1547 
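// Drives a single waiter thread through INITIALIZED -> WAITING -> SIGNALED ->
// FINISHED. StartWaitingThread() spins until the waiter reports WAITING and
// then sleeps briefly, giving the waiter time to actually block in
// wait_function before the test signals the condition variable.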
1548 class pthread_CondWakeupTest : public ::testing::Test {
1549  protected:
1550   pthread_mutex_t mutex;
1551   pthread_cond_t cond;
1552 
1553   enum Progress {
1554     INITIALIZED,
1555     WAITING,
1556     SIGNALED,
1557     FINISHED,
1558   };
1559   std::atomic<Progress> progress;
1560   pthread_t thread;
1561   timespec ts;
1562   std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;
1563 
1564  protected:
1565   void SetUp() override {
1566     ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
1567   }
1568 
1569   void InitCond(clockid_t clock=CLOCK_REALTIME) {
1570     pthread_condattr_t attr;
1571     ASSERT_EQ(0, pthread_condattr_init(&attr));
1572     ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
1573     ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
1574     ASSERT_EQ(0, pthread_condattr_destroy(&attr));
1575   }
1576 
1577   void StartWaitingThread(
1578       std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
1579     progress = INITIALIZED;
1580     this->wait_function = wait_function;
1581     ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn),
1582                                 this));
1583     while (progress != WAITING) {
1584       usleep(5000);
1585     }
1586     usleep(5000);
1587   }
1588 
1589   void RunTimedTest(
1590       clockid_t clock,
1591       std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* timeout)>
1592           wait_function) {
1593     ASSERT_EQ(0, clock_gettime(clock, &ts));
1594     ts.tv_sec += 1;
1595 
1596     StartWaitingThread([&wait_function, this](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1597       return wait_function(cond, mutex, &ts);
1598     });
1599 
1600     progress = SIGNALED;
1601     ASSERT_EQ(0, pthread_cond_signal(&cond));
1602   }
1603 
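  // Overload for pthread_cond_clockwait-style wait functions that also take an
  // explicit clockid_t; it adapts them to the three-argument form above.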
1604   void RunTimedTest(clockid_t clock, std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex,
1605                                                        clockid_t clock, const timespec* timeout)>
1606                                          wait_function) {
1607     RunTimedTest(clock, [clock, &wait_function](pthread_cond_t* cond, pthread_mutex_t* mutex,
1608                                                 const timespec* timeout) {
1609       return wait_function(cond, mutex, clock, timeout);
1610     });
1611   }
1612 
1613   void TearDown() override {
1614     ASSERT_EQ(0, pthread_join(thread, nullptr));
1615     ASSERT_EQ(FINISHED, progress);
1616     ASSERT_EQ(0, pthread_cond_destroy(&cond));
1617     ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
1618   }
1619 
1620  private:
1621   static void WaitThreadFn(pthread_CondWakeupTest* test) {
1622     ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
1623     test->progress = WAITING;
1624     while (test->progress == WAITING) {
1625       ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
1626     }
1627     ASSERT_EQ(SIGNALED, test->progress);
1628     test->progress = FINISHED;
1629     ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
1630   }
1631 };
1632 
1633 TEST_F(pthread_CondWakeupTest, signal_wait) {
1634   InitCond();
1635   StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1636     return pthread_cond_wait(cond, mutex);
1637   });
1638   progress = SIGNALED;
1639   ASSERT_EQ(0, pthread_cond_signal(&cond));
1640 }
1641 
1642 TEST_F(pthread_CondWakeupTest, broadcast_wait) {
1643   InitCond();
1644   StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1645     return pthread_cond_wait(cond, mutex);
1646   });
1647   progress = SIGNALED;
1648   ASSERT_EQ(0, pthread_cond_broadcast(&cond));
1649 }
1650 
1651 TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
1652   InitCond(CLOCK_REALTIME);
1653   RunTimedTest(CLOCK_REALTIME, pthread_cond_timedwait);
1654 }
1655 
1656 TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
1657   InitCond(CLOCK_MONOTONIC);
1658   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait);
1659 }
1660 
1661 TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
1662 #if defined(__BIONIC__)
1663   InitCond(CLOCK_REALTIME);
1664   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1665 #else   // __BIONIC__
1666   GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
1667 #endif  // __BIONIC__
1668 }
1669 
1670 TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_monotonic) {
1671 #if defined(__BIONIC__)
1672   InitCond(CLOCK_MONOTONIC);
1673   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
1674 #else   // __BIONIC__
1675   GTEST_SKIP() << "pthread_cond_clockwait not available";
1676 #endif  // __BIONIC__
1677 }
1678 
1679 TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_realtime) {
1680 #if defined(__BIONIC__)
1681   InitCond(CLOCK_MONOTONIC);
1682   RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
1683 #else   // __BIONIC__
1684   GTEST_SKIP() << "pthread_cond_clockwait not available";
1685 #endif  // __BIONIC__
1686 }
1687 
1688 TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_monotonic) {
1689 #if defined(__BIONIC__)
1690   InitCond(CLOCK_REALTIME);
1691   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
1692 #else   // __BIONIC__
1693   GTEST_SKIP() << "pthread_cond_clockwait not available";
1694 #endif  // __BIONIC__
1695 }
1696 
1697 TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_realtime) {
1698 #if defined(__BIONIC__)
1699   InitCond(CLOCK_REALTIME);
1700   RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
1701 #else   // __BIONIC__
1702   GTEST_SKIP() << "pthread_cond_clockwait not available";
1703 #endif  // __BIONIC__
1704 }
1705 
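// Checks a timed wait function's timeout and timespec validation: a deadline of
// "now", or one with a negative tv_sec (i.e. in the past), must return
// ETIMEDOUT, while a tv_nsec outside [0, NS_PER_S) must return EINVAL.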
1706 static void pthread_cond_timedwait_timeout_helper(bool init_monotonic, clockid_t clock,
1707                                                   int (*wait_function)(pthread_cond_t* __cond,
1708                                                                        pthread_mutex_t* __mutex,
1709                                                                        const timespec* __timeout)) {
1710   pthread_mutex_t mutex;
1711   ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
1712   pthread_cond_t cond;
1713 
1714   if (init_monotonic) {
1715     pthread_condattr_t attr;
1716     pthread_condattr_init(&attr);
1717 
1718     ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1719     clockid_t clock;
1720     ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1721     ASSERT_EQ(CLOCK_MONOTONIC, clock);
1722 
1723     ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
1724   } else {
1725     ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
1726   }
1727   ASSERT_EQ(0, pthread_mutex_lock(&mutex));
1728 
1729   timespec ts;
1730   ASSERT_EQ(0, clock_gettime(clock, &ts));
1731   ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
1732   ts.tv_nsec = -1;
1733   ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
1734   ts.tv_nsec = NS_PER_S;
1735   ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
1736   ts.tv_nsec = NS_PER_S - 1;
1737   ts.tv_sec = -1;
1738   ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
1739   ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
1740 }
1741 
1742 TEST(pthread, pthread_cond_timedwait_timeout) {
1743   pthread_cond_timedwait_timeout_helper(false, CLOCK_REALTIME, pthread_cond_timedwait);
1744 }
1745 
1746 TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
1747 #if defined(__BIONIC__)
1748   pthread_cond_timedwait_timeout_helper(false, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1749   pthread_cond_timedwait_timeout_helper(true, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1750 #else   // __BIONIC__
1751   GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
1752 #endif  // __BIONIC__
1753 }
1754 
1755 TEST(pthread, pthread_cond_clockwait_timeout) {
1756 #if defined(__BIONIC__)
1757   pthread_cond_timedwait_timeout_helper(
1758       false, CLOCK_MONOTONIC,
1759       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1760         return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
1761       });
1762   pthread_cond_timedwait_timeout_helper(
1763       true, CLOCK_MONOTONIC,
1764       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1765         return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
1766       });
1767   pthread_cond_timedwait_timeout_helper(
1768       false, CLOCK_REALTIME,
1769       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1770         return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
1771       });
1772   pthread_cond_timedwait_timeout_helper(
1773       true, CLOCK_REALTIME,
1774       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1775         return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
1776       });
1777 #else   // __BIONIC__
1778   GTEST_SKIP() << "pthread_cond_clockwait not available";
1779 #endif  // __BIONIC__
1780 }
1781 
1782 TEST(pthread, pthread_cond_clockwait_invalid) {
1783 #if defined(__BIONIC__)
1784   pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
1785   pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
1786   timespec ts;
1787   EXPECT_EQ(EINVAL, pthread_cond_clockwait(&cond, &mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
1788 
1789 #else   // __BIONIC__
1790   GTEST_SKIP() << "pthread_cond_clockwait not available";
1791 #endif  // __BIONIC__
1792 }
1793 
1794 TEST(pthread, pthread_attr_getstack__main_thread) {
1795   // This test is only meaningful for the main thread, so make sure we're running on it!
1796   ASSERT_EQ(getpid(), syscall(__NR_gettid));
1797 
1798   // Get the main thread's attributes.
1799   pthread_attr_t attributes;
1800   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1801 
1802   // Check that we correctly report that the main thread has no guard page.
1803   size_t guard_size;
1804   ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
1805   ASSERT_EQ(0U, guard_size); // The main thread has no guard page.
1806 
1807   // Get the stack base and the stack size (both ways).
1808   void* stack_base;
1809   size_t stack_size;
1810   ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1811   size_t stack_size2;
1812   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1813 
1814   // The two methods of asking for the stack size should agree.
1815   EXPECT_EQ(stack_size, stack_size2);
1816 
1817 #if defined(__BIONIC__)
1818   // Find stack in /proc/self/maps using a pointer to the stack.
1819   //
1820   // We do not use the "[stack]" label because in a native-bridge environment it is not
1821   // guaranteed to point to the right stack. A native bridge implementation may keep a
1822   // separate stack for the guest code.
1823   void* maps_stack_hi = nullptr;
1824   std::vector<map_record> maps;
1825   ASSERT_TRUE(Maps::parse_maps(&maps));
1826   uintptr_t stack_address = reinterpret_cast<uintptr_t>(untag_address(&maps_stack_hi));
1827   for (const auto& map : maps) {
1828     if (map.addr_start <= stack_address && map.addr_end > stack_address){
1829       maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
1830       break;
1831     }
1832   }
1833 
1834   // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
1835   // Remember that the stack grows down (and is mapped in on demand), so the low address of the
1836   // region isn't very interesting.
1837   EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);
1838 
1839   // The stack size should correspond to RLIMIT_STACK.
1840   rlimit rl;
1841   ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
1842   uint64_t original_rlim_cur = rl.rlim_cur;
1843   if (rl.rlim_cur == RLIM_INFINITY) {
1844     rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
1845   }
1846   EXPECT_EQ(rl.rlim_cur, stack_size);
1847 
1848   auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
1849     rl.rlim_cur = original_rlim_cur;
1850     ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1851   });
1852 
1853   //
1854   // What if RLIMIT_STACK is smaller than the stack's current extent?
1855   //
1856   rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
1857   rl.rlim_max = RLIM_INFINITY;
1858   ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1859 
1860   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1861   ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1862   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1863 
1864   EXPECT_EQ(stack_size, stack_size2);
1865   ASSERT_EQ(1024U, stack_size);
1866 
1867   //
1868   // What if RLIMIT_STACK isn't a whole number of pages?
1869   //
1870   rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
1871   rl.rlim_max = RLIM_INFINITY;
1872   ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1873 
1874   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1875   ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1876   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1877 
1878   EXPECT_EQ(stack_size, stack_size2);
1879   ASSERT_EQ(6666U, stack_size);
1880 #endif
1881 }
1882 
1883 struct GetStackSignalHandlerArg {
1884   volatile bool done;
1885   void* signal_stack_base;
1886   size_t signal_stack_size;
1887   void* main_stack_base;
1888   size_t main_stack_size;
1889 };
1890 
1891 static GetStackSignalHandlerArg getstack_signal_handler_arg;
1892 
1893 static void getstack_signal_handler(int sig) {
1894   ASSERT_EQ(SIGUSR1, sig);
1895   // Use sleep() to get the current thread switched out by the kernel, provoking the error.
1896   sleep(1);
1897   pthread_attr_t attr;
1898   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
1899   void* stack_base;
1900   size_t stack_size;
1901   ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));
1902 
1903   // Verify that the stack used by the signal handler is the alternate stack just registered.
1904   ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
1905   ASSERT_LT(static_cast<void*>(untag_address(&attr)),
1906             static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
1907                 getstack_signal_handler_arg.signal_stack_size);
1908 
1909   // Verify that the main thread's stack, as seen from the signal handler, is correct.
1910   ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
1911   ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);
1912 
1913   getstack_signal_handler_arg.done = true;
1914 }
1915 
1916 // The previous code obtained the main thread's stack by reading the entry in
1917 // /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64 the kernel
1918 // relies on sp0 in the task state segment (TSS) to label the stack map with [stack]. If the
1919 // kernel switches a process out while the main thread is on an alternate stack, the kernel will
1920 // label the wrong map with [stack]. This test verifies that the main thread's stack is still
1921 // found correctly when that happens.
1922 TEST(pthread, pthread_attr_getstack_in_signal_handler) {
1923   // This test is only meaningful for the main thread, so make sure we're running on it!
1924   ASSERT_EQ(getpid(), syscall(__NR_gettid));
1925 
1926   const size_t sig_stack_size = 16 * 1024;
1927   void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
1928                          -1, 0);
1929   ASSERT_NE(MAP_FAILED, sig_stack);
1930   stack_t ss;
1931   ss.ss_sp = sig_stack;
1932   ss.ss_size = sig_stack_size;
1933   ss.ss_flags = 0;
1934   stack_t oss;
1935   ASSERT_EQ(0, sigaltstack(&ss, &oss));
1936 
1937   pthread_attr_t attr;
1938   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
1939   void* main_stack_base;
1940   size_t main_stack_size;
1941   ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));
1942 
1943   ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
1944   getstack_signal_handler_arg.done = false;
1945   getstack_signal_handler_arg.signal_stack_base = sig_stack;
1946   getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
1947   getstack_signal_handler_arg.main_stack_base = main_stack_base;
1948   getstack_signal_handler_arg.main_stack_size = main_stack_size;
1949   kill(getpid(), SIGUSR1);
1950   ASSERT_EQ(true, getstack_signal_handler_arg.done);
1951 
1952   ASSERT_EQ(0, sigaltstack(&oss, nullptr));
1953   ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
1954 }
1955 
1956 static void pthread_attr_getstack_18908062_helper(void*) {
1957   char local_variable;
1958   pthread_attr_t attributes;
1959   pthread_getattr_np(pthread_self(), &attributes);
1960   void* stack_base;
1961   size_t stack_size;
1962   pthread_attr_getstack(&attributes, &stack_base, &stack_size);
1963 
1964   // Test whether &local_variable is in [stack_base, stack_base + stack_size).
1965   ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
1966   ASSERT_LT(untag_address(&local_variable), reinterpret_cast<char*>(stack_base) + stack_size);
1967 }
1968 
1969 // Check that something on the stack is in the range
1970 // [stack_base, stack_base + stack_size). See b/18908062.
1971 TEST(pthread, pthread_attr_getstack_18908062) {
1972   pthread_t t;
1973   ASSERT_EQ(0, pthread_create(&t, nullptr,
1974             reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
1975             nullptr));
1976   ASSERT_EQ(0, pthread_join(t, nullptr));
1977 }
1978 
1979 #if defined(__BIONIC__)
1980 static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;
1981 
1982 static void* pthread_gettid_np_helper(void* arg) {
1983   *reinterpret_cast<pid_t*>(arg) = gettid();
1984 
1985   // Wait for our parent to call pthread_gettid_np on us before exiting.
1986   pthread_mutex_lock(&pthread_gettid_np_mutex);
1987   pthread_mutex_unlock(&pthread_gettid_np_mutex);
1988   return nullptr;
1989 }
1990 #endif
1991 
1992 TEST(pthread, pthread_gettid_np) {
1993 #if defined(__BIONIC__)
1994   ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
1995 
1996   // Ensure the other thread doesn't exit until after we've called
1997   // pthread_gettid_np on it.
1998   pthread_mutex_lock(&pthread_gettid_np_mutex);
1999 
2000   pid_t t_gettid_result;
2001   pthread_t t;
2002   pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);
2003 
2004   pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
2005 
2006   // Release the other thread and wait for it to exit.
2007   pthread_mutex_unlock(&pthread_gettid_np_mutex);
2008   ASSERT_EQ(0, pthread_join(t, nullptr));
2009 
2010   ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
2011 #else
2012   GTEST_SKIP() << "pthread_gettid_np not available";
2013 #endif
2014 }
2015 
2016 static size_t cleanup_counter = 0;
2017 
2018 static void AbortCleanupRoutine(void*) {
2019   abort();
2020 }
2021 
2022 static void CountCleanupRoutine(void*) {
2023   ++cleanup_counter;
2024 }
2025 
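// Expected flow: three routines are pushed; the abort is popped without
// running, one counter is popped and run (cleanup_counter becomes 1), and
// pthread_exit() runs the remaining counter, so the joining test sees 2.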
2026 static void PthreadCleanupTester() {
2027   pthread_cleanup_push(CountCleanupRoutine, nullptr);
2028   pthread_cleanup_push(CountCleanupRoutine, nullptr);
2029   pthread_cleanup_push(AbortCleanupRoutine, nullptr);
2030 
2031   pthread_cleanup_pop(0); // Pop the abort without executing it.
2032   pthread_cleanup_pop(1); // Pop one count while executing it.
2033   ASSERT_EQ(1U, cleanup_counter);
2034   // Exit while the other count is still on the cleanup stack.
2035   pthread_exit(nullptr);
2036 
2037   // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
2038   pthread_cleanup_pop(0);
2039 }
2040 
2041 static void* PthreadCleanupStartRoutine(void*) {
2042   PthreadCleanupTester();
2043   return nullptr;
2044 }
2045 
2046 TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
2047   pthread_t t;
2048   ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
2049   ASSERT_EQ(0, pthread_join(t, nullptr));
2050   ASSERT_EQ(2U, cleanup_counter);
2051 }
2052 
2053 TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
2054   ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
2055 }
2056 
2057 TEST(pthread, pthread_mutexattr_gettype) {
2058   pthread_mutexattr_t attr;
2059   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2060 
2061   int attr_type;
2062 
2063   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
2064   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
2065   ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
2066 
2067   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
2068   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
2069   ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
2070 
2071   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
2072   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
2073   ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
2074 
2075   ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
2076 }
2077 
2078 TEST(pthread, pthread_mutexattr_protocol) {
2079   pthread_mutexattr_t attr;
2080   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2081 
2082   int protocol;
2083   ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
2084   ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
2085   for (size_t repeat = 0; repeat < 2; ++repeat) {
2086     for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
2087       ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
2088       ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
2089       ASSERT_EQ(protocol, set_protocol);
2090     }
2091   }
2092 }
2093 
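// RAII wrapper that initializes a mutex of the given type and protocol in its
// constructor and destroys it in its destructor. Typical use, as in the tests
// below:
//   PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, PTHREAD_PRIO_INHERIT);
//   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
//   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));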
2094 struct PthreadMutex {
2095   pthread_mutex_t lock;
2096 
2097   explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
2098     init(mutex_type, protocol);
2099   }
2100 
2101   ~PthreadMutex() {
2102     destroy();
2103   }
2104 
2105  private:
2106   void init(int mutex_type, int protocol) {
2107     pthread_mutexattr_t attr;
2108     ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2109     ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
2110     ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
2111     ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
2112     ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
2113   }
2114 
2115   void destroy() {
2116     ASSERT_EQ(0, pthread_mutex_destroy(&lock));
2117   }
2118 
2119   DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
2120 };
2121 
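// Returns the result of pthread_mutex_unlock() called from a thread that
// doesn't own the mutex; errorcheck, recursive, and priority-inheritance
// mutexes track their owner and are expected to return EPERM here.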
2122 static int UnlockFromAnotherThread(pthread_mutex_t* mutex) {
2123   pthread_t thread;
2124   pthread_create(&thread, nullptr, [](void* mutex_voidp) -> void* {
2125     pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(mutex_voidp);
2126     intptr_t result = pthread_mutex_unlock(mutex);
2127     return reinterpret_cast<void*>(result);
2128   }, mutex);
2129   void* result;
2130   EXPECT_EQ(0, pthread_join(thread, &result));
2131   return reinterpret_cast<intptr_t>(result);
2132 }
2133 
2134 static void TestPthreadMutexLockNormal(int protocol) {
2135   PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);
2136 
2137   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2138   if (protocol == PTHREAD_PRIO_INHERIT) {
2139     ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2140   }
2141   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2142   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2143   ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
2144   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2145 }
2146 
2147 static void TestPthreadMutexLockErrorCheck(int protocol) {
2148   PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);
2149 
2150   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2151   ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2152   ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
2153   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2154   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2155   if (protocol == PTHREAD_PRIO_NONE) {
2156     ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
2157   } else {
2158     ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
2159   }
2160   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2161   ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
2162 }
2163 
2164 static void TestPthreadMutexLockRecursive(int protocol) {
2165   PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);
2166 
2167   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2168   ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2169   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2170   ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2171   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2172   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2173   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2174   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2175   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2176   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2177   ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
2178 }
2179 
2180 TEST(pthread, pthread_mutex_lock_NORMAL) {
2181   TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
2182 }
2183 
2184 TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
2185   TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
2186 }
2187 
2188 TEST(pthread, pthread_mutex_lock_RECURSIVE) {
2189   TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
2190 }
2191 
2192 TEST(pthread, pthread_mutex_lock_pi) {
2193   TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
2194   TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
2195   TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
2196 }
2197 
2198 TEST(pthread, pthread_mutex_pi_count_limit) {
2199 #if defined(__BIONIC__) && !defined(__LP64__)
2200   // Bionic only supports 65536 pi mutexes in 32-bit programs.
2201   pthread_mutexattr_t attr;
2202   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2203   ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
2204   std::vector<pthread_mutex_t> mutexes(65536);
2205   // Test that we can use 65536 PI mutexes at the same time.
2206   // Run twice to check that destroyed PI mutexes can be recycled.
2207   for (int repeat = 0; repeat < 2; ++repeat) {
2208     for (auto& m : mutexes) {
2209       ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
2210     }
2211     pthread_mutex_t m;
2212     ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
2213     for (auto& m : mutexes) {
2214       ASSERT_EQ(0, pthread_mutex_lock(&m));
2215     }
2216     for (auto& m : mutexes) {
2217       ASSERT_EQ(0, pthread_mutex_unlock(&m));
2218     }
2219     for (auto& m : mutexes) {
2220       ASSERT_EQ(0, pthread_mutex_destroy(&m));
2221     }
2222   }
2223   ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
2224 #else
2225   GTEST_SKIP() << "pi mutex count not limited to 64Ki";
2226 #endif
2227 }
2228 
2229 TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
2230   pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
2231   PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
2232   ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
2233   pthread_mutex_destroy(&lock_normal);
2234 
2235 #if !defined(ANDROID_HOST_MUSL)
2236   // musl doesn't support PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP or
2237   // PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.
2238   pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
2239   PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
2240   ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
2241   pthread_mutex_destroy(&lock_errorcheck);
2242 
2243   pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
2244   PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
2245   ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
2246   ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
2247 #endif
2248 }
2249 
2250 class MutexWakeupHelper {
2251  private:
2252   PthreadMutex m;
2253   enum Progress {
2254     LOCK_INITIALIZED,
2255     LOCK_WAITING,
2256     LOCK_RELEASED,
2257     LOCK_ACCESSED
2258   };
2259   std::atomic<Progress> progress;
2260   std::atomic<pid_t> tid;
2261 
2262   static void thread_fn(MutexWakeupHelper* helper) {
2263     helper->tid = gettid();
2264     ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
2265     helper->progress = LOCK_WAITING;
2266 
2267     ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
2268     ASSERT_EQ(LOCK_RELEASED, helper->progress);
2269     ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
2270 
2271     helper->progress = LOCK_ACCESSED;
2272   }
2273 
2274  public:
2275   explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
2276   }
2277 
2278   void test() {
2279     ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2280     progress = LOCK_INITIALIZED;
2281     tid = 0;
2282 
2283     pthread_t thread;
2284     ASSERT_EQ(0, pthread_create(&thread, nullptr,
2285       reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));
2286 
2287     WaitUntilThreadSleep(tid);
2288     ASSERT_EQ(LOCK_WAITING, progress);
2289 
2290     progress = LOCK_RELEASED;
2291     ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2292 
2293     ASSERT_EQ(0, pthread_join(thread, nullptr));
2294     ASSERT_EQ(LOCK_ACCESSED, progress);
2295   }
2296 };
2297 
2298 TEST(pthread, pthread_mutex_NORMAL_wakeup) {
2299   MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
2300   helper.test();
2301 }
2302 
2303 TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
2304   MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
2305   helper.test();
2306 }
2307 
2308 TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
2309   MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
2310   helper.test();
2311 }
2312 
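// Returns the "priority" field (field 18) of /proc/<tid>/stat. For normal
// (non-real-time) threads the kernel reports this as 20 + nice, so nice 0
// reads back as 20 and nice 1 as 21, as the assertions below expect.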
2313 static int GetThreadPriority(pid_t tid) {
2314   // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
2315   // priority after priority inheritance. So read /proc/<tid>/stat to get the dynamic priority.
2316   std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
2317   std::string content;
2318   int result = INT_MAX;
2319   if (!android::base::ReadFileToString(filename, &content)) {
2320     return result;
2321   }
2322   std::vector<std::string> strs = android::base::Split(content, " ");
2323   if (strs.size() < 18) {
2324     return result;
2325   }
2326   if (!android::base::ParseInt(strs[17], &result)) {
2327     return INT_MAX;
2328   }
2329   return result;
2330 }
2331 
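// Checks priority inheritance: a nice-1 child thread takes the mutex, the
// nice-0 main thread blocks on it, and while main is blocked the child's
// dynamic priority should be boosted to main's (20) under PTHREAD_PRIO_INHERIT
// but stay at 21 otherwise. start_thread_m only sequences the two threads.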
2332 class PIMutexWakeupHelper {
2333 private:
2334   PthreadMutex m;
2335   int protocol;
2336   enum Progress {
2337     LOCK_INITIALIZED,
2338     LOCK_CHILD_READY,
2339     LOCK_WAITING,
2340     LOCK_RELEASED,
2341   };
2342   std::atomic<Progress> progress;
2343   std::atomic<pid_t> main_tid;
2344   std::atomic<pid_t> child_tid;
2345   PthreadMutex start_thread_m;
2346 
2347   static void thread_fn(PIMutexWakeupHelper* helper) {
2348     helper->child_tid = gettid();
2349     ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
2350     ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
2351     ASSERT_EQ(21, GetThreadPriority(gettid()));
2352     ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
2353     helper->progress = LOCK_CHILD_READY;
2354     ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));
2355 
2356     ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
2357     WaitUntilThreadSleep(helper->main_tid);
2358     ASSERT_EQ(LOCK_WAITING, helper->progress);
2359 
2360     if (helper->protocol == PTHREAD_PRIO_INHERIT) {
2361       ASSERT_EQ(20, GetThreadPriority(gettid()));
2362     } else {
2363       ASSERT_EQ(21, GetThreadPriority(gettid()));
2364     }
2365     helper->progress = LOCK_RELEASED;
2366     ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
2367   }
2368 
2369 public:
2370   explicit PIMutexWakeupHelper(int mutex_type, int protocol)
2371       : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
2372   }
2373 
2374   void test() {
2375     ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
2376     main_tid = gettid();
2377     ASSERT_EQ(20, GetThreadPriority(main_tid));
2378     progress = LOCK_INITIALIZED;
2379     child_tid = 0;
2380 
2381     pthread_t thread;
2382     ASSERT_EQ(0, pthread_create(&thread, nullptr,
2383               reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));
2384 
2385     WaitUntilThreadSleep(child_tid);
2386     ASSERT_EQ(LOCK_CHILD_READY, progress);
2387     ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
2388     progress = LOCK_WAITING;
2389     ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2390 
2391     ASSERT_EQ(LOCK_RELEASED, progress);
2392     ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2393     ASSERT_EQ(0, pthread_join(thread, nullptr));
2394   }
2395 };
2396 
2397 TEST(pthread, pthread_mutex_pi_wakeup) {
2398   for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
2399     for (int protocol : {PTHREAD_PRIO_INHERIT}) {
2400       PIMutexWakeupHelper helper(type, protocol);
2401       helper.test();
2402     }
2403   }
2404 }
2405 
2406 TEST(pthread, pthread_mutex_owner_tid_limit) {
2407 #if defined(__BIONIC__) && !defined(__LP64__)
2408   FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
2409   ASSERT_TRUE(fp != nullptr);
2410   long pid_max;
2411   ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
2412   fclose(fp);
2413   // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
2414   ASSERT_LE(pid_max, 65536);
2415 #else
2416   GTEST_SKIP() << "pthread_mutex supports 32-bit tid";
2417 #endif
2418 }
2419 
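// Exercises a timed lock function against a mutex held by the caller: it must
// time out on a deadline of "now" or in the past, reject a malformed timespec
// with EINVAL, block for at least the full second before returning ETIMEDOUT,
// and succeed once the mutex has been unlocked.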
2420 static void pthread_mutex_timedlock_helper(clockid_t clock,
2421                                            int (*lock_function)(pthread_mutex_t* __mutex,
2422                                                                 const timespec* __timeout)) {
2423   pthread_mutex_t m;
2424   ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2425 
2426   // If the mutex is already locked, pthread_mutex_timedlock should time out.
2427   ASSERT_EQ(0, pthread_mutex_lock(&m));
2428 
2429   timespec ts;
2430   ASSERT_EQ(0, clock_gettime(clock, &ts));
2431   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
2432   ts.tv_nsec = -1;
2433   ASSERT_EQ(EINVAL, lock_function(&m, &ts));
2434   ts.tv_nsec = NS_PER_S;
2435   ASSERT_EQ(EINVAL, lock_function(&m, &ts));
2436   ts.tv_nsec = NS_PER_S - 1;
2437   ts.tv_sec = -1;
2438   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
2439 
2440   // Check that we wait long enough for the lock.
2441   ASSERT_EQ(0, clock_gettime(clock, &ts));
2442   const int64_t start_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
2443 
2444   // Add a second to get the deadline.
2445   ts.tv_sec += 1;
2446 
2447   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
2448 
2449   // The timedlock must have waited at least 1 second before returning.
2450   clock_gettime(clock, &ts);
2451   const int64_t end_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
2452   ASSERT_GT(end_ns - start_ns, NS_PER_S);
2453 
2454   // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
2455   ASSERT_EQ(0, pthread_mutex_unlock(&m));
2456 
2457   ASSERT_EQ(0, clock_gettime(clock, &ts));
2458   ts.tv_sec += 1;
2459   ASSERT_EQ(0, lock_function(&m, &ts));
2460 
2461   ASSERT_EQ(0, pthread_mutex_unlock(&m));
2462   ASSERT_EQ(0, pthread_mutex_destroy(&m));
2463 }
2464 
2465 TEST(pthread, pthread_mutex_timedlock) {
2466   pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
2467 }
2468 
2469 TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
2470 #if defined(__BIONIC__)
2471   pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
2472 #else   // __BIONIC__
2473   GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
2474 #endif  // __BIONIC__
2475 }
2476 
2477 TEST(pthread, pthread_mutex_clocklock) {
2478 #if defined(__BIONIC__)
2479   pthread_mutex_timedlock_helper(
2480       CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2481         return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
2482       });
2483   pthread_mutex_timedlock_helper(
2484       CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2485         return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
2486       });
2487 #else   // __BIONIC__
2488   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2489 #endif  // __BIONIC__
2490 }
2491 
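// The same timeout check for priority-inheritance mutexes, which bionic
// handles separately from normal mutexes (hence the separate helper): a second
// thread must get ETIMEDOUT after waiting at least a second for the PI mutex
// held by the main thread.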
2492 static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
2493                                               int (*lock_function)(pthread_mutex_t* __mutex,
2494                                                                    const timespec* __timeout)) {
2495   PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);
2496 
2497   timespec ts;
2498   clock_gettime(clock, &ts);
2499   const int64_t start_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
2500 
2501   // Add a second to get the deadline.
2502   ts.tv_sec += 1;
2503 
2504   ASSERT_EQ(0, lock_function(&m.lock, &ts));
2505 
2506   struct ThreadArgs {
2507     clockid_t clock;
2508     int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
2509     PthreadMutex& m;
2510   };
2511 
2512   ThreadArgs thread_args = {
2513     .clock = clock,
2514     .lock_function = lock_function,
2515     .m = m,
2516   };
2517 
2518   auto ThreadFn = [](void* arg) -> void* {
2519     auto args = static_cast<ThreadArgs*>(arg);
2520     timespec ts;
2521     clock_gettime(args->clock, &ts);
2522     ts.tv_sec += 1;
2523     intptr_t result = args->lock_function(&args->m.lock, &ts);
2524     return reinterpret_cast<void*>(result);
2525   };
2526 
2527   pthread_t thread;
2528   ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
2529   void* result;
2530   ASSERT_EQ(0, pthread_join(thread, &result));
2531   ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
2532 
2533   // The timedlock must have waited at least 1 second before returning.
2534   clock_gettime(clock, &ts);
2535   const int64_t end_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
2536   ASSERT_GT(end_ns - start_ns, NS_PER_S);
2537 
2538   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2539 }
2540 
2541 TEST(pthread, pthread_mutex_timedlock_pi) {
2542   pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
2543 }
2544 
2545 TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
2546 #if defined(__BIONIC__)
2547   pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
2548 #else   // __BIONIC__
2549   GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
2550 #endif  // __BIONIC__
2551 }
2552 
2553 TEST(pthread, pthread_mutex_clocklock_pi) {
2554 #if defined(__BIONIC__)
2555   pthread_mutex_timedlock_pi_helper(
2556       CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2557         return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
2558       });
2559   pthread_mutex_timedlock_pi_helper(
2560       CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2561         return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
2562       });
2563 #else   // __BIONIC__
2564   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2565 #endif  // __BIONIC__
2566 }
2567 
2568 TEST(pthread, pthread_mutex_clocklock_invalid) {
2569 #if defined(__BIONIC__)
2570   pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
2571   timespec ts;
2572   EXPECT_EQ(EINVAL, pthread_mutex_clocklock(&mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
2573 #else   // __BIONIC__
2574   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2575 #endif  // __BIONIC__
2576 }
2577 
2578 TEST_F(pthread_DeathTest, pthread_mutex_using_destroyed_mutex) {
2579 #if defined(__BIONIC__)
2580   pthread_mutex_t m;
2581   ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2582   ASSERT_EQ(0, pthread_mutex_destroy(&m));
2583   ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
2584               "pthread_mutex_lock called on a destroyed mutex");
2585   ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
2586               "pthread_mutex_unlock called on a destroyed mutex");
2587   ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
2588               "pthread_mutex_trylock called on a destroyed mutex");
2589   timespec ts;
2590   ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
2591               "pthread_mutex_timedlock called on a destroyed mutex");
2592   ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
2593               "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
2594   ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_MONOTONIC, &ts), ::testing::KilledBySignal(SIGABRT),
2595               "pthread_mutex_clocklock called on a destroyed mutex");
2596   ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_REALTIME, &ts), ::testing::KilledBySignal(SIGABRT),
2597               "pthread_mutex_clocklock called on a destroyed mutex");
2598   ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_PROCESS_CPUTIME_ID, &ts),
2599               ::testing::KilledBySignal(SIGABRT),
2600               "pthread_mutex_clocklock called on a destroyed mutex");
2601   ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
2602               "pthread_mutex_destroy called on a destroyed mutex");
2603 #else
2604   GTEST_SKIP() << "bionic-only test";
2605 #endif
2606 }
2607 
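// Hands out pointers that are aligned to `alignment` but deliberately NOT to
// `alignment * 2` (address % (2 * alignment) == alignment), so the test below
// can check that pthread types still work at the minimum supported alignment.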
2608 class StrictAlignmentAllocator {
2609  public:
2610   void* allocate(size_t size, size_t alignment) {
2611     char* p = new char[size + alignment * 2];
2612     allocated_array.push_back(p);
2613     while (!is_strict_aligned(p, alignment)) {
2614       ++p;
2615     }
2616     return p;
2617   }
2618 
2619   ~StrictAlignmentAllocator() {
2620     for (const auto& p : allocated_array) {
2621       delete[] p;
2622     }
2623   }
2624 
2625  private:
2626   bool is_strict_aligned(char* p, size_t alignment) {
2627     return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
2628   }
2629 
2630   std::vector<char*> allocated_array;
2631 };
2632 
2633 TEST(pthread, pthread_types_allow_four_bytes_alignment) {
2634 #if defined(__BIONIC__)
2635   // For binary compatibility with old versions, we need to allow 4-byte-aligned data for pthread types.
2636   StrictAlignmentAllocator allocator;
2637   pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
2638                              allocator.allocate(sizeof(pthread_mutex_t), 4));
2639   ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
2640   ASSERT_EQ(0, pthread_mutex_lock(mutex));
2641   ASSERT_EQ(0, pthread_mutex_unlock(mutex));
2642   ASSERT_EQ(0, pthread_mutex_destroy(mutex));
2643 
2644   pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
2645                            allocator.allocate(sizeof(pthread_cond_t), 4));
2646   ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
2647   ASSERT_EQ(0, pthread_cond_signal(cond));
2648   ASSERT_EQ(0, pthread_cond_broadcast(cond));
2649   ASSERT_EQ(0, pthread_cond_destroy(cond));
2650 
2651   pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
2652                                allocator.allocate(sizeof(pthread_rwlock_t), 4));
2653   ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
2654   ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
2655   ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
2656   ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
2657   ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
2658   ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));
2659 
2660 #else
2661   GTEST_SKIP() << "bionic-only test";
2662 #endif
2663 }
2664 
2665 TEST(pthread, pthread_mutex_lock_null_32) {
2666 #if defined(__BIONIC__) && !defined(__LP64__)
2667   // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
2668   // EINVAL in that case: http://b/19995172.
2669   //
2670   // We decorate the public definition with _Nonnull so that people recompiling
2671   // their code will get a warning and might fix their bug, but we need to pass
2672   // NULL here to test that we remain compatible.
2673   pthread_mutex_t* null_value = nullptr;
2674   ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
2675 #else
2676   GTEST_SKIP() << "32-bit bionic-only test";
2677 #endif
2678 }
2679 
2680 TEST(pthread, pthread_mutex_unlock_null_32) {
2681 #if defined(__BIONIC__) && !defined(__LP64__)
2682   // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
2683   // EINVAL in that case: http://b/19995172.
2684   //
2685   // We decorate the public definition with _Nonnull so that people recompiling
2686   // their code will get a warning and might fix their bug, but we need to pass
2687   // NULL here to test that we remain compatible.
2688   pthread_mutex_t* null_value = nullptr;
2689   ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
2690 #else
2691   GTEST_SKIP() << "32-bit bionic-only test";
2692 #endif
2693 }
2694 
2695 TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
2696 #if defined(__BIONIC__) && defined(__LP64__)
2697   pthread_mutex_t* null_value = nullptr;
2698   ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
2699 #else
2700   GTEST_SKIP() << "64-bit bionic-only test";
2701 #endif
2702 }
2703 
2704 TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
2705 #if defined(__BIONIC__) && defined(__LP64__)
2706   pthread_mutex_t* null_value = nullptr;
2707   ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
2708 #else
2709   GTEST_SKIP() << "64-bit bionic-only test";
2710 #endif
2711 }
2712 
extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

static volatile bool signal_handler_on_altstack_done;

__attribute__((__noinline__))
static void signal_handler_backtrace() {
  // Check that we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
}

__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check that we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check that we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  signal_handler_backtrace();
  signal_handler_logging();
  signal_handler_snprintf();
}

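// Deliver SIGUSR1 with SA_ONSTACK so the handler runs on the alternate signal
// stack, and check that that stack has room for unwinding, logging, and
// snprintf.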
TEST(pthread, big_enough_signal_stack) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}

TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}

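// Shared state for the barrier smoke test. finished_mask is a bitmask of the
// threads that have passed the barrier in the current iteration, and
// serial_thread_count counts how many waiters saw PTHREAD_BARRIER_SERIAL_THREAD
// (which should be exactly one per iteration).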
struct BarrierTestHelperData {
  size_t thread_count;
  pthread_barrier_t barrier;
  std::atomic<int> finished_mask;
  std::atomic<int> serial_thread_count;
  size_t iteration_count;
  std::atomic<size_t> finished_iteration_count;

  BarrierTestHelperData(size_t thread_count, size_t iteration_count)
      : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
        iteration_count(iteration_count), finished_iteration_count(0) {
  }
};

struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};

static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  for (size_t i = 0; i < arg->data->iteration_count; ++i) {
    int result = pthread_barrier_wait(&arg->data->barrier);
    if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
      arg->data->serial_thread_count++;
    } else {
      ASSERT_EQ(0, result);
    }
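    // Each thread sets its bit in finished_mask; the thread whose fetch_or
    // completes the mask is the last one through this iteration, so it checks
    // that exactly one waiter was the serial thread and resets the
    // per-iteration state.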
    int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
    mask |= 1 << arg->id;
    if (mask == ((1 << arg->data->thread_count) - 1)) {
      ASSERT_EQ(1, arg->data->serial_thread_count);
      arg->data->finished_iteration_count++;
      arg->data->finished_mask = 0;
      arg->data->serial_thread_count = 0;
    }
  }
}

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
  ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
  std::vector<pthread_t> threads(data.thread_count);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].id = i;
    args[i].data = &data;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
  ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
}

struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};

static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
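  // While the helper thread is still blocked in pthread_barrier_wait(),
  // destroying the barrier must fail with EBUSY.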
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}

struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
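    // Wait a second time so that no thread starts writing the next
    // iteration's value while another thread is still checking this one's.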
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}

TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}

TEST(pthread, pthread_barrier_init_zero_count) {
  pthread_barrier_t barrier;
  ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
}

TEST(pthread, pthread_spinlock_smoke) {
  pthread_spinlock_t lock;
  ASSERT_EQ(0, pthread_spin_init(&lock, 0));
  ASSERT_EQ(0, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_lock(&lock));
  ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_destroy(&lock));
}

TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);

  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);

  ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
}

TEST(pthread, pthread_create__mmap_failures) {
  // After the thread is successfully created, native_bridge might need more memory to run it.
  SKIP_WITH_NATIVE_BRIDGE;

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default the limit is 64Ki (though some will already be in use).
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
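    // Alternate the protection bits so adjacent mappings can't be merged into
    // a single VMA by the kernel: each page must consume its own VMA.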
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}

TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}

TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}

TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);

  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);

  ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
}

TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_OTHER, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);

TEST(pthread, run_on_all_threads) {
#if defined(__BIONIC__)
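  // Create a thread that churns detached and joined threads in the background
  // while the main thread repeatedly calls android_run_on_all_threads(), to
  // exercise the thread-list synchronization against concurrent thread
  // creation and exit.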
  pthread_t t;
  ASSERT_EQ(
      0, pthread_create(
             &t, nullptr,
             [](void*) -> void* {
               pthread_attr_t detached;
               if (pthread_attr_init(&detached) != 0 ||
                   pthread_attr_setdetachstate(&detached, PTHREAD_CREATE_DETACHED) != 0) {
                 return reinterpret_cast<void*>(errno);
               }

               for (int i = 0; i != 1000; ++i) {
                 pthread_t t1, t2;
                 if (pthread_create(
                         &t1, &detached, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_create(
                         &t2, nullptr, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_join(t2, nullptr) != 0) {
                   return reinterpret_cast<void*>(errno);
                 }
               }

               if (pthread_attr_destroy(&detached) != 0) {
                 return reinterpret_cast<void*>(errno);
               }
               return nullptr;
             },
             nullptr));

  for (int i = 0; i != 1000; ++i) {
    ASSERT_TRUE(android_run_on_all_threads([](void* arg) { return arg == nullptr; }, nullptr));
  }

  void* retval;
  ASSERT_EQ(0, pthread_join(t, &retval));
  ASSERT_EQ(nullptr, retval);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}