/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <err.h>
#include <inttypes.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <unistd.h>

#include <gtest/gtest.h>

#include <chrono>
#include <thread>
#include <vector>

#include <android-base/macros.h>
#include <android-base/threads.h>

#include "utils.h"

using namespace std::chrono_literals;

static void WaitUntilAllThreadsExited(pid_t* tids, size_t tid_count) {
  // Wait until all children have exited.
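  // Probing a tid with tgkill(pid, tid, 0) delivers no signal; it only checks
  // whether that thread still exists, failing with ESRCH once it has exited.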
  bool alive = true;
  while (alive) {
    alive = false;
    for (size_t i = 0; i < tid_count; ++i) {
      if (tids[i] != 0) {
        if (syscall(__NR_tgkill, getpid(), tids[i], 0) == 0) {
          alive = true;
        } else {
          EXPECT_ERRNO(ESRCH);
          tids[i] = 0;  // Skip in next loop.
        }
      }
    }
    sched_yield();
  }
}

class LeakChecker {
 public:
  LeakChecker() {
    // Avoid resizing and using memory later.
    // 64Ki is the default limit on VMAs per process.
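    // (The kernel's vm.max_map_count sysctl defaults to just under 64Ki.)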
    maps_.reserve(64*1024);
    Reset();
  }

  ~LeakChecker() {
    Check();
  }

  void Reset() {
    previous_size_ = GetMappingSize();
  }

  void DumpTo(std::ostream& os) const {
    os << previous_size_;
  }

 private:
  size_t previous_size_;
  std::vector<map_record> maps_;

  void Check() {
    auto current_size = GetMappingSize();
    if (current_size > previous_size_) {
      FAIL() << "increase in process map size: " << previous_size_ << " -> " << current_size;
    }
  }

  size_t GetMappingSize() {
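    // Sum the sizes of all of the process's mappings; a leaked thread leaves
    // its stack mapping behind, so it shows up as growth in this total.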
    if (!Maps::parse_maps(&maps_)) {
      err(1, "failed to parse maps");
    }

    size_t result = 0;
    for (const map_record& map : maps_) {
      result += map.addr_end - map.addr_start;
    }

    return result;
  }
};

std::ostream& operator<<(std::ostream& os, const LeakChecker& lc) {
  lc.DumpTo(os);
  return os;
}

// http://b/36045112
TEST(pthread_leak, join) {
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/37920774

  // Warm up. HWASan allocates an extra page on the first iteration, but never after.
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
  ASSERT_EQ(0, pthread_join(thread, nullptr));

  LeakChecker lc;

  for (int i = 0; i < 100; ++i) {
    ASSERT_EQ(0, pthread_create(
                     &thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
}

// http://b/36045112
TEST(pthread_leak, detach) {
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/37920774

  LeakChecker lc;

  // Ancient devices with only 2 cores need a lower limit.
  // http://b/129924384 and https://issuetracker.google.com/142210680.
  const int thread_count = (sysconf(_SC_NPROCESSORS_CONF) > 2) ? 100 : 50;

  for (size_t pass = 0; pass < 1; ++pass) {
    struct thread_data { pthread_barrier_t* barrier; pid_t* tid; } threads[thread_count];

    pthread_barrier_t barrier;
    ASSERT_EQ(pthread_barrier_init(&barrier, nullptr, thread_count + 1), 0);

    // Start child threads.
    pid_t tids[thread_count];
    for (int i = 0; i < thread_count; ++i) {
      threads[i] = {&barrier, &tids[i]};
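      // The unary '+' forces the captureless lambda to decay to a plain
      // function pointer, the start routine type pthread_create expects.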
      const auto thread_function = +[](void* ptr) -> void* {
        thread_data* data = static_cast<thread_data*>(ptr);
        *data->tid = gettid();
        pthread_barrier_wait(data->barrier);
        return nullptr;
      };
      pthread_t thread;
      ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, &threads[i]));
      ASSERT_EQ(0, pthread_detach(thread));
    }

    pthread_barrier_wait(&barrier);
    ASSERT_EQ(pthread_barrier_destroy(&barrier), 0);

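    // Detached threads can't be joined, so poll until every child tid has
    // actually gone away (and its stack can be reclaimed) before the leak
    // check runs.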
    WaitUntilAllThreadsExited(tids, thread_count);

    // TODO(b/158573595): the test is flaky without the warmup pass.
    if (pass == 0) lc.Reset();
  }
}