/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "BionicDeathTest.h" // For selftest.

namespace testing {
namespace internal {

// Reuse testing::internal::ColoredPrintf from gtest.
enum GTestColor {
  COLOR_DEFAULT,
  COLOR_RED,
  COLOR_GREEN,
  COLOR_YELLOW
};

void ColoredPrintf(GTestColor color, const char* fmt, ...);

}  // namespace internal
}  // namespace testing

using testing::internal::GTestColor;
using testing::internal::COLOR_DEFAULT;
using testing::internal::COLOR_RED;
using testing::internal::COLOR_GREEN;
using testing::internal::COLOR_YELLOW;
using testing::internal::ColoredPrintf;

constexpr int DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS = 60000;
constexpr int DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS = 2000;

// The time limit for each test before it is killed for timing out.
// It takes effect only with the --isolate option.
static int global_test_run_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;

// The running time after which a test is warned about for taking too long.
// It takes effect only with the --isolate option.
static int global_test_run_warnline_ms = DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS;

// Return the deadline duration for a test, in ms.
static int GetDeadlineInfo(const std::string& /*test_name*/) {
  return global_test_run_deadline_ms;
}

// Return the warnline duration for a test, in ms.
static int GetWarnlineInfo(const std::string& /*test_name*/) {
  return global_test_run_warnline_ms;
}

static void PrintHelpInfo() {
  printf("Bionic Unit Test Options:\n"
         "  -j [JOB_COUNT] or -j[JOB_COUNT]\n"
         "      Run up to JOB_COUNT tests in parallel.\n"
         "      Use isolation mode, running each test in a separate process.\n"
         "      If JOB_COUNT is not given, it is set to the count of available processors.\n"
         "  --no-isolate\n"
         "      Don't use isolation mode; run all tests in a single process.\n"
         "  --deadline=[TIME_IN_MS]\n"
         "      Run each test in no longer than [TIME_IN_MS] ms.\n"
         "      It takes effect only in isolation mode. Default deadline is 60000 ms.\n"
         "  --warnline=[TIME_IN_MS]\n"
         "      Tests running longer than [TIME_IN_MS] ms will generate a warning.\n"
         "      It takes effect only in isolation mode. Default warnline is 2000 ms.\n"
         "  --gtest-filter=POSITIVE_PATTERNS[-NEGATIVE_PATTERNS]\n"
         "      Used as a synonym for the --gtest_filter option in gtest.\n"
         "Default bionic unit test option is -j.\n"
         "In isolation mode, you can send SIGQUIT to the parent process to show the\n"
         "currently running tests, or send SIGINT to the parent process to stop testing\n"
         "and clean up the currently running tests.\n"
         "\n");
}

enum TestResult {
  TEST_SUCCESS = 0,
  TEST_FAILED,
  TEST_TIMEOUT
};

class Test {
 public:
  Test() {} // For std::vector<Test>.
  explicit Test(const char* name) : name_(name) {}

  const std::string& GetName() const { return name_; }

  void SetResult(TestResult result) { result_ = result; }

  TestResult GetResult() const { return result_; }

  void SetTestTime(int64_t elapsed_time_ns) { elapsed_time_ns_ = elapsed_time_ns; }

  int64_t GetTestTime() const { return elapsed_time_ns_; }

  void AppendTestOutput(const std::string& s) { output_ += s; }

  const std::string& GetTestOutput() const { return output_; }

 private:
  const std::string name_;
  TestResult result_;
  int64_t elapsed_time_ns_;
  std::string output_;
};

class TestCase {
 public:
  TestCase() {} // For std::vector<TestCase>.
  explicit TestCase(const char* name) : name_(name) {}

  const std::string& GetName() const { return name_; }

  void AppendTest(const char* test_name) {
    test_list_.push_back(Test(test_name));
  }

  size_t TestCount() const { return test_list_.size(); }

  std::string GetTestName(size_t test_id) const {
    VerifyTestId(test_id);
    return name_ + "." + test_list_[test_id].GetName();
  }

  Test& GetTest(size_t test_id) {
    VerifyTestId(test_id);
    return test_list_[test_id];
  }

  const Test& GetTest(size_t test_id) const {
    VerifyTestId(test_id);
    return test_list_[test_id];
  }

  void SetTestResult(size_t test_id, TestResult result) {
    VerifyTestId(test_id);
    test_list_[test_id].SetResult(result);
  }

  TestResult GetTestResult(size_t test_id) const {
    VerifyTestId(test_id);
    return test_list_[test_id].GetResult();
  }

  void SetTestTime(size_t test_id, int64_t elapsed_time_ns) {
    VerifyTestId(test_id);
    test_list_[test_id].SetTestTime(elapsed_time_ns);
  }

  int64_t GetTestTime(size_t test_id) const {
    VerifyTestId(test_id);
    return test_list_[test_id].GetTestTime();
  }

 private:
  void VerifyTestId(size_t test_id) const {
    if (test_id >= test_list_.size()) {
      fprintf(stderr, "test_id %zu out of range [0, %zu)\n", test_id, test_list_.size());
      exit(1);
    }
  }

 private:
  const std::string name_;
  std::vector<Test> test_list_;
};

class TestResultPrinter : public testing::EmptyTestEventListener {
 public:
  TestResultPrinter() : pinfo_(NULL) {}
  virtual void OnTestStart(const testing::TestInfo& test_info) {
    pinfo_ = &test_info; // Record test_info for use in OnTestPartResult.
  }
  virtual void OnTestPartResult(const testing::TestPartResult& result);

 private:
  const testing::TestInfo* pinfo_;
};

// Called after an assertion failure.
void TestResultPrinter::OnTestPartResult(const testing::TestPartResult& result) {
  // If the test part succeeded, we don't need to do anything.
  if (result.type() == testing::TestPartResult::kSuccess)
    return;

  // Print the failure message from the assertion (e.g. expected this and got that).
  printf("%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), result.line_number(),
         pinfo_->test_case_name(), pinfo_->name(), result.message());
  fflush(stdout);
}

static int64_t NanoTime() {
  struct timespec t;
  t.tv_sec = t.tv_nsec = 0;
  clock_gettime(CLOCK_MONOTONIC, &t);
  return static_cast<int64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec;
}

static bool EnumerateTests(int argc, char** argv, std::vector<TestCase>& testcase_list) {
  std::string command;
  for (int i = 0; i < argc; ++i) {
    command += argv[i];
    command += " ";
  }
  command += "--gtest_list_tests";
  FILE* fp = popen(command.c_str(), "r");
  if (fp == NULL) {
    perror("popen");
    return false;
  }

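  // A --gtest_list_tests run prints each test case name ending with '.' at the start of a
  // line, followed by its test names on indented lines, e.g. (names here are illustrative):
  //   string.
  //     strcmp
  //     strlen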
  char buf[200];
  while (fgets(buf, sizeof(buf), fp) != NULL) {
    char* p = buf;

    while (*p != '\0' && isspace(*p)) {
      ++p;
    }
    if (*p == '\0') continue;
    char* start = p;
    while (*p != '\0' && !isspace(*p)) {
      ++p;
    }
    char* end = p;
    while (*p != '\0' && isspace(*p)) {
      ++p;
    }
    if (*p != '\0') {
      // This is not what we want; gtest must have hit an error while parsing the arguments.
      fprintf(stderr, "argument error, check with --help\n");
      return false;
    }
    *end = '\0';
    if (*(end - 1) == '.') {
      *(end - 1) = '\0';
      testcase_list.push_back(TestCase(start));
    } else {
      testcase_list.back().AppendTest(start);
    }
  }
  int result = pclose(fp);
  return (result != -1 && WEXITSTATUS(result) == 0);
}

// Parts of the following *Print functions are copied from external/gtest/src/gtest.cc:
// PrettyUnitTestResultPrinter. The reason for copying is that PrettyUnitTestResultPrinter
// is defined and used in gtest.cc, which makes it hard to reuse.
static void OnTestIterationStartPrint(const std::vector<TestCase>& testcase_list, size_t iteration,
                                      int iteration_count) {
  if (iteration_count != 1) {
    printf("\nRepeating all tests (iteration %zu) . . .\n\n", iteration);
  }
  ColoredPrintf(COLOR_GREEN, "[==========] ");

  size_t testcase_count = testcase_list.size();
  size_t test_count = 0;
  for (const auto& testcase : testcase_list) {
    test_count += testcase.TestCount();
  }

  printf("Running %zu %s from %zu %s.\n",
         test_count, (test_count == 1) ? "test" : "tests",
         testcase_count, (testcase_count == 1) ? "test case" : "test cases");
  fflush(stdout);
}

// The bionic cts test needs the gtest output format.
#if defined(USING_GTEST_OUTPUT_FORMAT)

static void OnTestEndPrint(const TestCase& testcase, size_t test_id) {
  ColoredPrintf(COLOR_GREEN, "[ RUN      ] ");
  printf("%s\n", testcase.GetTestName(test_id).c_str());

  const std::string& test_output = testcase.GetTest(test_id).GetTestOutput();
  printf("%s", test_output.c_str());

  TestResult result = testcase.GetTestResult(test_id);
  if (result == TEST_SUCCESS) {
    ColoredPrintf(COLOR_GREEN, "[       OK ] ");
  } else {
    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
  }
  printf("%s", testcase.GetTestName(test_id).c_str());
  if (testing::GTEST_FLAG(print_time)) {
    printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000);
  }
  printf("\n");
  fflush(stdout);
}

#else  // !defined(USING_GTEST_OUTPUT_FORMAT)

static void OnTestEndPrint(const TestCase& testcase, size_t test_id) {
  TestResult result = testcase.GetTestResult(test_id);
  if (result == TEST_SUCCESS) {
    ColoredPrintf(COLOR_GREEN, "[    OK    ] ");
  } else if (result == TEST_FAILED) {
    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
  } else if (result == TEST_TIMEOUT) {
    ColoredPrintf(COLOR_RED, "[ TIMEOUT  ] ");
  }

  printf("%s", testcase.GetTestName(test_id).c_str());
  if (testing::GTEST_FLAG(print_time)) {
    printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000);
  }
  printf("\n");

  const std::string& test_output = testcase.GetTest(test_id).GetTestOutput();
  printf("%s", test_output.c_str());
  fflush(stdout);
}

#endif  // !defined(USING_GTEST_OUTPUT_FORMAT)

static void OnTestIterationEndPrint(const std::vector<TestCase>& testcase_list, size_t /*iteration*/,
                                    int64_t elapsed_time_ns) {

  std::vector<std::string> fail_test_name_list;
  std::vector<std::pair<std::string, int64_t>> timeout_test_list;

  // For tests that exceeded the warnline but did not time out.
  std::vector<std::tuple<std::string, int64_t, int>> slow_test_list;
  size_t testcase_count = testcase_list.size();
  size_t test_count = 0;
  size_t success_test_count = 0;

  for (const auto& testcase : testcase_list) {
    test_count += testcase.TestCount();
    for (size_t i = 0; i < testcase.TestCount(); ++i) {
      TestResult result = testcase.GetTestResult(i);
      if (result == TEST_SUCCESS) {
        ++success_test_count;
      } else if (result == TEST_FAILED) {
        fail_test_name_list.push_back(testcase.GetTestName(i));
      } else if (result == TEST_TIMEOUT) {
        timeout_test_list.push_back(std::make_pair(testcase.GetTestName(i),
                                                   testcase.GetTestTime(i)));
      }
      if (result != TEST_TIMEOUT &&
          testcase.GetTestTime(i) / 1000000 >= GetWarnlineInfo(testcase.GetTestName(i))) {
        slow_test_list.push_back(std::make_tuple(testcase.GetTestName(i),
                                                 testcase.GetTestTime(i),
                                                 GetWarnlineInfo(testcase.GetTestName(i))));
      }
    }
  }

  ColoredPrintf(COLOR_GREEN, "[==========] ");
  printf("%zu %s from %zu %s ran.", test_count, (test_count == 1) ? "test" : "tests",
         testcase_count, (testcase_count == 1) ? "test case" : "test cases");
  if (testing::GTEST_FLAG(print_time)) {
    printf(" (%" PRId64 " ms total)", elapsed_time_ns / 1000000);
  }
  printf("\n");
  ColoredPrintf(COLOR_GREEN, "[   PASS   ] ");
  printf("%zu %s.\n", success_test_count, (success_test_count == 1) ? "test" : "tests");

  // Print failed tests.
  size_t fail_test_count = fail_test_name_list.size();
  if (fail_test_count > 0) {
    ColoredPrintf(COLOR_RED, "[   FAIL   ] ");
    printf("%zu %s, listed below:\n", fail_test_count, (fail_test_count == 1) ? "test" : "tests");
    for (const auto& name : fail_test_name_list) {
      ColoredPrintf(COLOR_RED, "[   FAIL   ] ");
      printf("%s\n", name.c_str());
    }
  }

  // Print tests that timed out.
  size_t timeout_test_count = timeout_test_list.size();
  if (timeout_test_count > 0) {
    ColoredPrintf(COLOR_RED, "[ TIMEOUT  ] ");
    printf("%zu %s, listed below:\n", timeout_test_count, (timeout_test_count == 1) ? "test" : "tests");
    for (const auto& timeout_pair : timeout_test_list) {
      ColoredPrintf(COLOR_RED, "[ TIMEOUT  ] ");
      printf("%s (stopped at %" PRId64 " ms)\n", timeout_pair.first.c_str(),
             timeout_pair.second / 1000000);
    }
  }

  // Print tests that exceeded the warnline.
  size_t slow_test_count = slow_test_list.size();
  if (slow_test_count > 0) {
    ColoredPrintf(COLOR_YELLOW, "[   SLOW   ] ");
    printf("%zu %s, listed below:\n", slow_test_count, (slow_test_count == 1) ? "test" : "tests");
    for (const auto& slow_tuple : slow_test_list) {
      ColoredPrintf(COLOR_YELLOW, "[   SLOW   ] ");
      printf("%s (%" PRId64 " ms, exceeded warnline %d ms)\n", std::get<0>(slow_tuple).c_str(),
             std::get<1>(slow_tuple) / 1000000, std::get<2>(slow_tuple));
    }
  }

  if (fail_test_count > 0) {
    printf("\n%2zu FAILED %s\n", fail_test_count, (fail_test_count == 1) ? "TEST" : "TESTS");
  }
  if (timeout_test_count > 0) {
    printf("%2zu TIMEOUT %s\n", timeout_test_count, (timeout_test_count == 1) ? "TEST" : "TESTS");
  }
  if (slow_test_count > 0) {
    printf("%2zu SLOW %s\n", slow_test_count, (slow_test_count == 1) ? "TEST" : "TESTS");
  }
  fflush(stdout);
}

// Output an xml file when --gtest_output is used. We write this function because we can't reuse
// gtest.cc:XmlUnitTestResultPrinter: it is defined entirely in gtest.cc and not exposed to the
// outside. What's more, as we don't run gtest in the parent process, we don't have the gtest
// classes needed by XmlUnitTestResultPrinter.
void OnTestIterationEndXmlPrint(const std::string& xml_output_filename,
                                const std::vector<TestCase>& testcase_list,
                                time_t epoch_iteration_start_time,
                                int64_t elapsed_time_ns) {
  FILE* fp = fopen(xml_output_filename.c_str(), "w");
  if (fp == NULL) {
    fprintf(stderr, "failed to open '%s': %s\n", xml_output_filename.c_str(), strerror(errno));
    exit(1);
  }

  size_t total_test_count = 0;
  size_t total_failed_count = 0;
  std::vector<size_t> failed_count_list(testcase_list.size(), 0);
  std::vector<int64_t> elapsed_time_list(testcase_list.size(), 0);
  for (size_t i = 0; i < testcase_list.size(); ++i) {
    auto& testcase = testcase_list[i];
    total_test_count += testcase.TestCount();
    for (size_t j = 0; j < testcase.TestCount(); ++j) {
      if (testcase.GetTestResult(j) != TEST_SUCCESS) {
        ++failed_count_list[i];
      }
      elapsed_time_list[i] += testcase.GetTestTime(j);
    }
    total_failed_count += failed_count_list[i];
  }

  const tm* time_struct = localtime(&epoch_iteration_start_time);
  char timestamp[40];
  snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d",
           time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday,
           time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec);

  fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp);
  fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
          total_test_count, total_failed_count);
  fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp, elapsed_time_ns / 1e9);
  for (size_t i = 0; i < testcase_list.size(); ++i) {
    auto& testcase = testcase_list[i];
    fprintf(fp, "  <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
            testcase.GetName().c_str(), testcase.TestCount(), failed_count_list[i]);
    fprintf(fp, " time=\"%.3lf\">\n", elapsed_time_list[i] / 1e9);

    for (size_t j = 0; j < testcase.TestCount(); ++j) {
      fprintf(fp, "    <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"",
              testcase.GetTest(j).GetName().c_str(), testcase.GetTestTime(j) / 1e9,
              testcase.GetName().c_str());
      if (testcase.GetTestResult(j) == TEST_SUCCESS) {
        fputs(" />\n", fp);
      } else {
        fputs(">\n", fp);
        const std::string& test_output = testcase.GetTest(j).GetTestOutput();
        fprintf(fp, "      <failure message=\"%s\" type=\"\">\n", test_output.c_str());
        fputs("      </failure>\n", fp);
        fputs("    </testcase>\n", fp);
      }
    }

    fputs("  </testsuite>\n", fp);
  }
  fputs("</testsuites>\n", fp);
  fclose(fp);
}

struct ChildProcInfo {
  pid_t pid;
  int64_t start_time_ns;
  int64_t end_time_ns;
  int64_t deadline_end_time_ns; // The time after which the test is considered to have timed out.
  size_t testcase_id, test_id;
  bool finished;
  bool timed_out;
  int exit_status;
  int child_read_fd; // File descriptor for reading the child's test output.
};

// Forked child process: run a single test.
static void ChildProcessFn(int argc, char** argv, const std::string& test_name) {
  char** new_argv = new char*[argc + 2];
  memcpy(new_argv, argv, sizeof(char*) * argc);

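  // Append "--gtest_filter=<test_name>" so this child process runs exactly one test.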
  char* filter_arg = new char [test_name.size() + 20];
  strcpy(filter_arg, "--gtest_filter=");
  strcat(filter_arg, test_name.c_str());
  new_argv[argc] = filter_arg;
  new_argv[argc + 1] = NULL;

  int new_argc = argc + 1;
  testing::InitGoogleTest(&new_argc, new_argv);
  int result = RUN_ALL_TESTS();
  exit(result);
}

static ChildProcInfo RunChildProcess(const std::string& test_name, int testcase_id, int test_id,
                                     sigset_t sigmask, int argc, char** argv) {
  int pipefd[2];
  int ret = pipe2(pipefd, O_NONBLOCK);
  if (ret == -1) {
    perror("pipe2 in RunTestInSeparateProc");
    exit(1);
  }
  pid_t pid = fork();
  if (pid == -1) {
    perror("fork in RunTestInSeparateProc");
    exit(1);
  } else if (pid == 0) {
    // In child process, run a single test.
    close(pipefd[0]);
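    // Redirect the child's stdout and stderr into the write end of the pipe,
    // so the parent can capture all of the test's output via pipefd[0].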
    close(STDOUT_FILENO);
    close(STDERR_FILENO);
    dup2(pipefd[1], STDOUT_FILENO);
    dup2(pipefd[1], STDERR_FILENO);

    if (sigprocmask(SIG_SETMASK, &sigmask, NULL) == -1) {
      perror("sigprocmask SIG_SETMASK");
      exit(1);
    }
    ChildProcessFn(argc, argv, test_name);
    // Unreachable.
  }
  // In parent process, initialize child process info.
  close(pipefd[1]);
  ChildProcInfo child_proc;
  child_proc.child_read_fd = pipefd[0];
  child_proc.pid = pid;
  child_proc.start_time_ns = NanoTime();
  child_proc.deadline_end_time_ns = child_proc.start_time_ns + GetDeadlineInfo(test_name) * 1000000LL;
  child_proc.testcase_id = testcase_id;
  child_proc.test_id = test_id;
  child_proc.finished = false;
  return child_proc;
}

static void HandleSignals(std::vector<TestCase>& testcase_list,
                          std::vector<ChildProcInfo>& child_proc_list) {
  sigset_t waiting_mask;
  sigemptyset(&waiting_mask);
  sigaddset(&waiting_mask, SIGINT);
  sigaddset(&waiting_mask, SIGQUIT);
  timespec timeout;
  timeout.tv_sec = timeout.tv_nsec = 0;
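  // A zero timeout makes sigtimedwait() poll: it returns immediately with EAGAIN
  // when no signal is pending, so this function never blocks.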
  while (true) {
    int signo = TEMP_FAILURE_RETRY(sigtimedwait(&waiting_mask, NULL, &timeout));
    if (signo == -1) {
      if (errno == EAGAIN) {
        return; // Timeout, no pending signals.
      }
      perror("sigtimedwait");
      exit(1);
    } else if (signo == SIGQUIT) {
      // Print currently running tests.
      printf("List of currently running tests:\n");
      for (auto& child_proc : child_proc_list) {
        if (child_proc.pid != 0) {
          std::string test_name = testcase_list[child_proc.testcase_id].GetTestName(child_proc.test_id);
          int64_t current_time_ns = NanoTime();
          int64_t run_time_ms = (current_time_ns - child_proc.start_time_ns) / 1000000;
          printf("  %s (%" PRId64 " ms)\n", test_name.c_str(), run_time_ms);
        }
      }
    } else if (signo == SIGINT) {
      // Kill currently running tests.
      for (auto& child_proc : child_proc_list) {
        if (child_proc.pid != 0) {
          // Send SIGKILL to ensure the child process can be killed unconditionally.
          kill(child_proc.pid, SIGKILL);
        }
      }
      // SIGINT kills the parent process as well.
      exit(1);
    }
  }
}

static bool CheckChildProcExit(pid_t exit_pid, int exit_status,
                               std::vector<ChildProcInfo>& child_proc_list) {
  for (size_t i = 0; i < child_proc_list.size(); ++i) {
    if (child_proc_list[i].pid == exit_pid) {
      child_proc_list[i].finished = true;
      child_proc_list[i].timed_out = false;
      child_proc_list[i].exit_status = exit_status;
      child_proc_list[i].end_time_ns = NanoTime();
      return true;
    }
  }
  return false;
}

static size_t CheckChildProcTimeout(std::vector<ChildProcInfo>& child_proc_list) {
  int64_t current_time_ns = NanoTime();
  size_t timeout_child_count = 0;
  for (size_t i = 0; i < child_proc_list.size(); ++i) {
    if (child_proc_list[i].deadline_end_time_ns <= current_time_ns) {
      child_proc_list[i].finished = true;
      child_proc_list[i].timed_out = true;
      child_proc_list[i].end_time_ns = current_time_ns;
      ++timeout_child_count;
    }
  }
  return timeout_child_count;
}

static void WaitChildProcs(std::vector<TestCase>& testcase_list,
                           std::vector<ChildProcInfo>& child_proc_list) {
  size_t finished_child_count = 0;
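  // Poll loop: reap exited children with waitpid(WNOHANG), mark children that have passed
  // their deadline as timed out, handle any pending SIGQUIT/SIGINT, and sleep briefly
  // between rounds. Return once at least one child has finished or timed out.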
  while (true) {
    int status;
    pid_t result;
    while ((result = TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG))) > 0) {
      if (CheckChildProcExit(result, status, child_proc_list)) {
        ++finished_child_count;
      }
    }

    if (result == -1) {
      if (errno == ECHILD) {
        // This happens when we have no running child processes.
        return;
      } else {
        perror("waitpid");
        exit(1);
      }
    } else if (result == 0) {
      finished_child_count += CheckChildProcTimeout(child_proc_list);
    }

    if (finished_child_count > 0) {
      return;
    }

    HandleSignals(testcase_list, child_proc_list);

    // sleep 1 ms to avoid busy looping.
    timespec sleep_time;
    sleep_time.tv_sec = 0;
    sleep_time.tv_nsec = 1000000;
    nanosleep(&sleep_time, NULL);
  }
}

static TestResult WaitForOneChild(pid_t pid) {
  int exit_status;
  pid_t result = TEMP_FAILURE_RETRY(waitpid(pid, &exit_status, 0));

  TestResult test_result = TEST_SUCCESS;
  if (result != pid || WEXITSTATUS(exit_status) != 0) {
    test_result = TEST_FAILED;
  }
  return test_result;
}

static void CollectChildTestResult(const ChildProcInfo& child_proc, TestCase& testcase) {
  int test_id = child_proc.test_id;
  testcase.SetTestTime(test_id, child_proc.end_time_ns - child_proc.start_time_ns);
  if (child_proc.timed_out) {
    // The child process marked as timed_out has not exited, and we should kill it manually.
    kill(child_proc.pid, SIGKILL);
    WaitForOneChild(child_proc.pid);
  }

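  // Drain the read end of the pipe. It was created with O_NONBLOCK, so read() returns -1
  // with errno == EAGAIN instead of blocking when no data is available.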
  while (true) {
    char buf[1024];
    ssize_t bytes_read = TEMP_FAILURE_RETRY(read(child_proc.child_read_fd, buf, sizeof(buf) - 1));
    if (bytes_read > 0) {
      buf[bytes_read] = '\0';
      testcase.GetTest(test_id).AppendTestOutput(buf);
    } else if (bytes_read == 0) {
      break; // Reached the end of the output.
    } else {
      if (errno == EAGAIN) {
        // No data is available. This rarely happens, only when the child process created other
        // processes which have not exited so far. But the child process has already exited or
        // been killed, so the test has finished, and we shouldn't wait further.
        break;
      }
      perror("read child_read_fd in RunTestInSeparateProc");
      exit(1);
    }
  }
  close(child_proc.child_read_fd);

  if (child_proc.timed_out) {
    testcase.SetTestResult(test_id, TEST_TIMEOUT);
    char buf[1024];
    snprintf(buf, sizeof(buf), "%s killed because of timeout at %" PRId64 " ms.\n",
             testcase.GetTestName(test_id).c_str(), testcase.GetTestTime(test_id) / 1000000);
    testcase.GetTest(test_id).AppendTestOutput(buf);

  } else if (WIFSIGNALED(child_proc.exit_status)) {
    // Record a signal-terminated test as failed.
    testcase.SetTestResult(test_id, TEST_FAILED);
    char buf[1024];
    snprintf(buf, sizeof(buf), "%s terminated by signal: %s.\n",
             testcase.GetTestName(test_id).c_str(), strsignal(WTERMSIG(child_proc.exit_status)));
    testcase.GetTest(test_id).AppendTestOutput(buf);

  } else {
    testcase.SetTestResult(test_id, WEXITSTATUS(child_proc.exit_status) == 0 ?
                           TEST_SUCCESS : TEST_FAILED);
  }
}

// We choose multi-fork and multi-wait here instead of multi-threading, because using fork in a
// multi-threaded program easily leads to deadlock.
// Returns true if all tests run successfully, otherwise returns false.
static bool RunTestInSeparateProc(int argc, char** argv, std::vector<TestCase>& testcase_list,
                                  int iteration_count, size_t job_count,
                                  const std::string& xml_output_filename) {
  // Stop the default result printer to avoid printing environment setup/teardown information
  // for each test.
  testing::UnitTest::GetInstance()->listeners().Release(
      testing::UnitTest::GetInstance()->listeners().default_result_printer());
  testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter);

  // Signals are blocked here as we want to handle them in HandleSignals() later.
  sigset_t block_mask, orig_mask;
  sigemptyset(&block_mask);
  sigaddset(&block_mask, SIGINT);
  sigaddset(&block_mask, SIGQUIT);
  if (sigprocmask(SIG_BLOCK, &block_mask, &orig_mask) == -1) {
    perror("sigprocmask SIG_BLOCK");
    exit(1);
  }

  bool all_tests_passed = true;

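  // A negative iteration_count (from --gtest_repeat) means repeat the tests forever.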
  for (size_t iteration = 1;
       iteration_count < 0 || iteration <= static_cast<size_t>(iteration_count);
       ++iteration) {
    OnTestIterationStartPrint(testcase_list, iteration, iteration_count);
    int64_t iteration_start_time_ns = NanoTime();
    time_t epoch_iteration_start_time = time(NULL);

    // Run up to job_count tests in parallel, each test in a child process.
    std::vector<ChildProcInfo> child_proc_list;

    // Next test to run is [next_testcase_id:next_test_id].
    size_t next_testcase_id = 0;
    size_t next_test_id = 0;

    // Record how many tests have finished.
    std::vector<size_t> finished_test_count_list(testcase_list.size(), 0);
    size_t finished_testcase_count = 0;

    while (finished_testcase_count < testcase_list.size()) {
      // Run up to job_count child processes.
      while (child_proc_list.size() < job_count && next_testcase_id < testcase_list.size()) {
        std::string test_name = testcase_list[next_testcase_id].GetTestName(next_test_id);
        ChildProcInfo child_proc = RunChildProcess(test_name, next_testcase_id, next_test_id,
                                                   orig_mask, argc, argv);
        child_proc_list.push_back(child_proc);
        if (++next_test_id == testcase_list[next_testcase_id].TestCount()) {
          next_test_id = 0;
          ++next_testcase_id;
        }
      }

      // Wait for any child process to finish or time out.
      WaitChildProcs(testcase_list, child_proc_list);

      // Collect results.
      auto it = child_proc_list.begin();
      while (it != child_proc_list.end()) {
        auto& child_proc = *it;
        if (child_proc.finished == true) {
          size_t testcase_id = child_proc.testcase_id;
          size_t test_id = child_proc.test_id;
          TestCase& testcase = testcase_list[testcase_id];

          CollectChildTestResult(child_proc, testcase);
          OnTestEndPrint(testcase, test_id);

          if (++finished_test_count_list[testcase_id] == testcase.TestCount()) {
            ++finished_testcase_count;
          }
          if (testcase.GetTestResult(test_id) != TEST_SUCCESS) {
            all_tests_passed = false;
          }

          it = child_proc_list.erase(it);
        } else {
          ++it;
        }
      }
    }

    int64_t elapsed_time_ns = NanoTime() - iteration_start_time_ns;
    OnTestIterationEndPrint(testcase_list, iteration, elapsed_time_ns);
    if (!xml_output_filename.empty()) {
      OnTestIterationEndXmlPrint(xml_output_filename, testcase_list, epoch_iteration_start_time,
                                 elapsed_time_ns);
    }
  }

  // Restore the signal mask.
  if (sigprocmask(SIG_SETMASK, &orig_mask, NULL) == -1) {
    perror("sigprocmask SIG_SETMASK");
    exit(1);
  }

  return all_tests_passed;
}

static size_t GetProcessorCount() {
  return static_cast<size_t>(sysconf(_SC_NPROCESSORS_ONLN));
}

static void AddPathSeparatorInTestProgramPath(std::vector<char*>& args) {
  // To run a DeathTest in threadsafe mode, gtest requires that the test program be invoked via
  // a valid path that contains at least one path separator. The reason is that gtest uses
  // clone() + execve() to run DeathTests in threadsafe mode, and execve() doesn't search the
  // PATH environment variable, so execve() will not succeed unless we specify the absolute or
  // relative path of the test program directly.
  if (strchr(args[0], '/') == NULL) {
    char path[PATH_MAX];
    ssize_t path_len = readlink("/proc/self/exe", path, sizeof(path));
    if (path_len <= 0 || path_len >= static_cast<ssize_t>(sizeof(path))) {
      perror("readlink");
      exit(1);
    }
    path[path_len] = '\0';
    args[0] = strdup(path);
  }
}

static void AddGtestFilterSynonym(std::vector<char*>& args) {
  // Support --gtest-filter as a synonym for --gtest_filter.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strncmp(args[i], "--gtest-filter", strlen("--gtest-filter")) == 0) {
      args[i][7] = '_';
    }
  }
}

struct IsolationTestOptions {
  bool isolate;
  size_t job_count;
  int test_deadline_ms;
  int test_warnline_ms;
  std::string gtest_color;
  bool gtest_print_time;
  int gtest_repeat;
  std::string gtest_output;
};

// Pick out the options not meant for gtest: there are two parts in args, one part is used in
// isolation test mode as described in PrintHelpInfo(), the other part is handled by
// testing::InitGoogleTest() in gtest. PickOptions() moves the first part into the
// IsolationTestOptions structure, leaving the second part in args.
// Arguments:
//   args is used to pass in all command arguments, and pass out only the part of options for gtest.
//   options is used to pass out test options in isolation mode.
// Returns false if there is an error in the arguments.
static bool PickOptions(std::vector<char*>& args, IsolationTestOptions& options) {
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--help") == 0 || strcmp(args[i], "-h") == 0) {
      PrintHelpInfo();
      options.isolate = false;
      return true;
    }
  }

  AddPathSeparatorInTestProgramPath(args);
  AddGtestFilterSynonym(args);

  // If the --bionic-selftest argument is used, only enable self tests; otherwise remove self tests.
  bool enable_selftest = false;
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--bionic-selftest") == 0) {
      // This argument enables "bionic_selftest*" for self testing, and is not shown in the help
      // info. Don't remove this option from the arguments.
      enable_selftest = true;
    }
  }
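  // Merge the user's --gtest_filter (if any) with a pattern that excludes bionic_selftest*,
  // unless the self tests were explicitly requested above.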
  std::string gtest_filter_str;
  for (size_t i = args.size() - 1; i >= 1; --i) {
    if (strncmp(args[i], "--gtest_filter=", strlen("--gtest_filter=")) == 0) {
      gtest_filter_str = std::string(args[i]);
      args.erase(args.begin() + i);
      break;
    }
  }
  if (enable_selftest == true) {
    args.push_back(strdup("--gtest_filter=bionic_selftest*"));
  } else {
    if (gtest_filter_str == "") {
      gtest_filter_str = "--gtest_filter=-bionic_selftest*";
    } else {
      // Check whether a '-' for NEGATIVE_PATTERNS exists.
      if (gtest_filter_str.find(":-") != std::string::npos) {
        gtest_filter_str += ":bionic_selftest*";
      } else {
        gtest_filter_str += ":-bionic_selftest*";
      }
    }
    args.push_back(strdup(gtest_filter_str.c_str()));
  }

  options.isolate = true;
  // Parse arguments that prevent us from running in isolation mode.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--no-isolate") == 0) {
      options.isolate = false;
    } else if (strcmp(args[i], "--gtest_list_tests") == 0) {
      options.isolate = false;
    }
  }

  // Stop parsing if we will not run in isolation mode.
  if (options.isolate == false) {
    return true;
  }

  // Init default isolation test options.
  options.job_count = GetProcessorCount();
  options.test_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;
  options.test_warnline_ms = DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS;
  options.gtest_color = testing::GTEST_FLAG(color);
  options.gtest_print_time = testing::GTEST_FLAG(print_time);
  options.gtest_repeat = testing::GTEST_FLAG(repeat);
  options.gtest_output = testing::GTEST_FLAG(output);

  // Parse arguments specified for isolation mode.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strncmp(args[i], "-j", strlen("-j")) == 0) {
      char* p = args[i] + strlen("-j");
      int count = 0;
      if (*p != '\0') {
        // Argument like -j5.
        count = atoi(p);
      } else if (args.size() > i + 1) {
        // Arguments like -j 5.
        count = atoi(args[i + 1]);
        ++i;
      }
      if (count <= 0) {
        fprintf(stderr, "invalid job count: %d\n", count);
        return false;
      }
      options.job_count = static_cast<size_t>(count);
    } else if (strncmp(args[i], "--deadline=", strlen("--deadline=")) == 0) {
      int time_ms = atoi(args[i] + strlen("--deadline="));
      if (time_ms <= 0) {
        fprintf(stderr, "invalid deadline: %d\n", time_ms);
        return false;
      }
      options.test_deadline_ms = time_ms;
    } else if (strncmp(args[i], "--warnline=", strlen("--warnline=")) == 0) {
      int time_ms = atoi(args[i] + strlen("--warnline="));
      if (time_ms <= 0) {
        fprintf(stderr, "invalid warnline: %d\n", time_ms);
        return false;
      }
      options.test_warnline_ms = time_ms;
    } else if (strncmp(args[i], "--gtest_color=", strlen("--gtest_color=")) == 0) {
      options.gtest_color = args[i] + strlen("--gtest_color=");
    } else if (strcmp(args[i], "--gtest_print_time=0") == 0) {
      options.gtest_print_time = false;
    } else if (strncmp(args[i], "--gtest_repeat=", strlen("--gtest_repeat=")) == 0) {
      // If the value of gtest_repeat is < 0, it indicates the tests should be repeated forever.
      options.gtest_repeat = atoi(args[i] + strlen("--gtest_repeat="));
      // Remove --gtest_repeat=xx from the arguments, so the child process only runs one
      // iteration of a single test.
      args.erase(args.begin() + i);
      --i;
    } else if (strncmp(args[i], "--gtest_output=", strlen("--gtest_output=")) == 0) {
      std::string output = args[i] + strlen("--gtest_output=");
      // Generate the output xml file path according to the strategy in gtest.
      bool success = true;
      if (strncmp(output.c_str(), "xml:", strlen("xml:")) == 0) {
        output = output.substr(strlen("xml:"));
        if (output.size() == 0) {
          success = false;
        }
        // Make an absolute path.
        if (success && output[0] != '/') {
          char* cwd = getcwd(NULL, 0);
          if (cwd != NULL) {
            output = std::string(cwd) + "/" + output;
            free(cwd);
          } else {
            success = false;
          }
        }
        // Add a file name if the output is a directory.
        if (success && output.back() == '/') {
          output += "test_details.xml";
        }
      }
      if (success) {
        options.gtest_output = output;
      } else {
        fprintf(stderr, "invalid gtest_output file: %s\n", args[i]);
        return false;
      }

      // Remove --gtest_output=xxx from the arguments, so the child process will not write an xml file.
      args.erase(args.begin() + i);
      --i;
    }
  }

  // Add --no-isolate to args to prevent the child process from running in isolation mode again.
  // As a DeathTest will try to call execve(), this argument should always be added.
  args.insert(args.begin() + 1, strdup("--no-isolate"));
  return true;
}

int main(int argc, char** argv) {
  std::vector<char*> arg_list;
  for (int i = 0; i < argc; ++i) {
    arg_list.push_back(argv[i]);
  }

  IsolationTestOptions options;
  if (PickOptions(arg_list, options) == false) {
    return 1;
  }

  if (options.isolate == true) {
    // Set global variables.
    global_test_run_deadline_ms = options.test_deadline_ms;
    global_test_run_warnline_ms = options.test_warnline_ms;
    testing::GTEST_FLAG(color) = options.gtest_color.c_str();
    testing::GTEST_FLAG(print_time) = options.gtest_print_time;
    std::vector<TestCase> testcase_list;

    argc = static_cast<int>(arg_list.size());
    arg_list.push_back(NULL);
    if (EnumerateTests(argc, arg_list.data(), testcase_list) == false) {
      return 1;
    }
    bool all_test_passed = RunTestInSeparateProc(argc, arg_list.data(), testcase_list,
                                                 options.gtest_repeat, options.job_count,
                                                 options.gtest_output);
    return all_test_passed ? 0 : 1;
  } else {
    argc = static_cast<int>(arg_list.size());
    arg_list.push_back(NULL);
    testing::InitGoogleTest(&argc, arg_list.data());
    return RUN_ALL_TESTS();
  }
}

//################################################################################
// Bionic gtest self test; run this with the --bionic-selftest option.

TEST(bionic_selftest, test_success) {
  ASSERT_EQ(1, 1);
}

TEST(bionic_selftest, test_fail) {
  ASSERT_EQ(0, 1);
}

TEST(bionic_selftest, test_time_warn) {
  sleep(4);
}

TEST(bionic_selftest, test_timeout) {
  while (1) {}
}

TEST(bionic_selftest, test_signal_SEGV_terminated) {
  char* p = reinterpret_cast<char*>(static_cast<intptr_t>(atoi("0")));
  *p = 3;
}

class bionic_selftest_DeathTest : public BionicDeathTest {};

static void deathtest_helper_success() {
  ASSERT_EQ(1, 1);
  exit(0);
}

TEST_F(bionic_selftest_DeathTest, success) {
  ASSERT_EXIT(deathtest_helper_success(), ::testing::ExitedWithCode(0), "");
}

static void deathtest_helper_fail() {
  ASSERT_EQ(1, 0);
}

TEST_F(bionic_selftest_DeathTest, fail) {
  ASSERT_EXIT(deathtest_helper_fail(), ::testing::ExitedWithCode(0), "");
}