#!/usr/bin/env python3
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
17"""Release test for simpleperf prebuilts.
18
19It includes below tests:
201. Test profiling Android apps on different Android versions (starting from Android N).
212. Test simpleperf python scripts on different Hosts (linux, darwin and windows) on x86_64.
223. Test using both devices and emulators.
234. Test using both `adb root` and `adb unroot`.
24
25"""

import argparse
from dataclasses import dataclass
import fnmatch
import inspect
import multiprocessing as mp
import os
from pathlib import Path
import re
import shutil
import sys
import time
from tqdm import tqdm
import types
from typing import Dict, List, Optional, Tuple
import unittest

from simpleperf_utils import extant_dir, log_exit, remove, ArgParseFormatter

from .api_profiler_test import *
from .app_profiler_test import *
from .app_test import *
from .binary_cache_builder_test import *
from .cpp_app_test import *
from .debug_unwind_reporter_test import *
from .inferno_test import *
from .java_app_test import *
from .kotlin_app_test import *
from .pprof_proto_generator_test import *
from .purgatorio_test import *
from .report_html_test import *
from .report_lib_test import *
from .run_simpleperf_on_device_test import *
from .tools_test import *
from .test_utils import TestHelper


def get_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=ArgParseFormatter)
    parser.add_argument('--browser', action='store_true', help='Open report html file in browser.')
    parser.add_argument(
        '-d', '--device', nargs='+',
        help='Set devices used to run tests. Each device is in the format name:serial-number.')
    parser.add_argument('--only-host-test', action='store_true', help='Only run host tests.')
    parser.add_argument('--list-tests', action='store_true', help='List tests.')
    parser.add_argument('--ndk-path', type=extant_dir, help='Set the path of an NDK release.')
    parser.add_argument('-p', '--pattern', nargs='+',
                        help='Run tests matching the selected pattern.')
    parser.add_argument('-r', '--repeat', type=int, default=1, help='Times to repeat tests.')
    parser.add_argument('--test-from', help='Run tests starting from the selected test.')
    parser.add_argument('--test-dir', default='test_dir', help='Directory to store test results.')
    return parser.parse_args()


def get_all_tests() -> List[str]:
    tests = []
    for name, value in globals().items():
        if isinstance(value, type) and issubclass(value, unittest.TestCase):
            for member_name, member in inspect.getmembers(value):
                if isinstance(member, (types.MethodType, types.FunctionType)):
                    if member_name.startswith('test'):
                        tests.append(name + '.' + member_name)
    return sorted(tests)


def get_host_tests() -> List[str]:
    def filter_fn(test: str) -> bool:
        return get_test_type(test) == 'host_test'
    return list(filter(filter_fn, get_all_tests()))


def get_filtered_tests(
        tests: List[str],
        test_from: Optional[str],
        test_pattern: Optional[List[str]]) -> List[str]:
    if test_from:
        try:
            tests = tests[tests.index(test_from):]
        except ValueError:
            log_exit("Can't find test %s" % test_from)
    if test_pattern:
        patterns = [re.compile(fnmatch.translate(x)) for x in test_pattern]
        tests = [t for t in tests if any(pattern.match(t) for pattern in patterns)]
        if not tests:
            log_exit('No tests are matched.')
    return tests

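# For example (hypothetical test names), with
# tests == ['TestApps.test_a', 'TestTools.test_b', 'TestTools.test_c']:
#   get_filtered_tests(tests, 'TestTools.test_b', None)
#       -> ['TestTools.test_b', 'TestTools.test_c']
#   get_filtered_tests(tests, None, ['TestTools.*'])
#       -> ['TestTools.test_b', 'TestTools.test_c']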

def get_test_type(test: str) -> Optional[str]:
    testcase_name, test_name = test.split('.')
    if test_name == 'test_run_simpleperf_without_usb_connection':
        return 'device_serialized_test'
    if testcase_name in (
            'TestApiProfiler', 'TestNativeProfiling', 'TestNativeLibDownloader',
            'TestRecordingRealApps', 'TestRunSimpleperfOnDevice'):
        return 'device_test'
    if testcase_name.startswith('TestExample'):
        return 'device_test'
    if testcase_name in ('TestBinaryCacheBuilder', 'TestDebugUnwindReporter', 'TestInferno',
                         'TestPprofProtoGenerator', 'TestPurgatorio', 'TestReportHtml',
                         'TestReportLib', 'TestTools'):
        return 'host_test'
    return None

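# For example, get_test_type('TestReportLib.test_symbol') == 'host_test' and
# get_test_type('TestExampleJava.test_app') == 'device_test' (the method names
# here are illustrative).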

def build_testdata(testdata_dir: Path):
    """ Collect testdata in testdata_dir.
        In system/extras/simpleperf/scripts, testdata comes from:
            <script_dir>/../testdata, <script_dir>/test/script_testdata, <script_dir>/../demo
        In prebuilts/simpleperf, testdata comes from:
            <script_dir>/test/testdata
    """
    testdata_dir.mkdir()

    script_test_dir = Path(__file__).resolve().parent
    script_dir = script_test_dir.parent

    source_dirs = [
        script_test_dir / 'script_testdata',
        script_test_dir / 'testdata',
        script_dir.parent / 'testdata',
        script_dir.parent / 'demo',
        script_dir.parent / 'runtest',
    ]

    for source_dir in source_dirs:
        if not source_dir.is_dir():
            continue
        for src_path in source_dir.iterdir():
            dest_path = testdata_dir / src_path.name
            if dest_path.exists():
                continue
            if src_path.is_file():
                shutil.copyfile(src_path, dest_path)
            elif src_path.is_dir():
                shutil.copytree(src_path, dest_path)

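# Copies are first-wins: since existing dest_path names are skipped, a file
# named foo.data (hypothetical) in script_testdata shadows a file of the same
# name in a later source dir.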

def run_tests(tests: List[str]) -> bool:
    argv = [sys.argv[0]] + tests
    test_runner = unittest.TextTestRunner(stream=TestHelper.log_fh, verbosity=0)
    test_program = unittest.main(argv=argv, testRunner=test_runner,
                                 exit=False, verbosity=0, module='test.do_test')
    return test_program.result.wasSuccessful()

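# unittest.main() resolves each dotted name in argv against the named module,
# so argv is effectively ['do_test.py', 'TestTools.test_b', ...] (test names
# here are illustrative) and only those tests run.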

def test_process_entry(tests: List[str], test_options: List[str], conn: mp.connection.Connection):
    parser = argparse.ArgumentParser()
    parser.add_argument('--browser', action='store_true')
    parser.add_argument('--device', help='android device serial number')
    parser.add_argument('--ndk-path', type=extant_dir)
    parser.add_argument('--testdata-dir', type=extant_dir)
    parser.add_argument('--test-dir', help='directory to store test results')
    args = parser.parse_args(test_options)

    TestHelper.init(args.test_dir, args.testdata_dir,
                    args.browser, args.ndk_path, args.device, conn)
    run_tests(tests)

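# Rough shape of the parent/child protocol (a sketch; the exact strings are
# produced by TestHelper): after each test, the child writes one message of the
# form '<test_name> OK' or '<test_name> FAILED' to conn, and the parent's
# TestProcess.check_update() drains them via parent_conn.poll()/recv().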

@dataclass
class Device:
    name: str
    serial_number: str


@dataclass
class TestResult:
    try_time: int
    ok: bool


class TestProcess:
    """ Create a test process to run selected tests on a device. """

    TEST_MAX_TRY_TIME = 10
    TEST_TIMEOUT_IN_SEC = 10 * 60

    def __init__(
            self, test_type: str, tests: List[str],
            device: Optional[Device],
            repeat_index: int,
            test_options: List[str]):
        self.test_type = test_type
        self.tests = tests
        self.device = device
        self.repeat_index = repeat_index
        self.test_options = test_options
        self.try_time = 1
        self.test_results: Dict[str, TestResult] = {}
        self.parent_conn: Optional[mp.connection.Connection] = None
        self.proc: Optional[mp.Process] = None
        self.last_update_time = 0.0
        self._start_test_process()

    def _start_test_process(self):
        unfinished_tests = [test for test in self.tests if test not in self.test_results]
        self.parent_conn, child_conn = mp.Pipe(duplex=False)
        test_options = self.test_options[:]
        test_options += ['--test-dir', str(self.test_dir)]
        if self.device:
            test_options += ['--device', self.device.serial_number]
        self.proc = mp.Process(target=test_process_entry, args=(
            unfinished_tests, test_options, child_conn))
        self.proc.start()
        self.last_update_time = time.time()

    @property
    def name(self) -> str:
        name = self.test_type
        if self.device:
            name += '_' + self.device.name
        name += '_repeat_%d' % self.repeat_index
        return name

    @property
    def test_dir(self) -> Path:
        """ Directory to run the tests. """
        return Path.cwd() / (self.name + '_try_%d' % self.try_time)

    @property
    def alive(self) -> bool:
        """ Return whether the test process is alive. """
        return self.proc.is_alive()

    @property
    def finished(self) -> bool:
        """ Return whether all tests are finished. """
        return len(self.test_results) == len(self.tests)

    def check_update(self):
        """ Check if there is any test update. """
        try:
            while self.parent_conn.poll():
                msg = self.parent_conn.recv()
                self._process_msg(msg)
                self.last_update_time = time.time()
        except (EOFError, BrokenPipeError):
            # The child process has exited; any remaining results are lost.
            pass
        if time.time() - self.last_update_time > TestProcess.TEST_TIMEOUT_IN_SEC:
            # No progress for too long: assume the current test hangs and kill
            # the process. restart() can then retry the unfinished tests.
            self.proc.terminate()

    def _process_msg(self, msg: str):
        # Each message is '<test_name> <status>'; any status other than 'OK'
        # counts as a failure.
        test_name, test_success = msg.split()
        test_success = test_success == 'OK'
        self.test_results[test_name] = TestResult(self.try_time, test_success)

    def join(self):
        self.proc.join()

    def restart(self) -> bool:
        """ Create a new test process to run unfinished tests. """
        if self.finished:
            return False
        if self.try_time == self.TEST_MAX_TRY_TIME:
            # Exceeded the max try time, so mark the remaining tests as failed.
            for test in self.tests:
                if test not in self.test_results:
                    self.test_results[test] = TestResult(self.try_time, False)
            return False

        self.try_time += 1
        self._start_test_process()
        return True


class ProgressBar:
    def __init__(self, total_count: int):
        self.total_bar = tqdm(
            total=total_count, desc='test progress', ascii=' ##',
            bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}, {rate_fmt}]", position=0)
        self.test_process_bars: Dict[str, tqdm] = {}

    def update(self, test_proc: TestProcess):
        if test_proc.name not in self.test_process_bars:
            if not test_proc.alive:
                return
            bar = tqdm(total=len(test_proc.tests),
                       desc=test_proc.name, ascii=' ##',
                       bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}]")
            self.test_process_bars[test_proc.name] = bar
        else:
            bar = self.test_process_bars[test_proc.name]

        add = len(test_proc.test_results) - bar.n
        if add:
            bar.update(add)
            self.total_bar.update(add)
        if not test_proc.alive:
            bar.close()
            del self.test_process_bars[test_proc.name]

    def end_tests(self):
        for bar in self.test_process_bars.values():
            bar.close()
        self.total_bar.close()


class TestSummary:
    def __init__(self, test_count: int):
        self.summary_fh = open('test_summary.txt', 'w')
        self.failed_summary_fh = open('failed_test_summary.txt', 'w')
        self.results: Dict[Tuple[str, str], bool] = {}
        self.test_count = test_count

    @property
    def failed_test_count(self) -> int:
        return self.test_count - sum(1 for result in self.results.values() if result)

    def update(self, test_proc: TestProcess):
        for test, result in test_proc.test_results.items():
            key = (test, '%s_try_%s' % (test_proc.name, result.try_time))
            if key not in self.results:
                self.results[key] = result.ok
                self._write_result(key[0], key[1], result.ok)

    def _write_result(self, test_name: str, test_env: str, test_result: bool):
        print(
            '%s    %s    %s' % (test_name, test_env, 'OK' if test_result else 'FAILED'),
            file=self.summary_fh, flush=True)
        if not test_result:
            print('%s    %s    FAILED' % (test_name, test_env),
                  file=self.failed_summary_fh, flush=True)

    def end_tests(self):
        # Rewrite the same results in sorted order after testing.
        self.summary_fh.seek(0, 0)
        self.failed_summary_fh.seek(0, 0)
        for key in sorted(self.results.keys()):
            self._write_result(key[0], key[1], self.results[key])
        self.summary_fh.close()
        self.failed_summary_fh.close()


class TestManager:
    """ Create test processes, monitor their status and log test progress. """

    def __init__(self, args: argparse.Namespace):
        self.repeat_count = args.repeat
        self.test_options = self._build_test_options(args)
        self.devices = self._build_test_devices(args)
        self.progress_bar: Optional[ProgressBar] = None
        self.test_summary: Optional[TestSummary] = None

    def _build_test_devices(self, args: argparse.Namespace) -> List[Device]:
        devices = []
        if args.device:
            for s in args.device:
                name, serial_number = s.split(':')
                devices.append(Device(name, serial_number))
        else:
            devices.append(Device('default', ''))
        return devices
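    # For example, '--device pixel:0123456789ABCDEF emu:emulator-5554'
    # (placeholder serials) yields [Device('pixel', '0123456789ABCDEF'),
    # Device('emu', 'emulator-5554')].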

    def _build_test_options(self, args: argparse.Namespace) -> List[str]:
        test_options: List[str] = []
        if args.browser:
            test_options.append('--browser')
        if args.ndk_path:
            test_options += ['--ndk-path', args.ndk_path]
        testdata_dir = Path('testdata').resolve()
        test_options += ['--testdata-dir', str(testdata_dir)]
        return test_options

    def run_all_tests(self, tests: List[str]):
        device_tests = []
        device_serialized_tests = []
        host_tests = []
        for test in tests:
            test_type = get_test_type(test)
            assert test_type, f'No test type for test {test}'
            if test_type == 'device_test':
                device_tests.append(test)
            elif test_type == 'device_serialized_test':
                device_serialized_tests.append(test)
            elif test_type == 'host_test':
                host_tests.append(test)
        total_test_count = (len(device_tests) + len(device_serialized_tests)
                            ) * len(self.devices) * self.repeat_count + len(host_tests)
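        # Device tests repeat on every device; host tests run once. For example
        # (made-up counts), 20 device tests and 2 serialized tests on 2 devices
        # with --repeat 2, plus 30 host tests: (20 + 2) * 2 * 2 + 30 = 118.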
        self.progress_bar = ProgressBar(total_test_count)
        self.test_summary = TestSummary(total_test_count)
        if device_tests:
            self.run_device_tests(device_tests)
        if device_serialized_tests:
            self.run_device_serialized_tests(device_serialized_tests)
        if host_tests:
            self.run_host_tests(host_tests)
        self.progress_bar.end_tests()
        self.progress_bar = None
        self.test_summary.end_tests()

    def run_device_tests(self, tests: List[str]):
        """ Tests can run in parallel on different devices. """
        test_procs: List[TestProcess] = []
        for device in self.devices:
            test_procs.append(TestProcess('device_test', tests, device, 1, self.test_options))
        self.wait_for_test_results(test_procs, self.repeat_count)

    def run_device_serialized_tests(self, tests: List[str]):
        """ Tests run on each device in order. """
        for device in self.devices:
            test_proc = TestProcess('device_serialized_test', tests, device, 1, self.test_options)
            self.wait_for_test_results([test_proc], self.repeat_count)

    def run_host_tests(self, tests: List[str]):
        """ Tests run only once on host. """
        test_proc = TestProcess('host_tests', tests, None, 1, self.test_options)
        self.wait_for_test_results([test_proc], 1)

    def wait_for_test_results(self, test_procs: List[TestProcess], repeat_count: int):
        while test_procs:
            dead_procs: List[TestProcess] = []
            # Check for updates.
            for test_proc in test_procs:
                if not test_proc.alive:
                    dead_procs.append(test_proc)
                test_proc.check_update()
                self.progress_bar.update(test_proc)
                self.test_summary.update(test_proc)

            # Process dead processes: restart unfinished runs, then schedule the
            # next repeat for finished ones.
            for test_proc in dead_procs:
                test_proc.join()
                if not test_proc.finished and test_proc.restart():
                    continue
                test_procs.remove(test_proc)
                if test_proc.repeat_index < repeat_count:
                    test_procs.append(
                        TestProcess(test_proc.test_type, test_proc.tests, test_proc.device,
                                    test_proc.repeat_index + 1, test_proc.test_options))
            time.sleep(0.1)


def run_tests_in_child_process(tests: List[str], args: argparse.Namespace) -> bool:
    """ Run tests in child processes, and read test results through a pipe. """
    mp.set_start_method('spawn')  # to be consistent on darwin, linux, windows
    test_manager = TestManager(args)
    test_manager.run_all_tests(tests)

    total_test_count = test_manager.test_summary.test_count
    failed_test_count = test_manager.test_summary.failed_test_count
    if failed_test_count == 0:
        print('All tests passed!')
        return True
    print('%d of %d tests failed. See %s/failed_test_summary.txt for details.' %
          (failed_test_count, total_test_count, args.test_dir))
    return False


def main() -> bool:
    args = get_args()
    tests = get_host_tests() if args.only_host_test else get_all_tests()
    tests = get_filtered_tests(tests, args.test_from, args.pattern)

    if args.list_tests:
        print('\n'.join(tests))
        return True

    test_dir = Path(args.test_dir).resolve()
    remove(test_dir)
    test_dir.mkdir(parents=True)
    # Switch to the test dir.
    os.chdir(test_dir)
    build_testdata(Path('testdata'))
    return run_tests_in_child_process(tests, args)
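
# main() reports success via its bool return value instead of calling
# sys.exit() itself. A thin wrapper (a sketch; the actual entry point is
# assumed to live outside this module) could be:
#   import sys
#   from test.do_test import main
#   sys.exit(0 if main() else 1)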