#!/usr/bin/env python3
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test type that can be run by TradeFederation.
"""

# pylint: disable=line-too-long

from __future__ import print_function

import collections
import logging
import os
import platform
import sys
import tempfile
import time

from multiprocessing import Process

import atest_arg_parser
import atest_error
import atest_execution_info
import atest_utils
import bug_detector
import cli_translator
import constants
import module_info
import result_reporter
import test_runner_handler

from metrics import metrics
from metrics import metrics_base
from metrics import metrics_utils
from test_runners import regression_test_runner
from tools import atest_tools

EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
CUSTOM_ARG_FLAG = '--'
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option `%s` does not work for running tests in TEST_MAPPING files')

DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
# Tasks that must run at build time but cannot be built by Soong
# (e.g. subprocesses that invoke host commands).
EXTRA_TASKS = {
    'index-targets': atest_tools.index_targets
}


def _run_extra_tasks(join=False):
    """Execute EXTRA_TASKS with multiprocessing.

    Args:
        join: A boolean that determines whether to wait for the subprocesses.
            If True, the main process waits for all subprocesses to finish;
            if False, the subprocesses are daemonized and killed when the
            main process exits.
    """
    _running_procs = []
    for task in EXTRA_TASKS.values():
        proc = Process(target=task)
        proc.daemon = not join
        proc.start()
        _running_procs.append(proc)
    if join:
        for proc in _running_procs:
            proc.join()
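
# Illustrative usage, mirroring how main() below drives this helper (a
# sketch only, not executed at import time):
#
#     # Wait for indexing to finish when module-info is being rebuilt.
#     _run_extra_tasks(join=True)
#     # During a normal build step, run indexing as a daemon so it is
#     # killed automatically when atest exits.
#     _run_extra_tasks(join=False)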


def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace instance holding the parsed args.
    """
    # Store everything after '--' in custom_args.
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        args.custom_args = argv[custom_args_index+1:]
    return args
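
# Illustrative example (not executed): for a command line such as
#
#     atest SomeTestModule -- --some-runner-flag foo
#
# everything after the bare '--' is left untouched in args.custom_args
# (here ['--some-runner-flag', 'foo']) and is later handed to the test
# runner via CUSTOM_ARGS in get_extra_args(). The module name and flag
# above are placeholders for illustration only.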


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If True, display DEBUG-level logs.
    """
    log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    if verbose:
        logging.basicConfig(level=logging.DEBUG,
                            format=log_format, datefmt=datefmt)
    else:
        logging.basicConfig(level=logging.INFO,
                            format=log_format, datefmt=datefmt)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing


def make_test_run_dir():
    """Make the test run dir in ATEST_RESULT_ROOT.

    Returns:
        A string of the dir path.
    """
    if not os.path.exists(constants.ATEST_RESULT_ROOT):
        os.makedirs(constants.ATEST_RESULT_ROOT)
    ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
    test_result_dir = tempfile.mkdtemp(prefix='%s_' % ctime,
                                       dir=constants.ATEST_RESULT_ROOT)
    return test_result_dir
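
# Illustrative example (not executed): with TEST_RUN_DIR_PREFIX of
# '%Y%m%d_%H%M%S', a run started at 2019-05-01 10:20:30 yields a directory
# such as
#
#     <constants.ATEST_RESULT_ROOT>/20190501_102030_<random mkdtemp suffix>
#
# The random suffix comes from tempfile.mkdtemp(), so concurrent runs in the
# same second do not collide.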


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: parsed args object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    # Each entry of arg_maps is applied as if it were written:
    # if args.aaaa:
    #     extra_args[constants.AAAA] = args.aaaa
    arg_maps = {'all_abi': constants.ALL_ABI,
                'collect_tests_only': constants.COLLECT_TESTS_ONLY,
                'custom_args': constants.CUSTOM_ARGS,
                'disable_teardown': constants.DISABLE_TEARDOWN,
                'dry_run': constants.DRY_RUN,
                'generate_baseline': constants.PRE_PATCH_ITERATIONS,
                'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
                'host': constants.HOST,
                'instant': constants.INSTANT,
                'iterations': constants.ITERATIONS,
                'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
                'retry_any_failure': constants.RETRY_ANY_FAILURE,
                'serial': constants.SERIAL,
                'sharding': constants.SHARDING,
                'tf_debug': constants.TF_DEBUG,
                'tf_template': constants.TF_TEMPLATE,
                'user_type': constants.USER_TYPE}
    not_match = [k for k in arg_maps if k not in vars(args)]
    if not_match:
        raise AttributeError('%s object has no attribute %s'
                             % (type(args).__name__, not_match))
    extra_args.update({arg_maps.get(k): v for k, v in vars(args).items()
                       if arg_maps.get(k) and v})
    return extra_args
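
# Illustrative example (not executed): assuming the parser produced
# args.host=True and args.custom_args=['--some-runner-flag', 'foo'] (see
# _parse_args() above), get_extra_args(args) would return roughly
#
#     {constants.HOST: True,
#      constants.CUSTOM_ARGS: ['--some-runner-flag', 'foo']}
#
# The exact keys are the values defined in constants.py.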


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics') if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics') if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate that the test execution modes are not in conflict.

    Exit the program with an error code if both device-only and host-only
    tests are present. If there is no conflict and all tests are host-side,
    set args.host=True.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
    all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        err_msg = ('Test side and option(--host) conflict. Please remove '
                   '--host if the test runs on the device side.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in the command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in the command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on the host side.
    # The option should only be overridden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)
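
# Illustrative example (not executed): 'atest <host-only> <device-only>'
# puts both DEVICELESS_TEST and DEVICE_TEST into all_device_modes above,
# so atest logs the conflict and exits with EXIT_CODE_ERROR, whereas
# 'atest <host-only>' alone simply flips args.host to True.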


def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate that the test execution modes are not in conflict.

    Split the tests in TEST_MAPPING files into two groups: device tests and
    deviceless tests running on the host. Then validate each group's host
    setting. For device tests, exit the program if any test is host-only.
    For deviceless tests, exit the program if any test is device-only.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfo objects.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(
        test_infos)
    # No need to verify device tests if the atest command is set to only run
    # host tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used by detect_regression to skip running tests when only
    regression detection is requested.

    Args:
        args: parsed args object.

    Returns:
        True if there are tests to run, False otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)


# pylint: disable=no-else-return
# This method is slated for removal, so ignore pylint for now.
def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid.
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics '
                      'at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for'
                          ' regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or'
                          ' --generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and --generate-baseline'
                              ', either drop --generate-baseline or drop a path')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and --generate-new-metrics, '
                              'either drop --generate-new-metrics or drop a path')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True


def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid.
    """
    is_test_mapping = atest_utils.is_test_mapping(args)
    if not is_test_mapping:
        return True
    options_to_validate = [
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    ]
    for arg_value, arg in options_to_validate:
        if arg_value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
            return False
    return True


def _validate_args(args):
    """Validate setups and args.

    Exit the program with an error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_regression_detection_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_test_mapping_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)


def _print_module_info_from_module_name(mod_info, module_name):
    """Print the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of the module name.

    Returns:
        True if the module_info is found.
    """
    title_mapping = collections.OrderedDict()
    title_mapping[constants.MODULE_COMPATIBILITY_SUITES] = 'Compatibility suite'
    title_mapping[constants.MODULE_PATH] = 'Source code path'
    title_mapping[constants.MODULE_INSTALLED] = 'Installed path'
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found
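
# Illustrative output (not executed), using placeholder values:
#
#     SomeTestModule
#         Compatibility suite
#             some-suite
#         Source code path
#             path/to/SomeTestModule
#         Installed path
#             out/.../testcases/SomeTestModule
#
# The concrete values come from the local module-info.json.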


def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always returns EXIT_CODE_SUCCESS.
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        sorted_build_targets = sorted(list(test_info.build_targets))
        print("\t\t{}".format(", ".join(sorted_build_targets)))
        for build_target in sorted_build_targets:
            if build_target != test_info.test_name:
                _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS


def is_from_test_mapping(test_infos):
    """Check whether the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    return list(test_infos)[0].from_test_mapping


def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = {info for info in test_infos if info.host}
    device_test_infos = {info for info in test_infos if not info.host}
    return device_test_infos, host_test_infos
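
# Illustrative example (not executed): for a TEST_MAPPING entry roughly like
#
#     {"presubmit": [{"name": "SomeHostTest", "host": true},
#                    {"name": "SomeDeviceTest"}]}
#
# the TestInfo built for SomeHostTest carries host=True and is returned in
# host_test_infos, while SomeDeviceTest ends up in device_test_infos. The
# test names here are placeholders.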


# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to the test run.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # The `host` option needs to be set to True to run host-side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List the failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            atest_utils.delimiter('=', 30, prenl=1), constants.YELLOW)
        atest_utils.colorful_print(
            '\nThe following tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code
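
# Illustrative output (not executed): with one host test and two device
# tests discovered from TEST_MAPPING files, the headers above render as
#
#     Running 1 tests that do NOT require device.
#     Running 2 tests that require device.
#     Results from tests that do NOT require device:
#     Results from tests that require device:
#
# per RUN_HEADER_FMT / RESULT_HEADER_FMT and the HOST_TESTS / DEVICE_TESTS
# constants defined at the top of this file.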


def _dry_run(results_dir, extra_args, test_infos):
    """Only print the commands for the target tests rather than actually running them.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
        runner = test_runner(results_dir)
        run_cmds = runner.generate_run_commands(tests, extra_args)
        for run_cmd in run_cmds:
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds
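
# Illustrative output (not executed): 'atest --dry-run SomeTest' prints one
# line per generated runner command, e.g.
#
#     Would run test via command: <runner command from generate_run_commands()>
#
# and the same list of commands is what _dry_run_validator() below records
# or verifies with handle_test_runner_cmd().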


def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of the suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
        len(testable_modules), suite), constants.CYAN))
    print(atest_utils.delimiter('-'))
    for module in sorted(testable_modules):
        print('\t%s' % module)


def _is_inside_android_root():
    """Identify whether the cwd is inside the Android source tree.

    Returns:
        False if the cwd is outside of the source tree, True otherwise.
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, ' ')
    return build_top in os.getcwd()


def _non_action_validator(args):
    """Handle non-action arguments such as --version, --help, --history,
    --latest_result, etc.

    Args:
        args: An argparse.Namespace instance holding the parsed args.
    """
    if not _is_inside_android_root():
        atest_utils.colorful_print(
            "\nAtest must always work under ${}!".format(
                constants.ANDROID_BUILD_TOP), constants.RED)
        sys.exit(constants.EXIT_CODE_OUTSIDE_ROOT)
    if args.version:
        if os.path.isfile(constants.VERSION_FILE):
            with open(constants.VERSION_FILE) as version_file:
                print(version_file.read())
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.help:
        atest_arg_parser.print_epilog_text()
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.history:
        atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
                                               args.history)
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.latest_result:
        atest_execution_info.print_test_result_by_path(
            constants.LATEST_RESULT_FILE)
        sys.exit(constants.EXIT_CODE_SUCCESS)
    # TODO(b/131879842): remove the statement below after the arguments are
    # fully removed.
    if any((args.detect_regression,
            args.generate_baseline,
            args.generate_new_metrics)):
        stop_msg = ('Please STOP using the arguments below -- they are obsolete '
                    'and will be removed in the near future:\n'
                    '\t--detect-regression\n'
                    '\t--generate-baseline\n'
                    '\t--generate-new-metrics\n')
        msg = ('Please use the arguments below instead:\n'
               '\t--iterations\n'
               '\t--rerun-until-failure\n'
               '\t--retry-any-failure\n')
        atest_utils.colorful_print(stop_msg, constants.RED)
        atest_utils.colorful_print(msg, constants.CYAN)


def _dry_run_validator(args, results_dir, extra_args, test_infos):
    """Handle the --dry-run argument and its related options.

    Args:
        args: An argparse.Namespace instance holding the parsed args.
        results_dir: A string path of the results dir.
        extra_args: A dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.
    """
    args.tests.sort()
    dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
    if args.verify_cmd_mapping:
        try:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds,
                                               do_verification=True)
        except atest_error.DryRunVerificationError as e:
            atest_utils.colorful_print(str(e), constants.RED)
            return constants.EXIT_CODE_VERIFY_FAILURE
    if args.update_cmd_mapping:
        atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                           dry_run_cmds)
    sys.exit(constants.EXIT_CODE_SUCCESS)


# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
def main(argv, results_dir, args):
    """Entry point of the atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.
        args: An argparse.Namespace instance holding the parsed args.

    Returns:
        Exit code.
    """
    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    os_pyver = '{}:{}'.format(platform.platform(), platform.python_version())
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=os_pyver)
    _non_action_validator(args)
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    if args.rebuild_module_info:
        _run_extra_tasks(join=True)
    translator = cli_translator.CLITranslator(module_info=mod_info,
                                              print_cache_msg=not args.clear_cache)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    # Clear the cache if the user passes the -c option.
    if args.clear_cache:
        atest_utils.clean_test_info_caches(args.tests)
    build_targets = set()
    test_infos = set()
    if _will_run_tests(args):
        build_targets, test_infos = translator.translate(args)
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    if any((args.update_cmd_mapping, args.verify_cmd_mapping, args.dry_run)):
        _dry_run_validator(args, results_dir, extra_args, test_infos)
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps is None if none of -b/-i/-t is set; otherwise it is the list
    # of requested steps.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        if constants.TEST_STEP in steps and not args.rebuild_module_info:
            # Run extra tasks along with the build step concurrently. Note that
            # atest won't index targets when only "-b" is given (without -t).
            _run_extra_tasks(join=False)
        # Add the module-info.json target to the list of build targets to keep
        # the file up to date.
        build_targets.add(mod_info.module_info_target)
        build_start = time.time()
        success = atest_utils.build(build_targets, verbose=args.verbose)
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(time.time() - build_start),
            success=success,
            targets=build_targets)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
    elif constants.TEST_STEP not in steps:
        logging.warning('Install step without test step is currently not '
                        'supported; installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter()
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    preparation_time = atest_execution_info.preparation_time(test_start)
    if preparation_time:
        # Send the preparation time only if it's set.
        metrics.RunnerFinishEvent(
            duration=metrics_utils.convert_duration(preparation_time),
            success=True,
            runner_name=constants.TF_PREPARATION,
            test=[])
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code


if __name__ == '__main__':
    RESULTS_DIR = make_test_run_dir()
    ARGS = _parse_args(sys.argv[1:])
    with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
                                                 RESULTS_DIR,
                                                 ARGS) as result_file:
        if not ARGS.no_metrics:
            atest_utils.print_data_collection_notice()
            USER_FROM_TOOL = os.getenv(constants.USER_FROM_TOOL, '')
            if USER_FROM_TOOL == '':
                metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
            else:
                metrics_base.MetricsBase.tool_name = USER_FROM_TOOL

        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR, ARGS)
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_BUG_DETECTED,
            result=DETECTOR.caught_result)
        if result_file:
            print("Run 'atest --history' to review test result history.")
    sys.exit(EXIT_CODE)