#!/usr/bin/env python
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test types that can be run by
TradeFederation.
"""

import argparse
import logging
import os
import subprocess
import sys
import tempfile
import time

import atest_error
import atest_utils
import cli_translator
# pylint: disable=import-error
import constants
import module_info
import test_runner_handler
from test_runners import regression_test_runner

EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
BUILD_STEP = 'build'
INSTALL_STEP = 'install'
TEST_STEP = 'test'
ALL_STEPS = [BUILD_STEP, INSTALL_STEP, TEST_STEP]
TEST_RUN_DIR_PREFIX = 'atest_run_%s_'
HELP_DESC = '''Build, install and run Android tests locally.'''
REBUILD_MODULE_INFO_FLAG = '--rebuild-module-info'
CUSTOM_ARG_FLAG = '--'

EPILOG_TEXT = '''


- - - - - - - - -
IDENTIFYING TESTS
- - - - - - - - -

    The positional argument <tests> should be a reference to one or more
    of the tests you'd like to run. Multiple tests can be run in one command
    by separating test references with spaces.

    Usage template: atest <reference_to_test_1> <reference_to_test_2>

    A <reference_to_test> can be satisfied by the test's MODULE NAME,
    MODULE:CLASS, CLASS NAME, TF INTEGRATION TEST, FILE PATH or PACKAGE NAME.
    Explanations and examples of each follow.


    < MODULE NAME >

        Identifying a test by its module name will run the entire module.
        Input the name as it appears in the LOCAL_MODULE or
        LOCAL_PACKAGE_NAME variables in that test's Android.mk or Android.bp
        file.

        Note: Use < TF INTEGRATION TEST > to run non-module tests integrated
        directly into TradeFed.

        Examples:
            atest FrameworksServicesTests
            atest CtsJankDeviceTestCases


    < MODULE:CLASS >

        Identifying a test by its class name will run just the tests in that
        class and not the whole module. MODULE:CLASS is the preferred way to
        run a single class. MODULE is the same as described above. CLASS is
        the name of the test class in the .java file. It can be either the
        fully qualified class name or just the basic name.

        Examples:
            atest FrameworksServicesTests:ScreenDecorWindowTests
            atest FrameworksServicesTests:com.android.server.wm.ScreenDecorWindowTests
            atest CtsJankDeviceTestCases:CtsDeviceJankUi


    < CLASS NAME >

        A single class can also be run by referencing the class name without
        the module name.

        Examples:
            atest ScreenDecorWindowTests
            atest CtsDeviceJankUi

        However, this will take more time than the equivalent MODULE:CLASS
        reference, so we suggest using a MODULE:CLASS reference whenever
        possible. The examples below are ordered by performance from the
        fastest to the slowest:

        Examples:
            atest FrameworksServicesTests:com.android.server.wm.ScreenDecorWindowTests
            atest FrameworksServicesTests:ScreenDecorWindowTests
            atest ScreenDecorWindowTests


    < TF INTEGRATION TEST >

        To run tests that are integrated directly into TradeFed (non-modules),
        input the name as it appears in the output of the "tradefed.sh list
        configs" cmd.

        Examples:
            atest example/reboot
            atest native-benchmark


    < FILE PATH >

        Both module-based tests and integration-based tests can be run by
        inputting the path to their test file or dir as appropriate. A single
        class can also be run by inputting the path to the class's java file.
        Both relative and absolute paths are supported.

        Example - 2 ways to run the `CtsJankDeviceTestCases` module via path:
        1. run module from android <repo root>:
            atest cts/tests/jank/jank

        2. from <android root>/cts/tests/jank:
            atest .

        Example - run a specific class within the CtsJankDeviceTestCases
        module from <android repo> root via path:
            atest cts/tests/jank/src/android/jank/cts/ui/CtsDeviceJankUi.java

        Example - run an integration test from <android repo> root via path:
            atest tools/tradefederation/contrib/res/config/example/reboot.xml


    < PACKAGE NAME >

        Atest supports searching tests by package name as well.

        Examples:
            atest com.android.server.wm
            atest android.jank.cts


- - - - - - - - - - - - - - - - - - - - - - - - - -
SPECIFYING INDIVIDUAL STEPS: BUILD, INSTALL OR RUN
- - - - - - - - - - - - - - - - - - - - - - - - - -

    The -b, -i and -t options allow you to specify which steps you want to
    run. If none of those options are given, then all steps are run. If any
    of these options are provided, then only the listed steps are run.

    Note: -i alone is not currently supported and can only be included with
    -t. Both -b and -t can be run alone.

    Examples:
        atest -b <test> (just build targets)
        atest -t <test> (run tests only)
        atest -it <test> (install apk and run tests)
        atest -bt <test> (build targets, run tests, but skip installing apk)


    Atest now has the ability to force a test to skip its cleanup/teardown
    step. Many tests, e.g. CTS, clean up the device after the test is run,
    so trying to rerun your test with -t will fail without the
    --disable-teardown parameter. Use -d before -t to skip the test cleanup
    step and test iteratively.

        atest -d <test> (disable installing apk and cleaning up device)
        atest -t <test>

    Note that -t disables both setup/install and teardown/cleanup of the
    device, so you can continue to rerun your test with just

        atest -t <test>

    as many times as you want.
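

- - - - - - - - - - - - -
PASSING CUSTOM ARGUMENTS
- - - - - - - - - - - - -

    Anything after a standalone "--" on the command line is not parsed by
    atest; it is passed through unmodified to the test runners as custom
    args.

    Usage template: atest <test> -- <custom args>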


- - - - - - - - - - - - -
RUNNING SPECIFIC METHODS
- - - - - - - - - - - - -

    It is possible to run only specific methods within a test class. To run
    only specific methods, identify the class in any of the ways supported
    for identifying a class (MODULE:CLASS, FILE PATH, etc.) and then append
    the name of the method or methods using the following template:

        <reference_to_class>#<method1>

    Multiple methods can be specified with commas:

        <reference_to_class>#<method1>,<method2>,<method3>...

    Examples:
        atest com.android.server.wm.ScreenDecorWindowTests#testMultipleDecors

        atest FrameworksServicesTests:ScreenDecorWindowTests#testFlagChange,testRemoval


- - - - - - - - - - - - -
RUNNING MULTIPLE CLASSES
- - - - - - - - - - - - -

    To run multiple classes, separate them with spaces just like you would
    when running multiple tests. Atest will handle building and running
    classes in the most efficient way possible, so specifying a subset of
    classes in a module will improve performance over running the whole
    module.


    Examples:
    - two classes in the same module:
        atest FrameworksServicesTests:ScreenDecorWindowTests FrameworksServicesTests:DimmerTests

    - two classes in different modules:
        atest FrameworksServicesTests:ScreenDecorWindowTests CtsJankDeviceTestCases:CtsDeviceJankUi


- - - - - - - - - - -
REGRESSION DETECTION
- - - - - - - - - - -

    Generate pre-patch or post-patch metrics without running regression
    detection:

    Example:
        atest <test> --generate-baseline <optional iter>
        atest <test> --generate-new-metrics <optional iter>

    Local regression detection can be run in three ways:

    1) Provide a folder containing baseline (pre-patch) metrics (generated
       previously). Atest will run the tests for n (default: 5) iterations,
       generate a new set of post-patch metrics, and compare those against
       the existing metrics.

    Example:
        atest <test> --detect-regression </path/to/baseline> --generate-new-metrics <optional iter>

    2) Provide a folder containing post-patch metrics (generated
       previously). Atest will run the tests for n (default: 5) iterations,
       generate a new set of pre-patch metrics, and compare the provided
       metrics against these. Note: the developer needs to revert the
       device/tests to the pre-patch state to generate baseline metrics.

    Example:
        atest <test> --detect-regression </path/to/new> --generate-baseline <optional iter>

    3) Provide 2 folders containing both pre-patch and post-patch metrics.
       Atest will run only the regression detection algorithm, not any
       tests.

    Example:
        atest --detect-regression </path/to/baseline> </path/to/new>


'''
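

# A worked example of the CUSTOM_ARG_FLAG handling in _parse_args() below
# (the argv values are hypothetical, for illustration only): given
#     argv = ['CtsJankDeviceTestCases', '--', '--abi', 'x86']
# the parser only sees ['CtsJankDeviceTestCases'], and the returned
# namespace gets custom_args == ['--abi', 'x86'].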
def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace instance holding the parsed args.
    """
    parser = argparse.ArgumentParser(
        description=HELP_DESC,
        epilog=EPILOG_TEXT,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('tests', nargs='*', help='Tests to build and/or run.')
    parser.add_argument('-b', '--build', action='append_const', dest='steps',
                        const=BUILD_STEP, help='Run a build.')
    parser.add_argument('-i', '--install', action='append_const', dest='steps',
                        const=INSTALL_STEP, help='Install an APK.')
    parser.add_argument('-t', '--test', action='append_const', dest='steps',
                        const=TEST_STEP,
                        help='Run the tests. WARNING: Many test configs '
                             'force cleanup of the device after the test '
                             'run. In that case, -d must be used in the '
                             'previous test run to disable cleanup for -t '
                             'to work. Otherwise, the device will need to '
                             'be set up again with -i.')
    parser.add_argument('-s', '--serial',
                        help='The device to run the test on.')
    parser.add_argument('-d', '--disable-teardown', action='store_true',
                        help='Disables test teardown and cleanup.')
    parser.add_argument('-m', REBUILD_MODULE_INFO_FLAG, action='store_true',
                        help='Forces a rebuild of the module-info.json file. '
                             'This may be necessary following a repo sync or '
                             'when writing a new test.')
    parser.add_argument('-w', '--wait-for-debugger', action='store_true',
                        help='Only for instrumentation tests. Waits for '
                             'debugger prior to execution.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Display DEBUG level logging.')
    parser.add_argument('--generate-baseline', nargs='?', type=int,
                        const=5, default=0,
                        help='Generate baseline metrics, run 5 iterations by '
                             'default. Provide an int argument to specify '
                             'the number of iterations.')
    parser.add_argument('--generate-new-metrics', nargs='?', type=int,
                        const=5, default=0,
                        help='Generate new metrics, run 5 iterations by '
                             'default. Provide an int argument to specify '
                             'the number of iterations.')
    parser.add_argument('--detect-regression', nargs='*',
                        help='Run regression detection algorithm. Supply '
                             'path to baseline and/or new metrics folders.')
    # This arg actually doesn't consume anything; it's primarily used for
    # the help description and for creating custom_args in the Namespace
    # object.
    parser.add_argument('--', dest='custom_args', nargs='*',
                        help='Specify custom args for the test runners. '
                             'Everything after -- will be consumed as '
                             'custom args.')
    # Store everything after '--' in custom_args.
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        args.custom_args = argv[custom_args_index+1:]
    return args


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If True, display DEBUG level logs.
    """
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'environment variables: %s.', missing)
    return missing
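

# Illustration of the naming scheme used by make_test_run_dir() below,
# assuming the default tempfile location; the epoch timestamp and the
# random suffix will differ in practice:
#     make_test_run_dir() -> '/tmp/atest_run_1500000000_abc123'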
def make_test_run_dir():
    """Make the test run dir in tmp.

    Returns:
        A string of the dir path.
    """
    utc_epoch_time = int(time.time())
    prefix = TEST_RUN_DIR_PREFIX % utc_epoch_time
    return tempfile.mkdtemp(prefix=prefix)


def run_tests(run_commands):
    """Shell out and execute tradefed run commands.

    Args:
        run_commands: A list of strings of TradeFed run commands.
    """
    logging.info('Running tests')
    # TODO: Build result parser for run command. Until then, display raw
    # stdout.
    for run_command in run_commands:
        logging.debug('Executing command: %s', run_command)
        subprocess.check_call(run_command, shell=True,
                              stderr=subprocess.STDOUT)


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: parsed args object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or ALL_STEPS
    if INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    if args.disable_teardown:
        extra_args[constants.DISABLE_TEARDOWN] = args.disable_teardown
    if args.generate_baseline:
        extra_args[constants.PRE_PATCH_ITERATIONS] = args.generate_baseline
    if args.serial:
        extra_args[constants.SERIAL] = args.serial
    if args.generate_new_metrics:
        extra_args[constants.POST_PATCH_ITERATIONS] = args.generate_new_metrics
    if args.custom_args:
        extra_args[constants.CUSTOM_ARGS] = args.custom_args
    return extra_args


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics')
                        if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics')
                         if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used to skip test discovery when only regression
    detection was requested.

    Args:
        args: parsed args object.

    Returns:
        True if there are tests to run, False otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)
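

# For reference, the flag combinations accepted by the validator below
# (the paths are placeholders):
#     --detect-regression <baseline> --generate-new-metrics  -> valid
#     --detect-regression <new> --generate-baseline          -> valid
#     --detect-regression <baseline> <new>                   -> valid
#     --detect-regression <one path> (no --generate-* flag)  -> invalid
#     --generate-baseline with --generate-new-metrics        -> invalid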
def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if the args are valid, False otherwise.
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics '
                      'at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for '
                          'regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or '
                          '--generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and '
                              '--generate-baseline; either drop '
                              '--generate-baseline or drop a path.')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and '
                              '--generate-new-metrics; either drop '
                              '--generate-new-metrics or drop a path.')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True
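

# Rough flow of main() below: parse args -> verify the environment ->
# translate test references into build targets and test infos -> build ->
# run tests -> optionally run regression detection. The steps list
# defaults to ALL_STEPS when none of -b/-i/-t are given.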
def main(argv):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.

    Returns:
        Exit code.
    """
    args = _parse_args(argv)
    _configure_logging(args.verbose)
    if _missing_environment_variables():
        return constants.EXIT_CODE_ENV_NOT_SETUP
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics '
                      'at the same time.')
        return constants.EXIT_CODE_ERROR
    if not _has_valid_regression_detection_args(args):
        return constants.EXIT_CODE_ERROR
    results_dir = make_test_run_dir()
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    translator = cli_translator.CLITranslator(module_info=mod_info)
    build_targets = set()
    test_infos = set()
    if _will_run_tests(args):
        try:
            build_targets, test_infos = translator.translate(args.tests)
        except atest_error.TestDiscoveryException:
            logging.exception('Error occurred in test discovery:')
            logging.info('This can happen after a repo sync or if the test '
                         'is new. Running with "%s" may resolve the issue.',
                         REBUILD_MODULE_INFO_FLAG)
            return constants.EXIT_CODE_TEST_NOT_FOUND
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps will be None if none of -b/-i/-t are set; otherwise it is
    # the list of requested steps.
    steps = args.steps if args.steps else ALL_STEPS
    if build_targets and BUILD_STEP in steps:
        # Add the module-info.json target to the list of build targets to
        # keep the file up to date.
        build_targets.add(mod_info.module_info_target)
        success = atest_utils.build(build_targets, args.verbose)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
    elif TEST_STEP not in steps:
        logging.warning('Install step without test step is currently not '
                        'supported; installing AND testing instead.')
        steps.append(TEST_STEP)
    if TEST_STEP in steps:
        test_runner_handler.run_all_tests(results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        regression_test_runner.RegressionTestRunner('').run_tests(
            None, regression_args)
    return constants.EXIT_CODE_SUCCESS


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))