#!/usr/bin/env python3
#
# [VPYTHON:BEGIN]
# python_version: "3.8"
# [VPYTHON:END]
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ART Run-Test TestRunner

The testrunner runs the ART run-tests by simply invoking this script.
It fetches the list of eligible tests from the art/test directory and the
list of disabled tests from art/test/knownfailures.json. It runs each test
by invoking the art/test/run-test script and uses the exit value to decide
whether the test passed or failed.

Before invoking the script, first build all the test dependencies.
There are two major build targets for building target and host test
dependencies:
1) test-art-host-run-test
2) test-art-target-run-test

The script accepts various options, including:
-t: Either the test name as in art/test, or the test name including the
    variant information. E.g., "-t 001-HelloWorld",
    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32"
-j: Number of worker threads to use. E.g., "-j64"
--dry-run: Instead of running each test, just print its name.
--verbose: Print continuous status information for every test.
-b / --build-dependencies: Build the dependencies before running the tests.

To request specific variants for the tests, use --<variant-name>.
For example, to use the optimizing compiler, pass --optimizing.

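Example invocations (the exact path to this script may differ in your tree):

  ./testrunner.py -t 001-HelloWorld --host --optimizing -j4
  ./testrunner.py --target --interpreter --dry-run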

In the end, the script will print the failed and skipped tests if any.

"""
import argparse
import collections

# b/140161314 diagnostics.
try:
  import concurrent.futures
except Exception:
  import sys
  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
  sys.stdout.flush()
  raise

import contextlib
import datetime
import fnmatch
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import time

import env
from target_config import target_config
from device_config import device_config

# Timeout for individual tests.
# TODO: make it adjustable per test and for buildbots.
#
# Note: this needs to be larger than the run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# whose key is the test name (like 001-HelloWorld) and whose value is the set of
# variant combinations for which the test is disabled.
DISABLED_TEST_CONTAINER = {}

# The dict maps each variant type to the set of all possible variants of that
# type. For example, for the key 'target', the value contains 'target', 'host'
# and 'jvm'. It is used to parse the test name given as the argument to run.
VARIANT_TYPE_DICT = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains all the possible run-tests found in the art/test directory.
RUN_TEST_SET = set()

failed_tests = []
skipped_tests = []

# Flags
n_thread = -1
total_test_count = 0
verbose = False
dry_run = False
ignore_skips = False
build = False
gdb = False
gdb_arg = ''
runtime_option = ''
with_agent = []
zipapex_loc = None
run_test_option = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants = collections.defaultdict(set)

def gather_test_info():
  """Gathers information about the tests to be run.

  This includes generating the list of all run-tests from the art/test
  directory and mapping the various variants to their types.
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
  VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
  VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
  VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                'field-stress', 'step-stress'}
  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                                   'optimizing', 'regalloc_gc',
                                   'speed-profile', 'baseline'}

  # Regalloc_GC cannot work with prebuild.
  NONFUNCTIONAL_VARIANT_SETS.add(frozenset({'regalloc_gc', 'prebuild'}))

  for v_type in VARIANT_TYPE_DICT:
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))

  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for f in os.listdir(test_dir):
    if fnmatch.fnmatch(f, '[0-9]*'):
      RUN_TEST_SET.add(f)


def setup_test_env():
  """Sets default values for the various test variants if they are not
  already set.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'cdex_level': {'cdex-fast'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if n_thread == -1:
    if 'target' in _user_input_variants['target']:
      n_thread = get_default_threads('target')
    else:
      n_thread = get_default_threads('host')
    print_text("Concurrency: " + str(n_thread) + "\n")

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''

def find_extra_device_arguments(target):
  """
  Gets any extra arguments from the device_config.
  """
  device_name = target
  if target == 'target':
    device_name = get_device_name()
  return device_config.get(device_name, { 'run-test-args' : [] })['run-test-args']

def get_device_name():
  """
  Gets the value of ro.product.name from the remote device.
  """
  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  output = proc.communicate(timeout = 2)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"

def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Args:
    tests: The set of tests to be run.
  """
  options_all = ''

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

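  # Compute the total number of test runs: each test is multiplied by the
  # number of selected variants of every variant type (other than 'target'
  # and the address sizes, which are counted below as target/address-size
  # combinations plus the uncombined 'jvm' runs).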
  global total_test_count
  total_test_count = len(tests)
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  if env.ART_TEST_WITH_STRACE:
    options_all += ' --strace'

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    options_all += ' --always-clean'

  if env.ART_TEST_BISECTION:
    options_all += ' --bisection-search'

  if gdb:
    options_all += ' --gdb'
    if gdb_arg:
      options_all += ' --gdb-arg ' + gdb_arg

  options_all += ' ' + ' '.join(run_test_option)

  if runtime_option:
    for opt in runtime_option:
      options_all += ' --runtime-option ' + opt
  if with_agent:
    for opt in with_agent:
      options_all += ' --with-agent ' + opt

  if dex2oat_jobs != -1:
    options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)

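  # iter_config builds the Cartesian product of the given tests, the selected
  # target variants, and every other user-selected variant type.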
  def iter_config(tests, input_variants, user_input_variants):
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'],
                                 user_input_variants['cdex_level'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': [''],
      'cdex_level': ['']})

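  # start_combination derives the full test name and the run-test options for
  # a single (test, variants, address size) combination and submits it to the
  # thread pool; it returns the resulting future.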
  def start_combination(executor, config_tuple, global_options, address_size):
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti, cdex_level = config_tuple

      # NB The order of components here should match the order of
      # components in the regex parser in parse_test_name.
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += cdex_level + '-'
      test_name += test
      test_name += address_size

      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, cdex_level, address_size}

      options_test = global_options

      if target == 'host':
        options_test += ' --host'
      elif target == 'jvm':
        options_test += ' --jvm'

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          options_test += ' --chroot ' + env.ART_TEST_CHROOT
        if env.ART_TEST_ANDROID_ROOT:
          options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
        if env.ART_TEST_ANDROID_I18N_ROOT:
          options_test += ' --android-i18n-root ' + env.ART_TEST_ANDROID_I18N_ROOT
        if env.ART_TEST_ANDROID_ART_ROOT:
          options_test += ' --android-art-root ' + env.ART_TEST_ANDROID_ART_ROOT
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          options_test += ' --android-tzdata-root ' + env.ART_TEST_ANDROID_TZDATA_ROOT

      if run == 'ndebug':
        options_test += ' -O'

      if prebuild == 'prebuild':
        options_test += ' --prebuild'
      elif prebuild == 'no-prebuild':
        options_test += ' --no-prebuild'

      if cdex_level:
        # Add option and remove the cdex- prefix.
        options_test += ' --compact-dex-level ' + cdex_level.replace('cdex-','')

      if compiler == 'optimizing':
        options_test += ' --optimizing'
      elif compiler == 'regalloc_gc':
        options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
      elif compiler == 'interpreter':
        options_test += ' --interpreter'
      elif compiler == 'interp-ac':
        options_test += ' --interpreter --verify-soft-fail'
      elif compiler == 'jit':
        options_test += ' --jit'
      elif compiler == 'jit-on-first-use':
        options_test += ' --jit --runtime-option -Xjitthreshold:0'
      elif compiler == 'speed-profile':
        options_test += ' --random-profile'
      elif compiler == 'baseline':
        options_test += ' --baseline'

      if relocate == 'relocate':
        options_test += ' --relocate'
      elif relocate == 'no-relocate':
        options_test += ' --no-relocate'

      if trace == 'trace':
        options_test += ' --trace'
      elif trace == 'stream':
        options_test += ' --trace --stream'

      if gc == 'gcverify':
        options_test += ' --gcverify'
      elif gc == 'gcstress':
        options_test += ' --gcstress'

      if jni == 'forcecopy':
        options_test += ' --runtime-option -Xjniopts:forcecopy'
      elif jni == 'checkjni':
        options_test += ' --runtime-option -Xcheck:jni'

      if image == 'no-image':
        options_test += ' --no-image'

      if debuggable == 'debuggable':
        options_test += ' --debuggable --runtime-option -Xopaque-jni-ids:true'

      if jvmti == 'jvmti-stress':
        options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
      elif jvmti == 'field-stress':
        options_test += ' --jvmti-field-stress'
      elif jvmti == 'trace-stress':
        options_test += ' --jvmti-trace-stress'
      elif jvmti == 'redefine-stress':
        options_test += ' --jvmti-redefine-stress'
      elif jvmti == 'step-stress':
        options_test += ' --jvmti-step-stress'

      if address_size == '64':
        options_test += ' --64'

        if env.DEX2OAT_HOST_INSTRUCTION_SET_FEATURES:
          options_test += ' --instruction-set-features ' + env.DEX2OAT_HOST_INSTRUCTION_SET_FEATURES

      elif address_size == '32':
        if env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES:
          options_test += ' --instruction-set-features ' + \
                          env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES

      # TODO(http://36039166): This is a temporary solution to
      # fix build breakages.
      options_test = (' --output-path %s') % (
          tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)) + options_test

      run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
      command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
      return executor.submit(run_test, command, test, variant_set, test_name)

  #  Use a context-manager to handle cleaning up the extracted zipapex if needed.
  with handle_zipapex(zipapex_loc) as zipapex_opt:
    options_all += zipapex_opt
    global n_thread
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
      test_futures = []
      for config_tuple in config:
        target = config_tuple[1]
        for address_size in _user_input_variants['address_sizes_target'][target]:
          test_futures.append(start_combination(executor, config_tuple, options_all, address_size))

      for config_tuple in uncombinated_config:
        test_futures.append(start_combination(executor, config_tuple, options_all, ""))  # no address size

      tests_done = 0
      for test_future in concurrent.futures.as_completed(test_futures):
        (test, status, failure_info, test_time) = test_future.result()
        tests_done += 1
        print_test_info(tests_done, test, status, failure_info, test_time)
        if failure_info and not env.ART_TEST_KEEP_GOING:
          for f in test_futures:
            f.cancel()
          break
      executor.shutdown(True)

@contextlib.contextmanager
def handle_zipapex(ziploc):
  """Extracts the zipapex (if present) and handles cleanup.

  If we are running out of a zipapex we want to unzip it once and have all the tests use the same
  extracted contents. This extracts the files and handles cleanup if needed. It returns the
  required extra arguments to pass to the run-test.
  """
  if ziploc is not None:
    with tempfile.TemporaryDirectory() as tmpdir:
      subprocess.check_call(["unzip", "-qq", ziploc, "apex_payload.zip", "-d", tmpdir])
      subprocess.check_call(
        ["unzip", "-qq", os.path.join(tmpdir, "apex_payload.zip"), "-d", tmpdir])
      yield " --runtime-extracted-zipapex " + tmpdir
  else:
    yield ""

def run_test(command, test, test_variant, test_name):
  """Runs the test.

  It invokes the art/test/run-test script to run the test. The exit value of
  the script is checked, and if it indicates success, the test is treated as
  passing; otherwise it is added to the list of failed tests. Before actually
  running the test, it also checks whether the test is present in the list of
  disabled tests, and if so, it skips running it and adds the test to the
  list of skipped tests.

  Args:
    command: The command to be used to invoke the script.
    test: The name of the test without the variant information.
    test_variant: The set of variants for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of testname, status, optional failure info, and test time.
  """
  try:
    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      if gdb:
        proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT,
                                universal_newlines=True, start_new_session=True)
      else:
        proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout = subprocess.PIPE,
                                universal_newlines=True, start_new_session=True)
      script_output = proc.communicate(timeout=timeout)[0]
      test_passed = not proc.wait()
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name:
      for i in range(8):
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())

def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Prints the continuous test information.

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps erasing previous test information by
  overwriting it with the latest test information. In this case it also
  strictly ensures that the printed information does not exceed the console
  width, by shortening the test_name if necessary.

  When a test fails, it prints the output of the run-test script and the
  command used to invoke the script. Failing test information is never
  overwritten, in either case.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overwriting the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    info = '\r' + ' ' * console_width + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = console_width - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    print_text(info)
  except Exception as e:
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)

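# An illustrative knownfailures.json entry (the field names match the
# supported_field table in verify_knownfailure_entry below; the values here
# are made up):
#   {
#     "tests": ["001-HelloWorld"],
#     "variant": "gcstress & target",
#     "bug": "b/12345678",
#     "description": "Example only: times out under gcstress on device."
#   }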
def verify_knownfailure_entry(entry):
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
      'zipapex' : (bool,),
  }
  for field in entry:
    field_type = type(entry[field])
    if field_type not in supported_field[field]:
      raise ValueError('%s is not a supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))

def get_disabled_test_info(device_name):
  """Generates the set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Args:
    device_name: The value of ro.product.name of the connected device, or
      None when no target device is involved.

  Returns:
    A dict mapping each test to the list of variant combinations for which
    the test should not be run.
  """
  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    known_failures_info = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for failure in known_failures_info:
    verify_knownfailure_entry(failure)
    tests = failure.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]
    patterns = failure.get("test_patterns", [])
    if not isinstance(patterns, list):
      raise ValueError("test_patterns is not a list in %s" % failure)

    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
    variants = parse_variants(failure.get('variant'))

    # Treat a '"devices": "<foo>"' entry as equivalent to the 'target' variant
    # when the current device is listed under "devices".
    device_names = failure.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if len(device_names) != 0:
      if device_name in device_names:
        variants.add('target')
      else:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue

    env_vars = failure.get('env_vars')

    if check_env_vars(env_vars):
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (
              test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

    zipapex_disable = failure.get("zipapex", False)
    if zipapex_disable and zipapex_loc is not None:
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

  return disabled_test_info

def gather_disabled_test_info():
  global DISABLED_TEST_CONTAINER
  device_name = get_device_name() if 'target' in _user_input_variants['target'] else None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)

def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Returns:
    True if all the env variables are set as required, otherwise False.
  """

  if not env_vars:
    return True
  for key in env_vars:
    if env.get_env(key) != env_vars.get(key):
      return False
  return True


def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  if ignore_skips:
    return False
  variants_list = DISABLED_TEST_CONTAINER.get(test, {})
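  # The test is disabled if, for any disabled variant combination, every
  # variant in that combination is present in variant_set.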
  for variants in variants_list:
    variants_present = True
    for variant in variants:
      if variant not in variant_set:
        variants_present = False
        break
    if variants_present:
      return True
  for bad_combo in NONFUNCTIONAL_VARIANT_SETS:
    if bad_combo.issubset(variant_set):
      return True
  return False


def parse_variants(variants):
  """Parses a variant string fetched from art/test/knownfailures.json.

  The string is a '|'-separated list of variant combinations; each combination
  is an '&'-separated list of variant names. For example,
  "interp-ac & 32 | optimizing" disables the test for runs that use both
  interp-ac and 32, and for runs that use optimizing. If the string is empty
  or None, the test is disabled for every variant.

  Returns:
    A set of frozensets, one per disabled variant combination.
  """
  if not variants:
    variants = ''
    for variant in TOTAL_VARIANTS_SET:
      variants += variant
      variants += '|'
    variants = variants[:-1]
  variant_list = set()
  or_variants = variants.split('|')
  for or_variant in or_variants:
    and_variants = or_variant.split('&')
    variant = set()
    for and_variant in and_variants:
      and_variant = and_variant.strip()
      if and_variant not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            and_variant))
      variant.add(and_variant)
    variant_list.add(frozenset(variant))
  return variant_list

def print_text(output):
  sys.stdout.write(output)
  sys.stdout.flush()

def print_analysis():
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overwriting the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    eraser_text = '\r' + ' ' * console_width + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      (passed_test_count*100)/total_test_count,
      'tests' if passed_test_count > 1 else 'test')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))


def parse_test_name(test_name):
  """Parses the test name provided by the user.

  It supports two forms of test_name:
  1) A short name like 001-HelloWorld. In this case, it returns the set of
  run-tests whose names start with the given name.
  2) A full name like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-pointer-ids-picimage-ndebuggable-001-HelloWorld32
  In this case, it parses all the variants and checks that they are placed
  correctly. If so, it records the parsed variants as the user-requested
  variants to use and returns the test name without the variant information,
  like 001-HelloWorld.
  """
  test_set = set()
  for test in RUN_TEST_SET:
    if test.startswith(test_name):
      test_set.add(test)
  if test_set:
    return test_set

  regex = '^test-art-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
  regex += 'run-test-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
  regex += '(' + '|'.join(RUN_TEST_SET) + ')'
  regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
  match = re.match(regex, test_name)
  if match:
    _user_input_variants['target'].add(match.group(1))
    _user_input_variants['run'].add(match.group(2))
    _user_input_variants['prebuild'].add(match.group(3))
    _user_input_variants['compiler'].add(match.group(4))
    _user_input_variants['relocate'].add(match.group(5))
    _user_input_variants['trace'].add(match.group(6))
    _user_input_variants['gc'].add(match.group(7))
    _user_input_variants['jni'].add(match.group(8))
    _user_input_variants['image'].add(match.group(9))
    _user_input_variants['debuggable'].add(match.group(10))
    _user_input_variants['jvmti'].add(match.group(11))
    _user_input_variants['cdex_level'].add(match.group(12))
    _user_input_variants['address_sizes'].add(match.group(14))
    return {match.group(13)}
  raise ValueError(test_name + " is not a valid test")


def setup_env_for_build_target(build_target, parser, options):
  """Sets up the environment for the given build target.

  The method sets up the environment for the master-art-host targets.
  """
  os.environ.update(build_target['env'])
  os.environ['SOONG_ALLOW_MISSING_DEPENDENCIES'] = 'true'
  print_text('%s\n' % (str(os.environ)))

  target_options = vars(parser.parse_args(build_target['flags']))
  target_options['host'] = True
  target_options['verbose'] = True
  target_options['build'] = True
  target_options['n_thread'] = options['n_thread']
  target_options['dry_run'] = options['dry_run']

  return target_options

def get_default_threads(target):
  if target == 'target':
    adb_command = 'adb shell cat /sys/devices/system/cpu/present'
    cpu_info_proc = subprocess.Popen(adb_command.split(), stdout=subprocess.PIPE)
    cpu_info = cpu_info_proc.stdout.read()
    if type(cpu_info) is bytes:
      cpu_info = cpu_info.decode('utf-8')
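    # /sys/devices/system/cpu/present reads like "0-7"; use the last CPU
    # index reported there as the default concurrency for the device.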
    cpu_info_regex = r'\d*-(\d*)'
    match = re.match(cpu_info_regex, cpu_info)
    if match:
      return int(match.group(1))
    else:
      raise ValueError('Unable to determine the concurrency for the target. '
                       'Is the device connected?')
  else:
    return multiprocessing.cpu_count()

def parse_option():
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global gdb
  global gdb_arg
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global zipapex_loc

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread')
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--build-target', dest='build_target', help='master-art-host targets')
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('--runtime-zipapex', dest='runtime_zipapex', default=None,
                            help='Location for runtime zipapex.')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
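  # For each variant type, add an --all-<type> flag that enables every variant
  # of that type, plus one boolean --<variant> flag per individual variant.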
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  if options['build_target']:
    options = setup_env_for_build_target(target_config[options['build_target']],
                                         parser, options)

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  if options['gdb']:
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  runtime_option = options['runtime_option']
  with_agent = options['with_agent']
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])
  zipapex_loc = options['runtime_zipapex']

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests

def main():
  gather_test_info()
  user_requested_tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = ''
    if 'host' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    if 'target' in _user_input_variants['target']:
      build_targets += 'test-art-target-run-test-dependencies '
    if 'jvm' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' DX='
    build_command += ' ' + build_targets
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  if user_requested_tests:
    run_tests(user_requested_tests)
  else:
    run_tests(RUN_TEST_SET)

  print_analysis()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)

if __name__ == '__main__':
  main()
