1# Copyright 2016 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5# repohooks/pre-upload.py currently does not run pylint. But for developers who
6# want to check their code manually we disable several harmless pylint warnings
7# which just distract from more serious remaining issues.
8#
9# The instance variables _host and _install_paths are not defined in __init__().
10# pylint: disable=attribute-defined-outside-init
11#
12# Many short variable names don't follow the naming convention.
13# pylint: disable=invalid-name
14#
15# _parse_result() and _dir_size() don't access self and could be functions.
16# pylint: disable=no-self-use
17#
18# _ChromeLogin and _TradefedLogCollector have no public methods.
19# pylint: disable=too-few-public-methods
20
21import contextlib
22import errno
23import glob
24import hashlib
25import lockfile
26import logging
27import os
28import pipes
29import random
30import re
31import shutil
32import stat
33import tempfile
34import urlparse
35
36from autotest_lib.client.bin import utils as client_utils
37from autotest_lib.client.common_lib import base_utils
38from autotest_lib.client.common_lib import error
39from autotest_lib.client.common_lib.cros import dev_server
40from autotest_lib.server import autotest
41from autotest_lib.server import test
42from autotest_lib.server import utils
43
44# TODO(ihf): If akeshet doesn't fix crbug.com/691046 delete metrics again.
45try:
46    from chromite.lib import metrics
47except ImportError:
48    metrics = utils.metrics_mock
49
50# TODO(ihf): Find a home for all these paths. This is getting out of hand.
51_SDK_TOOLS_DIR_M = 'gs://chromeos-arc-images/builds/git_mnc-dr-arc-dev-linux-static_sdk_tools/3554341'
52_SDK_TOOLS_FILES = ['aapt']
53# To stabilize adb behavior, we use dynamically linked adb.
54_ADB_DIR_M = 'gs://chromeos-arc-images/builds/git_mnc-dr-arc-dev-linux-cheets_arm-user/3554341'
55_ADB_FILES = ['adb']
56
57_ADB_POLLING_INTERVAL_SECONDS = 1
58_ADB_READY_TIMEOUT_SECONDS = 60
59_ANDROID_ADB_KEYS_PATH = '/data/misc/adb/adb_keys'
60
61_ARC_POLLING_INTERVAL_SECONDS = 1
62_ARC_READY_TIMEOUT_SECONDS = 60
63
64_TRADEFED_PREFIX = 'autotest-tradefed-install_'
65_TRADEFED_CACHE_LOCAL = '/tmp/autotest-tradefed-cache'
66_TRADEFED_CACHE_CONTAINER = '/usr/local/autotest/results/shared/cache'
67_TRADEFED_CACHE_CONTAINER_LOCK = '/usr/local/autotest/results/shared/lock'
68
69# According to dshi a drone has 500GB of disk space. It is ok for now to use
70# 10GB of disk space, as no more than 10 tests should run in parallel.
71# TODO(ihf): Investigate tighter cache size.
72_TRADEFED_CACHE_MAX_SIZE = (10 * 1024 * 1024 * 1024)
73
74
class _ChromeLogin(object):
    """Context manager that logs into Chrome and resets state on exit."""

    def __init__(self, host):
        self._host = host

    def __enter__(self):
        """Logs in to Chrome by running the cheets_CTSHelper client test."""
        logging.info('Ensure Android is running...')
        # Bound the login attempt to roughly 5 minutes so a broken DUT
        # fails the job quickly instead of hanging for its whole duration.
        autotest.Autotest(self._host).run_timed_test(
                'cheets_CTSHelper', timeout=300, check_client_result=True)

    def __exit__(self, exc_type, exc_value, traceback):
        """Reboots the machine on exit to wipe out all the login state.

        @param exc_type: Exception type if an exception is raised from the
                         with-block.
        @param exc_value: Exception instance if an exception is raised from
                          the with-block.
        @param traceback: Stack trace info if an exception is raised from
                          the with-block.
        @return None, indicating not to ignore an exception from the
                with-block if raised.
        """
        logging.info('Rebooting...')
        try:
            self._host.reboot()
        except Exception:
            # Propagate the reboot failure only when the with-block itself
            # succeeded; otherwise just log it so the original exception
            # from the with-block is not masked.
            if exc_type is None:
                raise
            logging.exception('Rebooting failed.')
112
113
@contextlib.contextmanager
def lock(filename):
    """Serializes cache access between autotest/tradefed instances.

    @param filename: The file to be locked.
    """
    filelock = lockfile.FileLock(filename)
    # Simply calling filelock.acquire(3600) would be tempting, but the
    # implementation has very poor temporal granularity (timeout/10), which
    # is unsuitable for our needs. See
    # /usr/lib64/python2.7/site-packages/lockfile/
    attempts = 0
    while not filelock.i_am_locking():
        attempts += 1
        logging.info('Waiting for cache lock...')
        try:
            filelock.acquire(random.randint(1, 5))
        except (lockfile.AlreadyLocked, lockfile.LockTimeout):
            # Normally the lock is acquired within a few seconds. Waiting
            # on the order of hours means either the dev server IO is
            # overloaded or a lock didn't get cleaned up. Take one for the
            # team, break the lock and report a failure; this should fix
            # the lock for following tests. If the failure affects more
            # than one job, look for a deadlock or dev server overload.
            if attempts > 1000:
                logging.error('Permanent lock failure. Trying to break lock.')
                filelock.break_lock()
                raise error.TestFail('Error: permanent cache lock failure.')
        else:
            logging.info('Acquired cache lock after %d attempts.', attempts)
    try:
        yield
    finally:
        filelock.release()
        logging.info('Released cache lock.')
148
149
@contextlib.contextmanager
def adb_keepalive(target, extra_paths):
    """Keeps the adb connection to the DUT alive for the with-block.

    Spawns a background process that continuously polls adb's connected
    state and attempts to reconnect whenever the connection drops. This is
    currently the only way to recover safely from (intentional) reboots.

    @param target: the hostname and port of the DUT.
    @param extra_paths: any additional components to the PATH environment
                        variable.
    """
    from autotest_lib.client.common_lib.cros import adb_keepalive as module
    # |__file__| is the absolute path of the compiled bytecode of the
    # module; swap the extension back so the original .py file is run.
    script_filename = module.__file__.replace('.pyc', '.py')
    job = base_utils.BgJob(
            [script_filename, target], nickname='adb_keepalive',
            stderr_level=logging.DEBUG, stdout_tee=base_utils.TEE_TO_LOGS,
            stderr_tee=base_utils.TEE_TO_LOGS, extra_paths=extra_paths)
    try:
        yield
    finally:
        # The adb_keepalive.py script runs forever until SIGTERM is sent,
        # so terminate it explicitly and reap the background job.
        base_utils.nuke_subprocess(job.sp)
        base_utils.join_bg_jobs([job])
180
181
@contextlib.contextmanager
def pushd(d):
    """Changes the working directory to |d| for the duration of the block.

    @param d: the directory to change to.
    """
    previous = os.getcwd()
    os.chdir(d)
    try:
        yield
    finally:
        os.chdir(previous)
193
194
195class TradefedTest(test.test):
196    """Base class to prepare DUT to run tests via tradefed."""
197    version = 1
198
    # TODO(ihf): Remove _ABD_DIR_M/_SDK_TOOLS_DIR_M defaults once M is dead.
    def initialize(self, host=None, adb_dir=_ADB_DIR_M,
                   sdk_tools_dir=_SDK_TOOLS_DIR_M):
        """Sets up the tools and binary bundles for the test.

        @param host: DUT host object; kept for later adb/Chrome interaction.
        @param adb_dir: gs:// directory containing the adb binary.
        @param sdk_tools_dir: gs:// directory containing SDK tools (aapt).
        """
        logging.info('Hostname: %s', host.hostname)
        self._host = host
        self._install_paths = []
        # Tests in the lab run within individual lxc container instances.
        if utils.is_in_container():
            cache_root = _TRADEFED_CACHE_CONTAINER
        else:
            cache_root = _TRADEFED_CACHE_LOCAL
        # Quick sanity check and spew of java version installed on the server.
        utils.run('java', args=('-version',), ignore_status=False, verbose=True,
                  stdout_tee=utils.TEE_TO_LOGS, stderr_tee=utils.TEE_TO_LOGS)
        # The content of the cache survives across jobs.
        self._safe_makedirs(cache_root)
        self._tradefed_cache = os.path.join(cache_root, 'cache')
        self._tradefed_cache_lock = os.path.join(cache_root, 'lock')
        # The content of the install location does not survive across jobs and
        # is isolated (by using a unique path) against other autotest
        # instances. This is not needed for the lab, but if somebody wants to
        # run multiple TradefedTest instances.
        self._tradefed_install = tempfile.mkdtemp(prefix=_TRADEFED_PREFIX)
        # Under lxc the cache is shared between multiple autotest/tradefed
        # instances. We need to synchronize access to it. All binaries are
        # installed through the (shared) cache into the local (unshared)
        # lxc/autotest instance storage.
        # If clearing the cache it must happen before all downloads.
        self._clear_download_cache_if_needed()
        # Set permissions (rwxr-xr-x) to the executable binaries.
        permission = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
                | stat.S_IXOTH)
        self._install_files(adb_dir, _ADB_FILES, permission)
        self._install_files(sdk_tools_dir, _SDK_TOOLS_FILES, permission)
234
235    def cleanup(self):
236        """Cleans up any dirtied state."""
237        # Kill any lingering adb servers.
238        self._run('adb', verbose=True, args=('kill-server',))
239        logging.info('Cleaning up %s.', self._tradefed_install)
240        shutil.rmtree(self._tradefed_install)
241
242    def _login_chrome(self):
243        """Returns Chrome log-in context manager.
244
245        Please see also cheets_CTSHelper for details about how this works.
246        """
247        return _ChromeLogin(self._host)
248
249    def _get_adb_target(self):
250        return '{}:{}'.format(self._host.hostname, self._host.port)
251
252    def _try_adb_connect(self):
253        """Attempts to connect to adb on the DUT.
254
255        @return boolean indicating if adb connected successfully.
256        """
257        # This may fail return failure due to a race condition in adb connect
258        # (b/29370989). If adb is already connected, this command will
259        # immediately return success.
260        hostport = self._get_adb_target()
261        result = self._run(
262                'adb',
263                args=('connect', hostport),
264                verbose=True,
265                ignore_status=True)
266        logging.info('adb connect {}:\n{}'.format(hostport, result.stdout))
267        if result.exit_status != 0:
268            return False
269
270        result = self._run('adb', args=('devices',))
271        logging.info('adb devices:\n' + result.stdout)
272        if not re.search(
273                r'{}\s+(device|unauthorized)'.format(re.escape(hostport)),
274                result.stdout):
275            return False
276
277        # Actually test the connection with an adb command as there can be
278        # a race between detecting the connected device and actually being
279        # able to run a commmand with authenticated adb.
280        result = self._run('adb', args=('shell', 'exit'), ignore_status=True)
281        return result.exit_status == 0
282
283    def _android_shell(self, command):
284        """Run a command remotely on the device in an android shell
285
286        This function is strictly for internal use only, as commands do not run
287        in a fully consistent Android environment. Prefer adb shell instead.
288        """
289        self._host.run('android-sh -c ' + pipes.quote(command))
290
291    def _write_android_file(self, filename, data):
292        """Writes a file to a location relative to the android container.
293
294        This is an internal function used to bootstrap adb.
295        Tests should use adb push to write files.
296        """
297        android_cmd = 'echo %s > %s' % (pipes.quote(data),
298                                        pipes.quote(filename))
299        self._android_shell(android_cmd)
300
301    def _connect_adb(self):
302        """Sets up ADB connection to the ARC container."""
303        logging.info('Setting up adb connection.')
304        # Generate and push keys for adb.
305        # TODO(elijahtaylor): Extract this code to arc_common and de-duplicate
306        # code in arc.py on the client side tests.
307        key_path = os.path.join(self.tmpdir, 'test_key')
308        pubkey_path = key_path + '.pub'
309        self._run('adb', verbose=True, args=('keygen', pipes.quote(key_path)))
310        with open(pubkey_path, 'r') as f:
311            self._write_android_file(_ANDROID_ADB_KEYS_PATH, f.read())
312        self._android_shell('restorecon ' + pipes.quote(_ANDROID_ADB_KEYS_PATH))
313        os.environ['ADB_VENDOR_KEYS'] = key_path
314
315        # Kill existing adb server to ensure that the env var is picked up.
316        self._run('adb', verbose=True, args=('kill-server',))
317
318        # This starts adbd.
319        self._android_shell('setprop sys.usb.config mtp,adb')
320
321        # Also let it be automatically started upon reboot.
322        self._android_shell('setprop persist.sys.usb.config mtp,adb')
323
324        # adbd may take some time to come up. Repeatedly try to connect to adb.
325        utils.poll_for_condition(lambda: self._try_adb_connect(),
326                                 exception=error.TestFail(
327                                     'Error: Failed to set up adb connection'),
328                                 timeout=_ADB_READY_TIMEOUT_SECONDS,
329                                 sleep_interval=_ADB_POLLING_INTERVAL_SECONDS)
330
331        logging.info('Successfully setup adb connection.')
332
333    def _wait_for_arc_boot(self):
334        """Wait until ARC is fully booted.
335
336        Tests for the presence of the intent helper app to determine whether ARC
337        has finished booting.
338        """
339        def _intent_helper_running():
340            result = self._run('adb', args=('shell', 'pgrep', '-f',
341                                            'org.chromium.arc.intent_helper'))
342            return bool(result.stdout)
343        utils.poll_for_condition(
344            _intent_helper_running,
345            exception=error.TestFail(
346                'Error: Timed out waiting for intent helper.'),
347            timeout=_ARC_READY_TIMEOUT_SECONDS,
348            sleep_interval=_ARC_POLLING_INTERVAL_SECONDS)
349
350    def _disable_adb_install_dialog(self):
351        """Disables a dialog shown on adb install execution.
352
353        By default, on adb install execution, "Allow Google to regularly check
354        device activity ... " dialog is shown. It requires manual user action
355        so that tests are blocked at the point.
356        This method disables it.
357        """
358        logging.info('Disabling the adb install dialog.')
359        result = self._run(
360                'adb',
361                verbose=True,
362                args=(
363                        'shell',
364                        'settings',
365                        'put',
366                        'global',
367                        'verifier_verify_adb_installs',
368                        '0'))
369        logging.info('Disable adb dialog: %s', result.stdout)
370
371    def _ready_arc(self):
372        """Ready ARC and adb for running tests via tradefed."""
373        self._connect_adb()
374        self._disable_adb_install_dialog()
375        self._wait_for_arc_boot()
376
377    def _safe_makedirs(self, path):
378        """Creates a directory at |path| and its ancestors.
379
380        Unlike os.makedirs(), ignore errors even if directories exist.
381        """
382        try:
383            os.makedirs(path)
384        except OSError as e:
385            if not (e.errno == errno.EEXIST and os.path.isdir(path)):
386                raise
387
388    def _unzip(self, filename):
389        """Unzip the file.
390
391        The destination directory name will be the stem of filename.
392        E.g., _unzip('foo/bar/baz.zip') will create directory at
393        'foo/bar/baz', and then will inflate zip's content under the directory.
394        If here is already a directory at the stem, that directory will be used.
395
396        @param filename: Path to the zip archive.
397        @return Path to the inflated directory.
398        """
399        destination = os.path.splitext(filename)[0]
400        if os.path.isdir(destination):
401            return destination
402        self._safe_makedirs(destination)
403        utils.run('unzip', args=('-d', destination, filename))
404        return destination
405
406    def _dir_size(self, directory):
407        """Compute recursive size in bytes of directory."""
408        size = 0
409        for root, _, files in os.walk(directory):
410            size += sum(os.path.getsize(os.path.join(root, name))
411                    for name in files)
412        return size
413
414    def _clear_download_cache_if_needed(self):
415        """Invalidates cache to prevent it from growing too large."""
416        # If the cache is large enough to hold a working set, we can simply
417        # delete everything without thrashing.
418        # TODO(ihf): Investigate strategies like LRU.
419        with lock(self._tradefed_cache_lock):
420            size = self._dir_size(self._tradefed_cache)
421            if size > _TRADEFED_CACHE_MAX_SIZE:
422                logging.info('Current cache size=%d got too large. Clearing %s.'
423                        , size, self._tradefed_cache)
424                shutil.rmtree(self._tradefed_cache)
425                self._safe_makedirs(self._tradefed_cache)
426            else:
427                logging.info('Current cache size=%d of %s.', size,
428                        self._tradefed_cache)
429
    def _download_to_cache(self, uri):
        """Downloads the uri from the storage server.

        It always checks the cache for available binaries first and skips
        download if binaries are already in cache.

        The caller of this function is responsible for holding the cache lock.

        Three transports are used depending on scheme and environment:
        wget for http(s), gsutil for gs:// outside moblab, and a dev server
        staging proxy for gs:// inside moblab.

        @param uri: The Google Storage or dl.google.com uri.
        @return Path to the downloaded object, name.
        @raise error.TestFail: on a uri scheme other than gs, http or https.
        """
        # Split uri into 3 pieces for use by gsutil and also by wget.
        parsed = urlparse.urlparse(uri)
        filename = os.path.basename(parsed.path)
        # We are hashing the uri instead of the binary. This is acceptable, as
        # the uris are supposed to contain version information and an object is
        # not supposed to be changed once created.
        output_dir = os.path.join(self._tradefed_cache,
                                  hashlib.md5(uri).hexdigest())
        output = os.path.join(output_dir, filename)
        # Check for existence of file.
        if os.path.exists(output):
            logging.info('Skipping download of %s, reusing %s.', uri, output)
            return output
        self._safe_makedirs(output_dir)

        if parsed.scheme not in ['gs', 'http', 'https']:
            raise error.TestFail('Error: Unknown download scheme %s' %
                                 parsed.scheme)
        if parsed.scheme in ['http', 'https']:
            logging.info('Using wget to download %s to %s.', uri, output_dir)
            # We are downloading 1 file at a time, hence using -O over -P.
            utils.run(
                'wget',
                args=(
                    '--report-speed=bits',
                    '-O',
                    output,
                    uri),
                verbose=True)
            return output

        if not client_utils.is_moblab():
            # If the machine can access to the storage server directly,
            # defer to "gsutil" for downloading.
            logging.info('Host %s not in lab. Downloading %s directly to %s.',
                    self._host.hostname, uri, output)
            # b/17445576: gsutil rsync of individual files is not implemented.
            utils.run('gsutil', args=('cp', uri, output), verbose=True)
            return output

        # We are in the moblab. Because the machine cannot access the storage
        # server directly, use dev server to proxy.
        logging.info('Host %s is in lab. Downloading %s by staging to %s.',
                self._host.hostname, uri, output)

        dirname = os.path.dirname(parsed.path)
        archive_url = '%s://%s%s' % (parsed.scheme, parsed.netloc, dirname)

        # First, request the devserver to download files into the lab network.
        # TODO(ihf): Switch stage_artifacts to honor rsync. Then we don't have
        # to shuffle files inside of tarballs.
        info = self._host.host_info_store.get()
        ds = dev_server.ImageServer.resolve(info.build)
        ds.stage_artifacts(info.build, files=[filename],
                           archive_url=archive_url)

        # Then download files from the dev server.
        # TODO(ihf): use rsync instead of wget. Are there 3 machines involved?
        # Itself, dev_server plus DUT? Or is there just no rsync in moblab?
        ds_src = '/'.join([ds.url(), 'static', dirname, filename])
        logging.info('dev_server URL: %s', ds_src)
        # Calls into DUT to pull uri from dev_server.
        utils.run(
                'wget',
                args=(
                        '--report-speed=bits',
                        '-O',
                        output,
                        ds_src),
                verbose=True)
        return output
512
513    def _instance_copy(self, cache_path):
514        """Makes a copy of a file from the (shared) cache to a wholy owned
515        local instance. Also copies one level of cache directoy (MD5 named).
516        """
517        filename = os.path.basename(cache_path)
518        dirname = os.path.basename(os.path.dirname(cache_path))
519        instance_dir = os.path.join(self._tradefed_install, dirname)
520        # Make sure destination directory is named the same.
521        self._safe_makedirs(instance_dir)
522        instance_path = os.path.join(instance_dir, filename)
523        shutil.copyfile(cache_path, instance_path)
524        return instance_path
525
526    def _install_bundle(self, gs_uri):
527        """Downloads a zip file, installs it and returns the local path."""
528        if not gs_uri.endswith('.zip'):
529            raise error.TestFail('Error: Not a .zip file %s.', gs_uri)
530        # Atomic write through of file.
531        with lock(self._tradefed_cache_lock):
532            cache_path = self._download_to_cache(gs_uri)
533            local = self._instance_copy(cache_path)
534
535        unzipped = self._unzip(local)
536        self._abi = 'x86' if 'x86-x86' in unzipped else 'arm'
537        return unzipped
538
539    def _install_files(self, gs_dir, files, permission):
540        """Installs binary tools."""
541        for filename in files:
542            gs_uri = os.path.join(gs_dir, filename)
543            # Atomic write through of file.
544            with lock(self._tradefed_cache_lock):
545                cache_path = self._download_to_cache(gs_uri)
546                local = self._instance_copy(cache_path)
547            os.chmod(local, permission)
548            # Keep track of PATH.
549            self._install_paths.append(os.path.dirname(local))
550
551    def _copy_media(self, media):
552        """Calls copy_media to push media files to DUT via adb."""
553        logging.info('Copying media to device. This can take a few minutes.')
554        copy_media = os.path.join(media, 'copy_media.sh')
555        with pushd(media):
556            try:
557                self._run('file', args=('/bin/sh',), verbose=True,
558                          ignore_status=True, timeout=60,
559                          stdout_tee=utils.TEE_TO_LOGS,
560                          stderr_tee=utils.TEE_TO_LOGS)
561                self._run('sh', args=('--version',), verbose=True,
562                          ignore_status=True, timeout=60,
563                          stdout_tee=utils.TEE_TO_LOGS,
564                          stderr_tee=utils.TEE_TO_LOGS)
565            except:
566                logging.warning('Could not obtain sh version.')
567            self._run(
568                'sh',
569                args=('-e', copy_media, 'all'),
570                timeout=7200,  # Wait at most 2h for download of media files.
571                verbose=True,
572                ignore_status=False,
573                stdout_tee=utils.TEE_TO_LOGS,
574                stderr_tee=utils.TEE_TO_LOGS)
575
576    def _verify_media(self, media):
577        """Verify that the local media directory matches the DUT.
578        Used for debugging b/32978387 where we may see file corruption."""
579        # TODO(ihf): Remove function once b/32978387 is resolved.
580        # Find all files in the bbb_short and bbb_full directories, md5sum these
581        # files and sort by filename, both on the DUT and on the local tree.
582        logging.info('Computing md5 of remote media files.')
583        remote = self._run('adb', args=('shell',
584            'cd /sdcard/test; find ./bbb_short ./bbb_full -type f -print0 | '
585            'xargs -0 md5sum | grep -v "\.DS_Store" | sort -k 2'))
586        logging.info('Computing md5 of local media files.')
587        local = self._run('/bin/sh', args=('-c',
588            ('cd %s; find ./bbb_short ./bbb_full -type f -print0 | '
589            'xargs -0 md5sum | grep -v "\.DS_Store" | sort -k 2') % media))
590
591        # 'adb shell' terminates lines with CRLF. Normalize before comparing.
592        if remote.stdout.replace('\r\n','\n') != local.stdout:
593            logging.error('Some media files differ on DUT /sdcard/test vs. local.')
594            logging.info('media=%s', media)
595            logging.error('remote=%s', remote)
596            logging.error('local=%s', local)
597            # TODO(ihf): Return False.
598            return True
599        logging.info('Media files identical on DUT /sdcard/test vs. local.')
600        return True
601
602    def _push_media(self, CTS_URI):
603        """Downloads, caches and pushed media files to DUT."""
604        media = self._install_bundle(CTS_URI['media'])
605        base = os.path.splitext(os.path.basename(CTS_URI['media']))[0]
606        cts_media = os.path.join(media, base)
607        # TODO(ihf): this really should measure throughput in Bytes/s.
608        m = 'chromeos/autotest/infra_benchmark/cheets/push_media/duration'
609        fields = {'success': False,
610                  'dut_host_name': self._host.hostname}
611        with metrics.SecondsTimer(m, fields=fields) as c:
612            self._copy_media(cts_media)
613            c['success'] = True
614        if not self._verify_media(cts_media):
615            raise error.TestFail('Error: saw corruption pushing media files.')
616
617    def _run(self, *args, **kwargs):
618        """Executes the given command line.
619
620        To support SDK tools, such as adb or aapt, this adds _install_paths
621        to the extra_paths. Before invoking this, ensure _install_files() has
622        been called.
623        """
624        kwargs['extra_paths'] = (
625                kwargs.get('extra_paths', []) + self._install_paths)
626        return utils.run(*args, **kwargs)
627
628    def _collect_tradefed_global_log(self, result, destination):
629        """Collects the tradefed global log.
630
631        @param result: The result object from utils.run.
632        @param destination: Autotest result directory (destination of logs).
633        """
634        match = re.search(r'Saved log to /tmp/(tradefed_global_log_.*\.txt)',
635                          result.stdout)
636        if not match:
637            logging.error('no tradefed_global_log file is found')
638            return
639
640        name = match.group(1)
641        dest = os.path.join(destination, 'logs', 'tmp')
642        self._safe_makedirs(dest)
643        shutil.copy(os.path.join('/tmp', name), os.path.join(dest, name))
644
645    def _parse_tradefed_datetime(self, result, summary=None):
646        """Get the tradefed provided result ID consisting of a datetime stamp.
647
648        Unfortunately we are unable to tell tradefed where to store the results.
649        In the lab we have multiple instances of tradefed running in parallel
650        writing results and logs to the same base directory. This function
651        finds the identifier which tradefed used during the current run and
652        returns it for further processing of result files.
653
654        @param result: The result object from utils.run.
655        @param summary: Test result summary from runs so far.
656        @return datetime_id: The result ID chosen by tradefed.
657                             Example: '2016.07.14_00.34.50'.
658        """
659        # This string is show for both 'run' and 'continue' after all tests.
660        match = re.search(r': XML test result file generated at (\S+). Passed',
661                result.stdout)
662        if not (match and match.group(1)):
663            # TODO(ihf): Find out if we ever recover something interesting in
664            # this case. Otherwise delete it.
665            # Try harder to find the remains. This string shows before all
666            # tests but only with 'run', not 'continue'.
667            logging.warning('XML test result file incomplete?')
668            match = re.search(r': Created result dir (\S+)', result.stdout)
669            if not (match and match.group(1)):
670                error_msg = 'Test did not complete due to Chrome or ARC crash.'
671                if summary:
672                    error_msg += (' Test summary from previous runs: %s'
673                            % summary)
674                raise error.TestFail(error_msg)
675        datetime_id = match.group(1)
676        logging.info('Tradefed identified results and logs with %s.',
677                     datetime_id)
678        return datetime_id
679
680    def _parse_tradefed_datetime_N(self, result, summary=None):
681        """Get the tradefed provided result ID consisting of a datetime stamp.
682
683        Unfortunately we are unable to tell tradefed where to store the results.
684        In the lab we have multiple instances of tradefed running in parallel
685        writing results and logs to the same base directory. This function
686        finds the identifier which tradefed used during the current run and
687        returns it for further processing of result files.
688
689        @param result: The result object from utils.run.
690        @param summary: Test result summary from runs so far.
691        @return datetime_id: The result ID chosen by tradefed.
692                             Example: '2016.07.14_00.34.50'.
693        """
694        # This string is show for both 'run' and 'continue' after all tests.
695        match = re.search(r'(\d\d\d\d.\d\d.\d\d_\d\d.\d\d.\d\d)', result.stdout)
696        if not (match and match.group(1)):
697            error_msg = 'Error: Test did not complete. (Chrome or ARC crash?)'
698            if summary:
699                error_msg += (' Test summary from previous runs: %s'
700                        % summary)
701            raise error.TestFail(error_msg)
702        datetime_id = match.group(1)
703        logging.info('Tradefed identified results and logs with %s.',
704                     datetime_id)
705        return datetime_id
706
707    def _parse_result(self, result, waivers=None):
708        """Check the result from the tradefed output.
709
710        This extracts the test pass/fail/executed list from the output of
711        tradefed. It is up to the caller to handle inconsistencies.
712
713        @param result: The result object from utils.run.
714        @param waivers: a set() of tests which are permitted to fail.
715        """
716        # Parse the stdout to extract test status. In particular step over
717        # similar output for each ABI and just look at the final summary.
718        match = re.search(r'(XML test result file generated at (\S+). '
719                 r'Passed (\d+), Failed (\d+), Not Executed (\d+))',
720                 result.stdout)
721        if not match:
722            raise error.Test('Test log does not contain a summary.')
723
724        passed = int(match.group(3))
725        failed = int(match.group(4))
726        not_executed = int(match.group(5))
727        match = re.search(r'(Start test run of (\d+) packages, containing '
728                          r'(\d+(?:,\d+)?) tests)', result.stdout)
729        if match and match.group(3):
730            tests = int(match.group(3).replace(',', ''))
731        else:
732            # Unfortunately this happens. Assume it made no other mistakes.
733            logging.warning('Tradefed forgot to print number of tests.')
734            tests = passed + failed + not_executed
735        # TODO(rohitbm): make failure parsing more robust by extracting the list
736        # of failing tests instead of searching in the result blob. As well as
737        # only parse for waivers for the running ABI.
738        if waivers:
739            for testname in waivers:
740                # TODO(dhaddock): Find a more robust way to apply waivers.
741                fail_count = result.stdout.count(testname + ' FAIL')
742                if fail_count:
743                    if fail_count > 2:
744                        raise error.TestFail('Error: There are too many '
745                                             'failures found in the output to '
746                                             'be valid for applying waivers. '
747                                             'Please check output.')
748                    failed -= fail_count
749                    # To maintain total count consistency.
750                    passed += fail_count
751                    logging.info('Waived failure for %s %d time(s)',
752                                 testname, fail_count)
753        logging.info('tests=%d, passed=%d, failed=%d, not_executed=%d',
754                tests, passed, failed, not_executed)
755        if failed < 0:
756            raise error.TestFail('Error: Internal waiver book keeping has '
757                                 'become inconsistent.')
758        return (tests, passed, failed, not_executed)
759
760    # TODO(kinaba): Add unit test.
761    def _parse_result_v2(self, result, accumulative_count=False, waivers=None):
762        """Check the result from the tradefed-v2 output.
763
764        This extracts the test pass/fail/executed list from the output of
765        tradefed. It is up to the caller to handle inconsistencies.
766
767        @param result: The result object from utils.run.
768        @param accumulative_count: set True if using an old version of tradefed
769                                   that prints test count in accumulative way.
770        @param waivers: a set() of tests which are permitted to fail.
771        """
772        # Regular expressions for start/end messages of each test-run chunk.
773        abi_re = r'armeabi-v7a|x86'
774        # TODO(kinaba): use the current running module name.
775        module_re = r'\S+'
776        start_re = re.compile(r'(?:Start|Continu)ing (%s) %s with'
777                              r' (\d+(?:,\d+)?) test' % (abi_re, module_re))
778        end_re = re.compile(r'(%s) %s (?:complet|fail)ed in .*\.'
779                            r' (\d+) passed, (\d+) failed, (\d+) not executed'
780                            % (abi_re, module_re))
781
782        # Records the result per each ABI.
783        total_test = dict()
784        total_pass = dict()
785        total_fail = dict()
786        last_notexec = dict()
787
788        # ABI and the test count for the current chunk.
789        abi = None
790        ntest = None
791
792        for line in result.stdout.splitlines():
793            # Beginning of a chunk of tests.
794            match = start_re.search(line)
795            if match:
796               if abi:
797                   raise error.TestFail('Error: Unexpected test start: ' + line)
798               abi = match.group(1)
799               ntest = int(match.group(2).replace(',',''))
800            else:
801               # End of the current chunk.
802               match = end_re.search(line)
803               if not match:
804                   continue
805
806               if abi != match.group(1):
807                   raise error.TestFail('Error: Unexpected test end: ' + line)
808               npass, nfail, nnotexec = map(int, match.group(2,3,4))
809
810               # When the test crashes too ofen, tradefed seems to finish the
811               # iteration by running "0 tests, 0 passed, ...". Do not count
812               # that in.
813               if ntest > 0:
814                   if accumulative_count:
815                       total_test[abi] = ntest
816                       total_pass[abi] = npass
817                       total_fail[abi] = nfail
818                   else:
819                       total_test[abi] = (total_test.get(abi, 0) + ntest -
820                           last_notexec.get(abi, 0))
821                       total_pass[abi] = total_pass.get(abi, 0) + npass
822                       total_fail[abi] = total_fail.get(abi, 0) + nfail
823                   last_notexec[abi] = nnotexec
824               abi = None
825
826        if abi:
827            raise error.TestFail('Error: No end message for the last chunk.')
828
829        # TODO(rohitbm): make failure parsing more robust by extracting the list
830        # of failing tests instead of searching in the result blob. As well as
831        # only parse for waivers for the running ABI.
832        waived = 0
833        if waivers:
834            abis = total_test.keys()
835            for testname in waivers:
836                # TODO(dhaddock): Find a more robust way to apply waivers.
837                fail_count = (result.stdout.count(testname + ' FAIL') +
838                              result.stdout.count(testname + ' fail'))
839                if fail_count:
840                    if fail_count > len(abis):
841                        raise error.TestFail('Error: Found %d failures for %s '
842                                             'but there are only %d abis: %s' %
843                                             (fail_count, testname, len(abis),
844                                             abis))
845                    waived += fail_count
846                    logging.info('Waived failure for %s %d time(s)',
847                                 testname, fail_count)
848        counts = tuple(sum(count_per_abi.values()) for count_per_abi in
849            (total_test, total_pass, total_fail, last_notexec)) + (waived,)
850        msg = ('tests=%d, passed=%d, failed=%d, not_executed=%d, waived=%d' %
851               counts)
852        logging.info(msg)
853        if counts[2] - waived < 0:
854            raise error.TestFail('Error: Internal waiver bookkeeping has '
855                                 'become inconsistent (%s)' % msg)
856        return counts
857
858    def _collect_logs(self, repository, datetime, destination):
859        """Collects the tradefed logs.
860
861        It is legal to collect the same logs multiple times. This is normal
862        after 'tradefed continue' updates existing logs with new results.
863
864        @param repository: Full path to tradefeds output on disk.
865        @param datetime: The identifier which tradefed assigned to the run.
866                         Currently this looks like '2016.07.14_00.34.50'.
867        @param destination: Autotest result directory (destination of logs).
868        """
869        logging.info('Collecting tradefed testResult.xml and logs to %s.',
870                     destination)
871        repository_results = os.path.join(repository, 'results')
872        repository_logs = os.path.join(repository, 'logs')
873        # Because other tools rely on the currently chosen Google storage paths
874        # we need to keep destination_results in
875        # cheets_CTS.*/results/android-cts/2016.mm.dd_hh.mm.ss(/|.zip)
876        # and destination_logs in
877        # cheets_CTS.*/results/android-cts/logs/2016.mm.dd_hh.mm.ss/
878        destination_results = destination
879        destination_results_datetime = os.path.join(destination_results,
880                                                    datetime)
881        destination_results_datetime_zip = destination_results_datetime + '.zip'
882        destination_logs = os.path.join(destination, 'logs')
883        destination_logs_datetime = os.path.join(destination_logs, datetime)
884        # We may have collected the same logs before, clean old versions.
885        if os.path.exists(destination_results_datetime_zip):
886            os.remove(destination_results_datetime_zip)
887        if os.path.exists(destination_results_datetime):
888            shutil.rmtree(destination_results_datetime)
889        if os.path.exists(destination_logs_datetime):
890            shutil.rmtree(destination_logs_datetime)
891        shutil.copytree(
892                os.path.join(repository_results, datetime),
893                destination_results_datetime)
894        # Copying the zip file has to happen after the tree so the destination
895        # directory is available.
896        shutil.copy(
897                os.path.join(repository_results, datetime) + '.zip',
898                destination_results_datetime_zip)
899        shutil.copytree(
900                os.path.join(repository_logs, datetime),
901                destination_logs_datetime)
902
903    def _get_expected_failures(self, directory):
904        """Return a list of expected failures.
905
906        @return: a list of expected failures.
907        """
908        logging.info('Loading expected failures from %s.', directory)
909        expected_fail_dir = os.path.join(self.bindir, directory)
910        expected_fail_files = glob.glob(expected_fail_dir + '/*.' + self._abi)
911        expected_failures = set()
912        for expected_fail_file in expected_fail_files:
913            try:
914                file_path = os.path.join(expected_fail_dir, expected_fail_file)
915                with open(file_path) as f:
916                    lines = set(f.read().splitlines())
917                    logging.info('Loaded %d expected failures from %s',
918                                 len(lines), expected_fail_file)
919                    expected_failures |= lines
920            except IOError as e:
921                logging.error('Error loading %s (%s).', file_path, e.strerror)
922        logging.info('Finished loading expected failures: %s',
923                     expected_failures)
924        return expected_failures
925