1#
2# Copyright 2008 Google Inc. Released under the GPL v2
3
4#pylint: disable=missing-docstring
5
import StringIO
import errno
import functools
import itertools
import logging
import os
import pickle
import random
import re
import resource
import select
import shutil
import signal
import socket
import string
import struct
import subprocess
import textwrap
import time
import urllib2
import urlparse
import warnings

try:
    import hashlib
except ImportError:
    import md5
    import sha

from autotest_lib.client.common_lib import error, logging_manager
35
36
def deprecated(func):
    """Decorator that marks a function as deprecated.

    Calling the wrapped function emits a DeprecationWarning (which our
    module-level warning handler routes into the logs) and then forwards
    the call unchanged.

    @param func: the function to mark as deprecated.
    @return: a wrapper that warns, then calls func with the same arguments.
    """
    # functools.wraps copies __name__, __doc__ and __dict__ like the old
    # hand-written code did, and additionally preserves __module__ and
    # records __wrapped__ for introspection.
    @functools.wraps(func)
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    return new_func
48
49
50class _NullStream(object):
51    def write(self, data):
52        pass
53
54
55    def flush(self):
56        pass
57
58
# Sentinel: pass as a *_tee argument to request that the stream be routed
# through the logging manager instead of a real file object.
TEE_TO_LOGS = object()
# Shared do-nothing stream, used wherever output should be discarded.
_the_null_stream = _NullStream()

# Default logging levels for tee'd subprocess stdout/stderr.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
SHELL_QUOTING_WHITELIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=')
73
74
def custom_warning_handler(message, category, filename, lineno, file=None,
                           line=None):
    """Route python warnings into the log at WARNING level. |file| is ignored."""
    formatted = warnings.formatwarning(message, category, filename, lineno,
                                       line)
    logging.warning(formatted)

warnings.showwarning = custom_warning_handler
82
def get_stream_tee_file(stream, level, prefix=''):
    """Map a *_tee argument onto the stream output should be written to.

    TEE_TO_LOGS selects a LoggingFile at the given level/prefix, None
    selects the shared null stream, and anything else is returned as-is.
    """
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
89
90
91def _join_with_nickname(base_string, nickname):
92    if nickname:
93        return '%s BgJob "%s" ' % (base_string, nickname)
94    return base_string
95
96
# TODO: Cleanup and possibly eliminate no_pipes, which is only used
# in our master-ssh connection process, while fixing underlying
# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 no_pipes=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: Optional additional stream that the process's stdout
                           stream output will be written to. Or, specify
                           base_utils.TEE_TO_LOGS and the output will handled by
                           the standard logging_manager.
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stderr_level: A logging level value. If stderr_tee was set to
                             base_utils.TEE_TO_LOGS, sets the level that tee'd
                             stderr output will be logged at. Ignored
                             otherwise.
        @param nickname: Optional string, to be included in logging messages
        @param no_pipes: Boolean, default False. If True, this subprocess
                         created by this BgJob does NOT use subprocess.PIPE
                         for its stdin or stderr streams. Instead, these
                         streams are connected to the logging manager
                         (regardless of the values of stdout_tee and
                         stderr_tee).
                         If no_pipes is True, then calls to output_prepare,
                         process_output, and cleanup will result in an
                         InvalidBgJobCall exception. no_pipes should be
                         True for BgJobs that do not interact via stdout/stderr
                         with other BgJobs, or long runing background jobs that
                         will never be joined with join_bg_jobs, such as the
                         master-ssh connection BgJob.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self._no_pipes = no_pipes
        if no_pipes:
            stdout_tee = TEE_TO_LOGS
            stderr_tee = TEE_TO_LOGS
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None


        if no_pipes:
            stdout_param = self.stdout_tee
            stderr_param = self.stderr_tee
        else:
            stdout_param = subprocess.PIPE
            stderr_param = subprocess.PIPE

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)
        # isinstance (instead of an exact type() check) so that list
        # subclasses also take the argv path rather than falling through
        # to the shell=True string path.
        if isinstance(command, list):
            self.sp = subprocess.Popen(command,
                                       stdout=stdout_param,
                                       stderr=stderr_param,
                                       preexec_fn=self._reset_sigpipe,
                                       stdin=stdin,
                                       env=env,
                                       close_fds=True)
        else:
            self.sp = subprocess.Popen(command, stdout=stdout_param,
                                       stderr=stderr_param,
                                       preexec_fn=self._reset_sigpipe, shell=True,
                                       executable="/bin/bash",
                                       stdin=stdin,
                                       env=env,
                                       close_fds=True)

        self._output_prepare_called = False
        self._process_output_warned = False
        self._cleanup_called = False
        self.stdout_file = _the_null_stream
        self.stderr_file = _the_null_stream

    def output_prepare(self, stdout_file=_the_null_stream,
                       stderr_file=_the_null_stream):
        """Connect the subprocess's stdout and stderr to streams.

        Subsequent calls to output_prepare are permitted, and will reassign
        the streams. However, this will have the side effect that the ultimate
        call to cleanup() will only remember the stdout and stderr data up to
        the last output_prepare call when saving this data to BgJob.result.

        @param stdout_file: Stream that output from the process's stdout pipe
                            will be written to. Default: a null stream.
        @param stderr_file: Stream that output from the process's stderr pipe
                            will be written to. Default: a null stream.
        """
        if self._no_pipes:
            raise error.InvalidBgJobCall('Cannot call output_prepare on a '
                                         'job with no_pipes=True.')
        if self._output_prepare_called:
            # Supply self.command for the %s placeholder; the old code left
            # the format argument out entirely.
            logging.warning('BgJob [%s] received a duplicate call to '
                            'output prepare. Allowing, but this may result '
                            'in data missing from BgJob.result.',
                            self.command)
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file
        self._output_prepare_called = True


    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self._no_pipes:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with no_pipes=True')
        if not self._output_prepare_called and not self._process_output_warned:
            logging.warning('BgJob with command [%s] handled a process_output '
                            'call before output_prepare was called. '
                            'Some output data discarded. '
                            'Future warnings suppressed.',
                            self.command)
            self._process_output_warned = True
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)


    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self._no_pipes:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with no_pipes=True')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            self.stdout_tee.flush()
            self.stderr_tee.flush()
            self.sp.stdout.close()
            self.sp.stderr.close()
            self.result.stdout = self.stdout_file.getvalue()
            self.result.stderr = self.stderr_file.getvalue()
        finally:
            self._cleanup_called = True


    def _reset_sigpipe(self):
        # Children should get the default SIGPIPE disposition, not
        # python's SIG_IGN, so shell pipelines terminate normally.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
311
312
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its integer representation."""
    # '!L' = unsigned 32-bit value in network (big-endian) byte order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
316
317
def long_to_ip(number):
    """Convert an integer IPv4 value back to dotted-quad string form."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
321
322
def create_subnet_mask(bits):
    """Return the 32-bit integer netmask with the top `bits` bits set."""
    return ((1 << 32) - 1) & ~((1 << (32 - bits)) - 1)
325
326
def format_ip_with_mask(ip, mask_bits):
    """Return ip masked down to mask_bits, in CIDR 'a.b.c.d/len' notation."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return '%s/%s' % (long_to_ip(network), mask_bits)
330
331
def normalize_hostname(alias):
    """Resolve alias to an IP, then return the canonical hostname for it."""
    canonical, _, _ = socket.gethostbyaddr(socket.gethostbyname(alias))
    return canonical
335
336
def get_ip_local_port_range():
    """Return the (lower, upper) ephemeral port bounds read from /proc."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    lower, upper = match.groups()
    return (int(lower), int(upper))
341
342
def set_ip_local_port_range(lower, upper):
    """Write the ephemeral port range [lower, upper] into /proc."""
    contents = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', contents)
346
347
def read_one_line(filename):
    """Return the first line of filename, without its trailing newline.

    The file handle is now closed deterministically via 'with' instead of
    being left for the garbage collector.

    @param filename: path of the file to read.
    @return: the first line, with any trailing newline stripped.
    """
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
350
351
def read_file(filename):
    """Return the entire contents of filename as a string.

    @param filename: path of the file to read.
    @return: the full file contents.
    """
    # 'with' closes the file even if read() raises, replacing the old
    # explicit try/finally.
    with open(filename) as f:
        return f.read()
358
359
def get_field(data, param, linestart="", sep=" "):
    """
    Parse a single field out of multi-line data.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.
    @return: the requested field as a string, or None when no line starts
             with linestart.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    # 'is not None': a zero-length match object would still be a real match.
    if find is not None:
        return re.split(sep, find.group(1))[param]
    print("There is no line which starts with %s in data." % linestart)
    return None
382
383
def write_one_line(filename, line):
    """Overwrite filename with line, normalized to exactly one trailing newline."""
    contents = str(line).rstrip('\n') + '\n'
    open_write_close(filename, contents)
386
387
def open_write_close(filename, data):
    """Overwrite filename with data, closing the file even on error.

    @param filename: path of the file to (re)write.
    @param data: string contents to write.
    """
    # 'with' replaces the explicit try/finally close.
    with open(filename, 'w') as f:
        f.write(data)
394
395
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.
    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if base_dir is not None and not os.path.isabs(path):
        # Resolve the relative path against the supplied base directory.
        path = os.path.join(base_dir, path)
    if os.path.isfile(path):
        return path
    raise error.TestFail('ERROR: Unable to find %s' % path)
415
416
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    @return: the formatted matrix, one newline-terminated line per row.
    """
    def cell_width(cell):
        # Width of the cell as displayed text. 'unicode' only exists on
        # python 2 (where we measure the utf-8 encoded bytes, as before);
        # fall back to str on interpreters without it.
        try:
            return len(unicode(cell).encode("utf-8"))
        except NameError:
            return len(str(cell))

    if type(header) is list:
        header = tuple(header)
    lengths = []
    if header:
        for column in header:
            lengths.append(len(column))
    for row in matrix:
        for i, column in enumerate(row):
            width = cell_width(column)
            # Track the widest cell seen per column, growing the list as
            # wider rows appear (replaces the old try/except IndexError).
            if i < len(lengths):
                if width > lengths[i]:
                    lengths[i] = width
            else:
                lengths.append(width)

    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
458
459
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.
    @return: dict mapping keys to int/float/str values.
    @raises ValueError: if a non-blank line does not match the expected
            'key=value' format.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' guarantees the file is closed even when ValueError is raised
    # below; the old code leaked the handle in that case.
    with open(path) as f:
        for line in f:
            # Strip comments and surrounding whitespace; skip blank lines.
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            # Promote purely-numeric values to int, then float.
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
499
500
def write_keyval(path, dictionary, type_tag=None, tap_report=None):
    """
    Append a dictionary to a key-value pair format file.

    Append mode is used, so existing text is never overwritten or
    reparsed. Without a type_tag, keys must be alphanumeric (plus
    dashes/underscores/dots); with one, each key must additionally carry
    a "{type_tag}" suffix. Only "attr" and "perf" tags are accepted.

    @param path: full path of the file to be written (or a directory, in
            which case the file is named 'keyval' inside it)
    @param dictionary: the items to write
    @param type_tag: see text above
    @param tap_report: optional TAP report object to also record into
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval_file = open(path, 'a')

    if type_tag is None:
        key_pattern = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        key_pattern = re.compile(r'^[-\.\w]+\{%s\}$' % re.escape(type_tag))

    try:
        for key in sorted(dictionary.keys()):
            if not key_pattern.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval_file.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval_file.close()

    # same for tap
    if tap_report is not None and tap_report.do_tap_report:
        tap_report.record_keyval(path, dictionary, type_tag=type_tag)
538
539
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
545
546
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # urllib2.urlopen here takes no timeout parameter, so temporarily
    # override the process-wide socket default and restore it afterwards.
    previous = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(previous)
557
558
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url.

    @param url: source URL to fetch.
    @param filename: local destination path, opened in binary write mode.
    @param data: optional POST data, forwarded to urlopen.
    @param timeout: socket timeout in seconds for the transfer.
    """
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # 'with' replaces the old nested try/finally for the destination.
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
572
573
def hash(type, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ('md5', 'sha1'):
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        hash_obj = hashlib.new(type)
    except NameError:
        # hashlib is unavailable (python 2.4); fall back to md5/sha modules.
        hash_obj = md5.new() if type == 'md5' else sha.new()

    if input:
        hash_obj.update(input)

    return hash_obj
602
603
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return

    # Remote URLs are downloaded; local paths are copied.
    fetch = urlretrieve if is_url(src) else shutil.copyfile
    fetch(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
617
618
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # Fetch the URL into destdir, keeping the URL path's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
637
638
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: directory whose installed version is tracked in a
            pickled '.version' file inside it.
    @param preserve_srcdir: if True, never delete srcdir before
            reinstalling.
    @param new_version: version identifier to compare against / record.
    @param install: callable run to (re)install; receives *args/**dargs.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Binary mode + 'with': pickle streams are bytes, and the old code
        # leaked the file handle.
        with open(versionfile, 'rb') as f:
            old_version = pickle.load(f)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as f:
                pickle.dump(new_version, f)
663
664
def get_stderr_level(stderr_is_expected):
    """Pick the tee logging level for stderr.

    Expected stderr is demoted to the stdout (DEBUG) level; unexpected
    stderr keeps the ERROR level.
    """
    return DEFAULT_STDOUT_LEVEL if stderr_is_expected else DEFAULT_STDERR_LEVEL
669
670
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=(), nickname=None, ignore_timeout=False,
        env=None, extra_paths=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True, stderr will be logged at the same level
            as stdout
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument
    @param nickname: Short string that will appear in logging messages
                     associated with this command.
    @param ignore_timeout: If True, timeouts are ignored otherwise if a
            timeout occurs it will raise CmdTimeoutError.
    @param env: Dict containing environment variables used in a subprocess.
    @param extra_paths: Optional string list, to be prepended to the PATH
                        env variable in env (or os.environ dict if env is
                        not specified).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # A command may arrive as a list (e.g. from get_user_hash in
    # client/cros/cryptohome.py); normalize it to a single shell-quoted
    # string before appending the quoted extra args.
    if not isinstance(command, basestring):
        command = ' '.join(sh_quote_word(part) for part in command)
    command = ' '.join([command] + [sh_quote_word(arg) for arg in args])

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    try:
        bg_job = join_bg_jobs(
            (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                   stderr_level=get_stderr_level(stderr_is_expected),
                   nickname=nickname, env=env, extra_paths=extra_paths),),
            timeout)[0]
    except error.CmdTimeoutError:
        if ignore_timeout:
            return None
        raise

    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
743
744
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @param commands: list of command strings to launch concurrently.
    @param timeout: overall timeout applied to the whole group of jobs.
    @param nicknames: optional list of logging nicknames, paired with
            commands positionally. Defaults to None rather than the old
            mutable-default-argument list.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    for (command, nickname) in itertools.izip_longest(commands,
                                                      nicknames or []):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the command of the job that actually failed; the old
            # code raised with the stale loop variable from the launch
            # loop above (always the last command).
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
772
773
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return (job.sp, job.result)
779
780
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Waits up to timeout seconds for every job to finish, draining each
    job's stdout/stderr into per-job StringIO buffers, then cleans up the
    pipes regardless of outcome.

    @param bg_jobs: sequence of BgJob objects to wait on.
    @param timeout: overall timeout in seconds, or None to wait forever.

    @return: the same list of bg_jobs objects that was passed in.
    @raises error.CmdTimeoutError: if the jobs did not complete in time.
    """
    # (The old unused 'ret' local has been dropped.)
    timeout_error = False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)


    return bg_jobs
815
816
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    Feeds queued stdin data to each job, drains stdout/stderr as it becomes
    readable, and reaps jobs as they exit.  Jobs still running when the
    timeout expires are killed via nuke_subprocess().

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    # fds polled for readability (each job's stdout/stderr) and for
    # writability (stdin of jobs that still have queued input to send).
    read_list = []
    write_list = []
    # Maps each fd back to its owner: stdout/stderr fds map to a
    # (bg_job, is_stdout) pair, stdin fds map to the bg_job itself.
    reverse_dict = {}

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            # v[0] is the errno (Python 2 exception instances are indexable).
            if v[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
920
921
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        contents = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            raise
        # file went away
        return False

    # Field 3 of /proc/<pid>/stat is the process state; 'Z' marks a zombie.
    return contents.split()[2] != 'Z'
939
940
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Give the process up to five seconds to go away.
    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False
959
960
def nuke_subprocess(subproc):
    """Kill a subprocess.Popen, escalating SIGTERM -> SIGKILL.

    Returns the exit status if the process died, or None if it survived
    even SIGKILL.
    """
    # check if the subprocess is still alive, first
    status = subproc.poll()
    if status is not None:
        return status

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
973
974
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill a pid by sending an escalating series of signals.

    @param pid: The process id to kill.
    @param signal_queue: Signals to try, in order, until one terminates
            the process.

    @raises error.AutoservPidAlreadyDeadError: If the pid has no /proc entry.
    @raises error.AutoservRunError: If no signal terminated the process.
    """
    pid_path = '/proc/%d/'
    if not os.path.exists(pid_path % pid):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # Fix: the message was previously passed as ('msg %s.', pid) — two
        # constructor arguments instead of a formatted string.
        raise error.AutoservPidAlreadyDeadError('Could not kill nonexistent '
                                                'pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
991
992
def system(command, timeout=None, ignore_status=False):
    """
    Run a command
    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1007
1008
def system_parallel(commands, timeout=None, ignore_status=False):
    """Run commands in parallel; return their exit statuses in order."""
    jobs = run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                        stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [job.exit_status for job in jobs]
1015
1016
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    run_kwargs = dict(timeout=timeout, ignore_status=ignore_status, args=args)
    if retain_output:
        run_kwargs.update(stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    out = run(command, **run_kwargs).stdout
    # Drop a single trailing newline, if present.
    if out.endswith('\n'):
        out = out[:-1]
    return out
1047
1048
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return each command's stdout.

    @param commands: list of command strings to run in parallel.
    @param timeout: time limit in seconds for the whole batch.
    @param ignore_status: do not raise on non-zero exit codes.
    @param retain_output: also send stdout/stderr to the logging system.

    @return: list of stdout strings, one per command, each with a single
            trailing newline (if any) removed.
    """
    if retain_output:
        jobs = run_parallel(commands, timeout=timeout,
                            ignore_status=ignore_status,
                            stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        jobs = run_parallel(commands, timeout=timeout,
                            ignore_status=ignore_status)
    # Fix: the original loop compared a list slice (out[-1:]) against '\n',
    # which is never true, so no newline was ever stripped. Strip the
    # trailing newline from each command's output, matching system_output().
    return [stdout[:-1] if stdout.endswith('\n') else stdout
            for stdout in (job.stdout for job in jobs)]
1062
1063
def strip_unicode(input):
    """Recursively convert unicode strings in lists/dicts to byte strings.

    Lists and dicts are rebuilt with their contents converted; dict keys
    are stringified.  Any other value is returned unchanged.  (Python 2:
    relies on the 'unicode' builtin.)
    """
    input_type = type(input)
    if input_type == list:
        return [strip_unicode(element) for element in input]
    if input_type == dict:
        return dict((str(key), strip_unicode(value))
                    for key, value in input.items())
    if input_type == unicode:
        return str(input)
    return input
1076
1077
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: callable to time.
    @param args: positional arguments forwarded to function.
    @param dargs: keyword arguments forwarded to function.

    @return: (cpu_percent, return_value) where cpu_percent is the combined
            self+children user and system CPU time divided by the wall
            clock time of the call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    # The first two fields of a rusage struct are ru_utime and ru_stime.
    # NOTE: zip(...)[:2] relies on Python 2's zip() returning a list.
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
1098
1099
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    If specified, run_function should return a CmdResult object and throw a
    CmdError exception.
    If run_function is anything other than utils.run(), it is used to
    execute the commands. By default (when set to utils.run()) this will
    just examine os.uname()[4].
    """
    # Short circuit from the common case: local machine, no command needed.
    if run_function == run:
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise, use the run_function in case it hits a remote machine.
    arch = run_function('/bin/uname -m').stdout.rstrip()
    return 'i386' if re.match(r'i\d86$', arch) else arch
1119
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).
    """
    # (arch name, regex matched against `file` output for /bin/sh)
    arch_patterns = (
        ('arm', 'ELF 32-bit.*, ARM,'),
        ('i386', 'ELF 32-bit.*, Intel 80386,'),
        ('x86_64', 'ELF 64-bit.*, x86-64,'),
    )

    filestr = run_function(
            'file --brief --dereference /bin/sh').stdout.rstrip()
    for arch_name, pattern in arch_patterns:
        if re.match(pattern, filestr):
            return arch_name

    # Fall back to the kernel architecture if no pattern matched.
    return get_arch()
1137
1138
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    cpuinfo = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    sibling_counts = [int(value) for value in
                      re.findall(r'^siblings\s*:\s*(\d+)\s*$', cpuinfo, re.M)]
    if not sibling_counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(sibling_counts) != max(sibling_counts):
        raise error.TestError('Number of siblings differ %r' %
                              sibling_counts)
    return sibling_counts[0]
1157
1158
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # exists only in dest; nothing to merge
        return
    if not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest) # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True) # dir only in src
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        with open(src) as src_file:
            payload = src_file.read()
        with open(dest, "a") as dest_file:
            dest_file.write(payload)
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for entry in os.listdir(src):
            merge_trees(os.path.join(src, entry), os.path.join(dest, entry))
    else:
        # src & dest both exist, but are incompatible; skip them
        return
1194
1195
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        """Record the command line and its captured results."""
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        """Render a multi-line human-readable summary of the result."""
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        parts = ["* Command: %s\n" % wrapper.fill(str(self.command)),
                 "Exit status: %s\n" % self.exit_status,
                 "Duration: %s\n" % self.duration]

        # Only include stdout/stderr sections when they have content.
        out = self.stdout.rstrip()
        if out:
            parts.append("\nstdout:\n%s" % out)
        err = self.stderr.rstrip()
        if err:
            parts.append("\nstderr:\n%s" % err)

        return "".join(parts)
1237
1238
class run_randomly:
    """Collects tests and runs them in a random (or sequential) order."""

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one test invocation; args are passed through to run's fn."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued tests in random order and invoke fn on each."""
        while self.test_list:
            index = random.randrange(len(self.test_list))
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1258
1259
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    # The unqualified module name, e.g. 'bar' for 'foo.bar'.
    short_module = module[module.rfind(".") + 1:]
    site_file = modulefile or (short_module + ".py")

    if not os.path.exists(os.path.join(os.path.dirname(path), site_file)):
        return dummy
    return __import__(module, {}, {}, [short_module])
1281
1282
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # special unique value to tell us if the symbol can't be imported
    sentinel = object()
    value = getattr(site_module, name, sentinel)
    return dummy if value is sentinel else value
1309
1310
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Not a subclass of baseclass: mix baseclass into the site specific
    # class and return the combined type.
    return type(classname, (site_class, baseclass), {})
1341
1342
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just module attributes, so defer to the symbol lookup.
    return import_site_symbol(path, module, funcname, dummy, modulefile)
1360
1361
1362def _get_pid_path(program_name):
1363    my_path = os.path.dirname(__file__)
1364    return os.path.abspath(os.path.join(my_path, "..", "..",
1365                                        "%s.pid" % program_name))
1366
1367
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    # 'with' guarantees the file is closed even if the write fails.
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1380
1381
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    path = _get_pid_path(program_name)

    try:
        os.remove(path)
    except OSError:
        # Only swallow the error when the file is genuinely gone.
        if os.path.exists(path):
            raise
1394
1395
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    path = _get_pid_path(program_name)
    if not os.path.exists(path):
        return None

    pidfile = open(path, 'r')
    try:
        try:
            return int(pidfile.readline())
        except IOError:
            # Tolerate the file disappearing between the read attempts.
            if os.path.exists(path):
                raise
            return None
    finally:
        pidfile.close()
1420
1421
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead PID' if it does not.
    """
    stat_path = "/proc/%d/stat" % pid
    if not os.path.exists(stat_path):
        return "Dead Pid"
    # Field 2 of /proc/<pid>/stat is the comm name wrapped in parentheses;
    # slice them off.
    return get_field(read_file(stat_path), 1)[1:-1]
1432
1433
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return False if pid is None else pid_is_alive(pid)
1445
1446
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        return
    signal_pid(pid, sig)
1457
1458
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed

    @return the relative path; os.curdir ('.') when path == reference.
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # Length of the longest leading common component sequence.
    # Fix: the original loop left its index unset when either list was
    # empty (NameError) and crashed on os.path.join(*[]) when the paths
    # were identical.
    common = 0
    for path_part, ref_part in zip(path_list, ref_list):
        if path_part != ref_part:
            break
        common += 1

    # For each uncommon reference component climb one level with '..',
    # then descend into the uncommon part of path.
    relative = ['..'] * (len(ref_list) - common) + path_list[common:]
    if not relative:
        # path and reference are the same directory
        return os.curdir
    return os.path.join(*relative)
1493
1494
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so we don't re-escape the
    # backslashes introduced for the other characters.
    escaped = command.replace("\\", "\\\\")
    for char in ("$", '"', "`"):
        escaped = escaped.replace(char, "\\" + char)
    return escaped
1515
1516
def sh_quote_word(text, whitelist=SHELL_QUOTING_WHITELIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string.  So, single quotes can safely quote any string of characters except
    a string with a single quote character.  A single quote character must be
    quoted with the sequence '\'' which translates to:
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers.

    This is also safe for nesting, e.g. the following is a valid use:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param whitelist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    # Strings made entirely of whitelisted characters need no quoting.
    needs_quoting = any(char not in whitelist for char in text)
    if not needs_quoting:
        return text
    return "'%s'" % text.replace("'", r"'\''")
1545
1546
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Map each configure flag to the environment variable that supplies it.
    for flag, env_var in (('--host', 'CHOST'),
                          ('--build', 'CBUILD'),
                          ('--target', 'CTARGET')):
        if env_var in os.environ:
            args.append('%s=%s' % (flag, os.environ[env_var]))
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1565
1566
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    make_opts = os.environ.get('MAKEOPTS', '')
    return system('%s %s %s' % (make, make_opts, extra),
                  timeout=timeout, ignore_status=ignore_status)
1575
1576
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    # Split on '.' and '-' and compare component-wise.  Zero-padding each
    # pair of components to the same width makes the lexicographic
    # comparison behave numerically for digit runs while still ordering
    # letter suffixes (e.g. "1.2a" < "1.2b").
    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while len(ax) > 0 and len(ay) > 0:
        cx = ax.pop(0)
        cy = ay.pop(0)
        maxlen = max(len(cx), len(cy))
        # cmp() is a Python 2 builtin (removed in Python 3).
        c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components are equal; the version with more components
    # sorts higher.
    return cmp(len(ax), len(ay))
1610
1611
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary mapping lowercased keys to their (unmodified) values.
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # Fix: renamed the result from 'dict', which shadowed the builtin.
    args_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            args_dict[match.group(1).lower()] = match.group(2)
        else:
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
    return args_dict
1632
1633
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def _try_bind(port, socket_type, socket_proto):
        """Bind to port; return the bound port number, or None on failure."""
        sock = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
            return sock.getsockname()[1]
        except socket.error:
            return None
        finally:
            sock.close()

    # On the 2.6 kernel, calling _try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    while True:
        # Ask the OS for an unused TCP port, then verify it on UDP too.
        tcp_port = _try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if tcp_port and _try_bind(tcp_port, socket.SOCK_DGRAM,
                                  socket.IPPROTO_UDP):
            return tcp_port
1664
1665
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y", question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return raw_input("%s INFO | %s (y/n) " % (timestamp, question))
1678
1679
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.

    @param address: MSR register address to read.
    @param cpu: CPU number whose MSR device file is read (default 0).
    """
    # '=Q' unpacks 8 bytes as a native-order unsigned 64-bit value.
    with open('/dev/cpu/%s/msr' % cpu, 'r', 0) as msr_file:
        msr_file.seek(address)
        return struct.unpack('=Q', msr_file.read(8))[0]
1687
1688
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Returns the value of func().  If |expected_value|, |min_threshold|, and
    |max_threshold| are not set, returns immediately.

    If |expected_value| is set, polls the return value until |expected_value| is
    reached, and returns that value.

    If either |max_threshold| or |min_threshold| is set, this function will
    will repeatedly call func() until the return value reaches or exceeds one of
    these thresholds.

    Polling will stop after |timeout_sec| regardless of these thresholds.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    Return value:
        The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        value = func()

        # No stop criteria at all means a single call suffices.
        no_criteria = (expected_value is None and
                       min_threshold is None and
                       max_threshold is None)
        if no_criteria:
            return value
        if expected_value is not None and value == expected_value:
            return value
        if min_threshold is not None and value <= min_threshold:
            return value
        if max_threshold is not None and value >= max_threshold:
            return value

        if time.time() >= deadline:
            return value
        time.sleep(0.1)
1734
1735
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Polls func() until it returns something other than |old_value|.

    func() is sampled every 0.1s; the first value that differs from
    |old_value| is returned.  If |timeout_sec| elapses first, the most
    recent sample is returned instead.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        latest = func()
        # Short-circuit: a changed value wins even at the deadline.
        if latest != old_value or time.time() >= deadline:
            return latest
        time.sleep(0.1)
1766