1# Lint as: python2, python3
2# pylint: disable=missing-docstring
3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import errno
import fcntl
import functools
import logging
import os
import re
import six
import six.moves.cPickle as pickle
import tempfile
import time
import traceback
import weakref
from autotest_lib.client.common_lib import autotemp, error, log
20
21
class job_directory(object):
    """Represents a job.*dir directory."""


    class JobDirectoryException(error.AutotestError):
        """Generic job_directory exception superclass."""


    class MissingDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job does not exist."""
        def __init__(self, path):
            Exception.__init__(self, 'Directory %s does not exist' % path)


    class UncreatableDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job is missing and cannot
        be created."""
        def __init__(self, path, error):
            Exception.__init__(
                self,
                'Creation of directory %s failed with exception %s'
                % (path, error))


    class UnwritableDirectoryException(JobDirectoryException):
        """Raised when a writable directory required by the job exists
        but is not writable."""
        def __init__(self, path):
            Exception.__init__(
                self, 'Directory %s exists but is not writable' % path)


    def __init__(self, path, is_writable=False):
        """Instantiate a job directory.

        @param path: The path of the directory. If None a temporary directory
            will be created instead.
        @param is_writable: If True, expect the directory to be writable.

        @raise MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raise UnwritableDirectoryException: raised if is_writable=True and
            the directory exists but is not writable.
        @raise UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        if path is not None:
            # A concrete path was supplied; no temporary backing needed.
            self._tempdir = None
            self.path = path
        elif is_writable:
            # No path given but writability requested: back the directory
            # with a freshly created temporary directory.
            self._tempdir = autotemp.tempdir(unique_id='autotest')
            self.path = self._tempdir.name
        else:
            # A read-only directory with no path can never exist.
            raise self.MissingDirectoryException(path)
        self._ensure_valid(is_writable)


    def _ensure_valid(self, is_writable):
        """Ensure that self.path refers to a valid directory.

        Checks that the directory exists and, when is_writable is True,
        creates it if necessary and verifies that it is writable. Creation
        will still fail if the path is rooted in a non-writable directory,
        or if a file already exists at the given location.

        @param is_writable: A boolean indicating that the directory should
            not only exist, but also be writable.

        @raises MissingDirectoryException: raised if is_writable=False and
            the directory does not exist.
        @raises UnwritableDirectoryException: raised if is_writable=True and
            the directory is not writable.
        @raises UncreatableDirectoryException: raised if is_writable=True,
            the directory does not exist and it cannot be created.
        """
        if not is_writable:
            # Read-only use: the directory simply has to be there already.
            if not os.path.isdir(self.path):
                raise self.MissingDirectoryException(self.path)
            return

        # Writable use: create the directory if it is missing.
        try:
            os.makedirs(self.path)
        except OSError as e:
            # EEXIST for an existing *directory* is fine; anything else
            # (permissions, a plain file already at the path, ...) is fatal.
            if e.errno != errno.EEXIST or not os.path.isdir(self.path):
                raise self.UncreatableDirectoryException(self.path, e)

        # The directory exists at this point; it must also be writable.
        if not os.access(self.path, os.W_OK):
            raise self.UnwritableDirectoryException(self.path)


    @staticmethod
    def property_factory(attribute):
        """Create a job.*dir -> job._*dir.path property accessor.

        @param attribute A string with the name of the attribute this is
            exposed as. '_'+attribute must then be attribute that holds
            either None or a job_directory-like object.

        @returns A read-only property object that exposes a job_directory path
        """
        @property
        def dir_property(self):
            backing = getattr(self, '_' + attribute)
            return None if backing is None else backing.path
        return dir_property
134
135
136# decorator for use with job_state methods
def with_backing_lock(method):
    """A decorator to perform a lock-*-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a backing file lock and before the call
    followed by a backing file unlock.
    """
    # functools.wraps propagates __name__, __doc__, __module__, __dict__
    # and __wrapped__, unlike the previous manual two-attribute copy.
    @functools.wraps(method)
    def wrapped_method(self, *args, **dargs):
        # If an enclosing call on this instance already holds the lock
        # (self._backing_file_lock is set), do not re-acquire it; only the
        # frame that acquired the lock releases it.
        already_have_lock = self._backing_file_lock is not None
        if not already_have_lock:
            self._lock_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            if not already_have_lock:
                self._unlock_backing_file()
    return wrapped_method
156
157
158# decorator for use with job_state methods
def with_backing_file(method):
    """A decorator to perform a lock-read-*-write-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a lock-and-read before the call followed by a
    write-and-unlock. Any operation that is reading or writing state
    should be decorated with this method to ensure that backing file
    state is consistently maintained.
    """
    # functools.wraps propagates __name__, __doc__, __module__, __dict__
    # and __wrapped__, unlike the previous manual two-attribute copy.
    # with_backing_lock is applied outermost so the read-modify-write
    # sequence below happens entirely under the backing file lock.
    @with_backing_lock
    @functools.wraps(method)
    def wrapped_method(self, *args, **dargs):
        self._read_from_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            # flush state even if the wrapped method raised
            self._write_to_backing_file()
    return wrapped_method
178
179
180
class job_state(object):
    """A class for managing explicit job and user state, optionally persistent.

    The class allows you to save state by name (like a dictionary). Any state
    stored in this class should be picklable and deep copyable. While this is
    not enforced it is recommended that only valid python identifiers be used
    as names. Additionally, the namespace 'stateful_property' is used for
    storing the values associated with properties constructed using the
    property_factory method.
    """

    # Sentinel used by get() so callers may pass any value -- including
    # None -- as a legitimate default.
    NO_DEFAULT = object()
    PICKLE_PROTOCOL = 2  # highest protocol available in python 2.4


    def __init__(self):
        """Initialize the job state."""
        # maps namespace -> {name: value}
        self._state = {}
        # path of the file persisting _state, or None when not persistent
        self._backing_file = None
        # becomes True once the backing file has been read at least once
        self._backing_file_initialized = False
        # open file object holding the flock, or None when unlocked
        self._backing_file_lock = None


    def _lock_backing_file(self):
        """Acquire a lock on the backing file."""
        if self._backing_file:
            # Open in append mode so the file is created if it does not
            # exist yet; the flock is held through this open file object.
            self._backing_file_lock = open(self._backing_file, 'a')
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_EX)


    def _unlock_backing_file(self):
        """Release a lock on the backing file."""
        if self._backing_file_lock:
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_UN)
            self._backing_file_lock.close()
            self._backing_file_lock = None


    def read_from_file(self, file_path, merge=True):
        """Read in any state from the file at file_path.

        When merge=True, any state specified only in-memory will be preserved.
        Any state specified on-disk will be set in-memory, even if an in-memory
        setting already exists.

        @param file_path: The path where the state should be read from. It must
            exist but it can be empty.
        @param merge: If true, merge the on-disk state with the in-memory
            state. If false, replace the in-memory state with the on-disk
            state.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """

        # we can assume that the file exists
        if os.path.getsize(file_path) == 0:
            on_disk_state = {}
        else:
            # This _is_ necessary in the instance that the pickled job is transferred between the
            # server_job and the job on the DUT. The two can be on different autotest versions
            # (e.g. for non-SSP / client tests the server-side is versioned with the drone vs
            # client-side versioned with the Chrome OS being tested).
            try:
                with open(file_path, 'r') as rf:
                    on_disk_state = pickle.load(rf)
            except UnicodeDecodeError:
                # fall back to binary mode if the text-mode read could not
                # decode the pickle bytes
                with open(file_path, 'rb') as rf:
                    on_disk_state = pickle.load(rf)
        if merge:
            # merge the on-disk state with the in-memory state, with
            # conflicts resolved in favor of the on-disk value
            for namespace, namespace_dict in six.iteritems(on_disk_state):
                in_memory_namespace = self._state.setdefault(namespace, {})
                for name, value in six.iteritems(namespace_dict):
                    if name in in_memory_namespace:
                        if in_memory_namespace[name] != value:
                            logging.info('Persistent value of %s.%s from %s '
                                         'overridding existing in-memory '
                                         'value', namespace, name, file_path)
                            in_memory_namespace[name] = value
                        else:
                            logging.debug('Value of %s.%s is unchanged, '
                                          'skipping import', namespace, name)
                    else:
                        logging.debug('Importing %s.%s from state file %s',
                                      namespace, name, file_path)
                        in_memory_namespace[name] = value
        else:
            # just replace the in-memory state with the on-disk state
            self._state = on_disk_state

        # lock the backing file before we refresh it. The unbound
        # _write_to_backing_file is wrapped directly so that only the
        # lock-write-unlock half runs, without re-reading the state that
        # was just loaded above.
        with_backing_lock(self.__class__._write_to_backing_file)(self)


    def write_to_file(self, file_path):
        """Write out the current state to the given path.

        @param file_path: The path where the state should be written out to.
            Must be writable.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        with open(file_path, 'wb') as wf:
            pickle.dump(self._state, wf, self.PICKLE_PROTOCOL)

    def _read_from_backing_file(self):
        """Refresh the current state from the backing file.

        If the backing file has never been read before (indicated by checking
        self._backing_file_initialized) it will merge the file with the
        in-memory state, rather than overwriting it.
        """
        if self._backing_file:
            # first read merges; subsequent reads replace in-memory state
            merge_backing_file = not self._backing_file_initialized
            self.read_from_file(self._backing_file, merge=merge_backing_file)
            self._backing_file_initialized = True


    def _write_to_backing_file(self):
        """Flush the current state to the backing file."""
        if self._backing_file:
            self.write_to_file(self._backing_file)


    @with_backing_file
    def _synchronize_backing_file(self):
        """Synchronizes the contents of the in-memory and on-disk state."""
        # state is implicitly synchronized in _with_backing_file methods
        pass


    def set_backing_file(self, file_path):
        """Change the path used as the backing file for the persistent state.

        When a new backing file is specified if a file already exists then
        its contents will be added into the current state, with conflicts
        between the file and memory being resolved in favor of the file
        contents. The file will then be kept in sync with the (combined)
        in-memory state. The syncing can be disabled by setting this to None.

        @param file_path: A path on the filesystem that can be read from and
            written to, or None to turn off the backing store.
        """
        # flush to the old backing file, switch, then sync with the new one
        self._synchronize_backing_file()
        self._backing_file = file_path
        self._backing_file_initialized = False
        self._synchronize_backing_file()


    @with_backing_file
    def get(self, namespace, name, default=NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with var.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with var and a
            default value is not provided.
        """
        if self.has(namespace, name):
            return copy.deepcopy(self._state[namespace][name])
        elif default is self.NO_DEFAULT:
            raise KeyError('No key %s in namespace %s' % (name, namespace))
        else:
            return default


    @with_backing_file
    def set(self, namespace, name, value):
        """Saves the value given with the provided name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        namespace_dict = self._state.setdefault(namespace, {})
        # deep-copy so later caller-side mutations don't alter stored state
        namespace_dict[name] = copy.deepcopy(value)
        logging.debug('Persistent state %s.%s now set to %r', namespace,
                      name, value)


    @with_backing_file
    def has(self, namespace, name):
        """Return a boolean indicating if namespace.name is defined.

        @param namespace: The namespace to check for a definition.
        @param name: The name to check for a definition.

        @return: True if the given name is defined in the given namespace and
            False otherwise.
        """
        return namespace in self._state and name in self._state[namespace]


    @with_backing_file
    def discard(self, namespace, name):
        """If namespace.name is a defined value, deletes it.

        @param namespace: The namespace that the property is stored in.
        @param name: The name the value is saved with.
        """
        if self.has(namespace, name):
            del self._state[namespace][name]
            # prune the namespace dict once its last entry is removed
            if len(self._state[namespace]) == 0:
                del self._state[namespace]
            logging.debug('Persistent state %s.%s deleted', namespace, name)
        else:
            logging.debug(
                'Persistent state %s.%s not defined so nothing is discarded',
                namespace, name)


    @with_backing_file
    def discard_namespace(self, namespace):
        """Delete all defined namespace.* names.

        @param namespace: The namespace to be cleared.
        """
        if namespace in self._state:
            del self._state[namespace]
        logging.debug('Persistent state %s.* deleted', namespace)


    @staticmethod
    def property_factory(state_attribute, property_attribute, default,
                         namespace='global_properties'):
        """
        Create a property object for an attribute using self.get and self.set.

        @param state_attribute: A string with the name of the attribute on
            job that contains the job_state instance.
        @param property_attribute: A string with the name of the attribute
            this property is exposed as.
        @param default: A default value that should be used for this property
            if it is not set.
        @param namespace: The namespace to store the attribute value in.

        @return: A read-write property object that performs self.get calls
            to read the value and self.set calls to set it.
        """
        def getter(job):
            state = getattr(job, state_attribute)
            return state.get(namespace, property_attribute, default)
        def setter(job, value):
            state = getattr(job, state_attribute)
            state.set(namespace, property_attribute, value)
        return property(getter, setter)
435
436
class status_log_entry(object):
    """Represents a single status log entry."""

    # placeholder rendered for a None subdir/operation field
    RENDERED_NONE_VALUE = '----'
    TIMESTAMP_FIELD = 'timestamp'
    LOCALTIME_FIELD = 'localtime'

    # non-space whitespace is forbidden in any fields
    BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')

    def _init_message(self, message):
        """Handle the message which describes the event to be recorded.

        Break the message line into a single-line message that goes into the
        database, and a block of additional lines that goes into the status
        log but will never be parsed
        When detecting a bad char in message, replace it with space instead
        of raising an exception that cannot be parsed by tko parser.

        @param message: the input message.

        @return: filtered message without bad characters.
        """
        message_lines = message.splitlines()
        if message_lines:
            # first line is the parsed message; the rest are extra lines
            self.message = message_lines[0]
            self.extra_message_lines = message_lines[1:]
        else:
            self.message = ''
            self.extra_message_lines = []

        # tabs are the field delimiter in rendered lines, so expand them
        self.message = self.message.replace('\t', ' ' * 8)
        self.message = self.BAD_CHAR_REGEX.sub(' ', self.message)


    def __init__(self, status_code, subdir, operation, message, fields,
                 timestamp=None):
        """Construct a status.log entry.

        @param status_code: A message status code. Must match the codes
            accepted by autotest_lib.common_lib.log.is_valid_status.
        @param subdir: A valid job subdirectory, or None.
        @param operation: Description of the operation, or None.
        @param message: A printable string describing event to be recorded.
        @param fields: A dictionary of arbitrary alphanumeric key=value pairs
            to be included in the log, or None.
        @param timestamp: An optional integer timestamp, in the same format
            as a time.time() timestamp. If unspecified, the current time is
            used.

        @raise ValueError: if any of the parameters are invalid
        """
        if not log.is_valid_status(status_code):
            raise ValueError('status code %r is not valid' % status_code)
        self.status_code = status_code

        if subdir and self.BAD_CHAR_REGEX.search(subdir):
            raise ValueError('Invalid character in subdir string')
        self.subdir = subdir

        if operation and self.BAD_CHAR_REGEX.search(operation):
            raise ValueError('Invalid character in operation string')
        self.operation = operation

        self._init_message(message)

        # copy so later caller-side mutation of fields doesn't affect us
        if not fields:
            self.fields = {}
        else:
            self.fields = fields.copy()
        for key, value in six.iteritems(self.fields):
            # ints are stringified only for the validation check below;
            # self.fields keeps the original value (render() applies %s)
            if type(value) is int:
                value = str(value)
            if self.BAD_CHAR_REGEX.search(key + value):
                raise ValueError('Invalid character in %r=%r field'
                                 % (key, value))

        # build up the timestamp
        if timestamp is None:
            timestamp = int(time.time())
        self.fields[self.TIMESTAMP_FIELD] = str(timestamp)
        self.fields[self.LOCALTIME_FIELD] = time.strftime(
            '%b %d %H:%M:%S', time.localtime(timestamp))


    def is_start(self):
        """Indicates if this status log is the start of a new nested block.

        @return: A boolean indicating if this entry starts a new nested block.
        """
        return self.status_code == 'START'


    def is_end(self):
        """Indicates if this status log is the end of a nested block.

        @return: A boolean indicating if this entry ends a nested block.
        """
        return self.status_code.startswith('END ')


    def render(self):
        """Render the status log entry into a text string.

        @return: A text string suitable for writing into a status log file.
        """
        # combine all the log line data into a tab-delimited string
        subdir = self.subdir or self.RENDERED_NONE_VALUE
        operation = self.operation or self.RENDERED_NONE_VALUE
        extra_fields = ['%s=%s' % field for field in six.iteritems(self.fields)]
        line_items = [self.status_code, subdir, operation]
        line_items += extra_fields + [self.message]
        first_line = '\t'.join(line_items)

        # append the extra unparsable lines, two-space indented
        all_lines = [first_line]
        all_lines += ['  ' + line for line in self.extra_message_lines]
        return '\n'.join(all_lines)


    @classmethod
    def parse(cls, line):
        """Parse a status log entry from a text string.

        This method is the inverse of render; it should always be true that
        parse(entry.render()) produces a new status_log_entry equivalent to
        entry.

        @return: A new status_log_entry instance with fields extracted from the
            given status line. If the line is an extra message line then None
            is returned.
        """
        # extra message lines are always prepended with two spaces
        if line.startswith('  '):
            return None

        line = line.lstrip('\t')  # ignore indentation
        entry_parts = line.split('\t')
        # minimum parts: status code, subdir, operation, message
        if len(entry_parts) < 4:
            raise ValueError('%r is not a valid status line' % line)
        status_code, subdir, operation = entry_parts[:3]
        if subdir == cls.RENDERED_NONE_VALUE:
            subdir = None
        if operation == cls.RENDERED_NONE_VALUE:
            operation = None
        message = entry_parts[-1]
        # everything between operation and message is key=value fields
        fields = dict(part.split('=', 1) for part in entry_parts[3:-1])
        if cls.TIMESTAMP_FIELD in fields:
            timestamp = int(fields[cls.TIMESTAMP_FIELD])
        else:
            timestamp = None
        return cls(status_code, subdir, operation, message, fields, timestamp)
589
590
class status_indenter(object):
    """Abstract interface that a status log indenter should use."""

    @property
    def indent(self):
        """The current indentation level."""
        raise NotImplementedError


    def increment(self):
        """Increase indentation by one level."""
        raise NotImplementedError


    def decrement(self):
        """Decrease indentation by one level."""
        # Fix: previously this method had no body and silently returned
        # None, unlike its abstract siblings; a subclass that forgot to
        # override decrement() would mis-track indentation with no error.
        raise NotImplementedError
606
607
class status_logger(object):
    """Represents a status log file. Responsible for translating messages
    into on-disk status log lines.

    @property global_filename: The filename to write top-level logs to.
    @property subdir_filename: The filename to write subdir-level logs to.
    """
    def __init__(self, job, indenter, global_filename='status',
                 subdir_filename='status', record_hook=None):
        """Construct a logger instance.

        @param job: A reference to the job object this is logging for. Only a
            weak reference to the job is held, to avoid a
            status_logger <-> job circular reference.
        @param indenter: A status_indenter instance, for tracking the
            indentation level.
        @param global_filename: An optional filename to initialize the
            self.global_filename attribute.
        @param subdir_filename: An optional filename to initialize the
            self.subdir_filename attribute.
        @param record_hook: An optional function to be called before an entry
            is logged. The function should expect a single parameter, a
            copy of the status_log_entry object.
        """
        self._jobref = weakref.ref(job)
        self._indenter = indenter
        self.global_filename = global_filename
        self.subdir_filename = subdir_filename
        self._record_hook = record_hook


    def render_entry(self, log_entry):
        """Render a status_log_entry as it would be written to a log file.

        @param log_entry: A status_log_entry instance to be rendered.

        @return: The status log entry, rendered as it would be written to the
            logs (including indentation).
        """
        # END entries are rendered one level shallower, at the indentation
        # of the block they close rather than of the entries inside it
        if log_entry.is_end():
            indent = self._indenter.indent - 1
        else:
            indent = self._indenter.indent
        return '\t' * indent + log_entry.render().rstrip('\n')


    def record_entry(self, log_entry, log_in_subdir=True):
        """Record a status_log_entry into the appropriate status log files.

        @param log_entry: A status_log_entry instance to be recorded into the
                status logs.
        @param log_in_subdir: A boolean that indicates (when true) that subdir
                logs should be written into the subdirectory status log file.
        """
        # acquire a strong reference for the duration of the method
        job = self._jobref()
        if job is None:
            logging.warning('Something attempted to write a status log entry '
                            'after its job terminated, ignoring the attempt.')
            logging.warning(traceback.format_stack())
            return

        # call the record hook if one was given
        if self._record_hook:
            self._record_hook(log_entry)

        # figure out where we need to log to
        log_files = [os.path.join(job.resultdir, self.global_filename)]
        if log_in_subdir and log_entry.subdir:
            log_files.append(os.path.join(job.resultdir, log_entry.subdir,
                                          self.subdir_filename))

        # write the entry out to the log files; the context manager replaces
        # the old manual try/finally and guarantees the file is closed even
        # if the write raises
        log_text = self.render_entry(log_entry)
        for log_file in log_files:
            with open(log_file, 'a') as fileobj:
                print(log_text, file=fileobj)

        # adjust the indentation if this was a START or END entry
        if log_entry.is_start():
            self._indenter.increment()
        elif log_entry.is_end():
            self._indenter.decrement()
694
695
696class base_job(object):
697    """An abstract base class for the various autotest job classes.
698
699    @property autodir: The top level autotest directory.
700    @property clientdir: The autotest client directory.
701    @property serverdir: The autotest server directory. [OPTIONAL]
702    @property resultdir: The directory where results should be written out.
703        [WRITABLE]
704
705    @property pkgdir: The job packages directory. [WRITABLE]
706    @property tmpdir: The job temporary directory. [WRITABLE]
707    @property testdir: The job test directory. [WRITABLE]
708    @property site_testdir: The job site test directory. [WRITABLE]
709
710    @property bindir: The client bin/ directory.
711    @property profdir: The client profilers/ directory.
712    @property toolsdir: The client tools/ directory.
713
714    @property control: A path to the control file to be executed. [OPTIONAL]
715    @property hosts: A set of all live Host objects currently in use by the
716        job. Code running in the context of a local client can safely assume
717        that this set contains only a single entry.
718    @property machines: A list of the machine names associated with the job.
719    @property user: The user executing the job.
720    @property tag: A tag identifying the job. Often used by the scheduler to
721        give a name of the form NUMBER-USERNAME/HOSTNAME.
722    @property args: A list of addtional miscellaneous command-line arguments
723        provided when starting the job.
724
725    @property automatic_test_tag: A string which, if set, will be automatically
726        added to the test name when running tests.
727
728    @property default_profile_only: A boolean indicating the default value of
729        profile_only used by test.execute. [PERSISTENT]
730    @property drop_caches: A boolean indicating if caches should be dropped
731        before each test is executed.
732    @property drop_caches_between_iterations: A boolean indicating if caches
733        should be dropped before each test iteration is executed.
734    @property run_test_cleanup: A boolean indicating if test.cleanup should be
735        run by default after a test completes, if the run_cleanup argument is
736        not specified. [PERSISTENT]
737
738    @property num_tests_run: The number of tests run during the job. [OPTIONAL]
739    @property num_tests_failed: The number of tests failed during the job.
740        [OPTIONAL]
741
742    @property harness: An instance of the client test harness. Only available
743        in contexts where client test execution happens. [OPTIONAL]
744    @property logging: An instance of the logging manager associated with the
745        job.
746    @property profilers: An instance of the profiler manager associated with
747        the job.
748    @property sysinfo: An instance of the sysinfo object. Only available in
749        contexts where it's possible to collect sysinfo.
750    @property warning_manager: A class for managing which types of WARN
751        messages should be logged and which should be supressed. [OPTIONAL]
752    @property warning_loggers: A set of readable streams that will be monitored
753        for WARN messages to be logged. [OPTIONAL]
754    @property max_result_size_KB: Maximum size of test results should be
755        collected in KB. [OPTIONAL]
756
757    Abstract methods:
758        _find_base_directories [CLASSMETHOD]
759            Returns the location of autodir, clientdir and serverdir
760
761        _find_resultdir
762            Returns the location of resultdir. Gets a copy of any parameters
763            passed into base_job.__init__. Can return None to indicate that
764            no resultdir is to be used.
765
766        _get_status_logger
767            Returns a status_logger instance for recording job status logs.
768    """
769
    # capture the dependency on several helper classes with factories
    # (subclasses and unittests can substitute their own implementations)
    _job_directory = job_directory
    _job_state = job_state


    # all the job directory attributes
    # Each name presumably maps to the path of the matching self._*dir
    # job_directory instance created in __init__ — see property_factory.
    autodir = _job_directory.property_factory('autodir')
    clientdir = _job_directory.property_factory('clientdir')
    serverdir = _job_directory.property_factory('serverdir')
    resultdir = _job_directory.property_factory('resultdir')
    pkgdir = _job_directory.property_factory('pkgdir')
    tmpdir = _job_directory.property_factory('tmpdir')
    testdir = _job_directory.property_factory('testdir')
    site_testdir = _job_directory.property_factory('site_testdir')
    bindir = _job_directory.property_factory('bindir')
    profdir = _job_directory.property_factory('profdir')
    toolsdir = _job_directory.property_factory('toolsdir')


    # all the generic persistent properties
    # Each property is backed by self._state; the third argument is the
    # default value used when nothing has been stored yet.
    tag = _job_state.property_factory('_state', 'tag', '')
    default_profile_only = _job_state.property_factory(
        '_state', 'default_profile_only', False)
    run_test_cleanup = _job_state.property_factory(
        '_state', 'run_test_cleanup', True)
    automatic_test_tag = _job_state.property_factory(
        '_state', 'automatic_test_tag', None)
    max_result_size_KB = _job_state.property_factory(
        '_state', 'max_result_size_KB', 0)
    fast = _job_state.property_factory(
        '_state', 'fast', False)

    # the use_sequence_number property
    # The boolean use_sequence_number is derived from the persistent
    # _sequence_number counter: None means disabled, an int means enabled.
    _sequence_number = _job_state.property_factory(
        '_state', '_sequence_number', None)
    def _get_use_sequence_number(self):
        """Return True when a per-test sequence number is enabled."""
        return bool(self._sequence_number)
    def _set_use_sequence_number(self, value):
        """Enable (reset counter to 1) or disable the sequence number."""
        if value:
            self._sequence_number = 1
        else:
            self._sequence_number = None
    use_sequence_number = property(_get_use_sequence_number,
                                   _set_use_sequence_number)

    # parent job id is passed in from autoserv command line. It's only used in
    # server job. The property is added here for unittest
    # (base_job_unittest.py) to be consistent on validating public properties of
    # a base_job object.
    parent_job_id = None
820
821    def __init__(self, *args, **dargs):
822        # initialize the base directories, all others are relative to these
823        autodir, clientdir, serverdir = self._find_base_directories()
824        self._autodir = self._job_directory(autodir)
825        self._clientdir = self._job_directory(clientdir)
826        # TODO(scottz): crosbug.com/38259, needed to pass unittests for now.
827        self.label = None
828        if serverdir:
829            self._serverdir = self._job_directory(serverdir)
830        else:
831            self._serverdir = None
832
833        # initialize all the other directories relative to the base ones
834        self._initialize_dir_properties()
835        self._resultdir = self._job_directory(
836            self._find_resultdir(*args, **dargs), True)
837        self._execution_contexts = []
838
839        # initialize all the job state
840        self._state = self._job_state()
841
842
    @classmethod
    def _find_base_directories(cls):
        """Locate the base job directories (abstract).

        @return: A 3-tuple of (autodir, clientdir, serverdir) paths;
            serverdir may be a false value when there is no server dir.

        @raise NotImplementedError: always, in this base implementation;
            subclasses must override.
        """
        raise NotImplementedError()
846
847
848    def _initialize_dir_properties(self):
849        """
850        Initializes all the secondary self.*dir properties. Requires autodir,
851        clientdir and serverdir to already be initialized.
852        """
853        # create some stubs for use as shortcuts
854        def readonly_dir(*args):
855            return self._job_directory(os.path.join(*args))
856        def readwrite_dir(*args):
857            return self._job_directory(os.path.join(*args), True)
858
859        # various client-specific directories
860        self._bindir = readonly_dir(self.clientdir, 'bin')
861        self._profdir = readonly_dir(self.clientdir, 'profilers')
862        self._pkgdir = readwrite_dir(self.clientdir, 'packages')
863        self._toolsdir = readonly_dir(self.clientdir, 'tools')
864
865        # directories which are in serverdir on a server, clientdir on a client
866        # tmp tests, and site_tests need to be read_write for client, but only
867        # read for server.
868        if self.serverdir:
869            root = self.serverdir
870            r_or_rw_dir = readonly_dir
871        else:
872            root = self.clientdir
873            r_or_rw_dir = readwrite_dir
874        self._testdir = r_or_rw_dir(root, 'tests')
875        self._site_testdir = r_or_rw_dir(root, 'site_tests')
876
877        # various server-specific directories
878        if self.serverdir:
879            self._tmpdir = readwrite_dir(tempfile.gettempdir())
880        else:
881            self._tmpdir = readwrite_dir(root, 'tmp')
882
883
    def _find_resultdir(self, *args, **dargs):
        """Locate the results directory (abstract).

        Receives a copy of any parameters passed into base_job.__init__.
        May return None to indicate that no resultdir is to be used.

        @raise NotImplementedError: always, in this base implementation;
            subclasses must override.
        """
        raise NotImplementedError()
886
887
888    def push_execution_context(self, resultdir):
889        """
890        Save off the current context of the job and change to the given one.
891
892        In practice method just changes the resultdir, but it may become more
893        extensive in the future. The expected use case is for when a child
894        job needs to be executed in some sort of nested context (for example
895        the way parallel_simple does). The original context can be restored
896        with a pop_execution_context call.
897
898        @param resultdir: The new resultdir, relative to the current one.
899        """
900        new_dir = self._job_directory(
901            os.path.join(self.resultdir, resultdir), True)
902        self._execution_contexts.append(self._resultdir)
903        self._resultdir = new_dir
904
905
906    def pop_execution_context(self):
907        """
908        Reverse the effects of the previous push_execution_context call.
909
910        @raise IndexError: raised when the stack of contexts is empty.
911        """
912        if not self._execution_contexts:
913            raise IndexError('No old execution context to restore')
914        self._resultdir = self._execution_contexts.pop()
915
916
917    def get_state(self, name, default=_job_state.NO_DEFAULT):
918        """Returns the value associated with a particular name.
919
920        @param name: The name the value was saved with.
921        @param default: A default value to return if no state is currently
922            associated with var.
923
924        @return: A deep copy of the value associated with name. Note that this
925            explicitly returns a deep copy to avoid problems with mutable
926            values; mutations are not persisted or shared.
927        @raise KeyError: raised when no state is associated with var and a
928            default value is not provided.
929        """
930        try:
931            return self._state.get('public', name, default=default)
932        except KeyError:
933            raise KeyError(name)
934
935
936    def set_state(self, name, value):
937        """Saves the value given with the provided name.
938
939        @param name: The name the value should be saved with.
940        @param value: The value to save.
941        """
942        self._state.set('public', name, value)
943
944
945    def _build_tagged_test_name(self, testname, dargs):
946        """Builds the fully tagged testname and subdirectory for job.run_test.
947
948        @param testname: The base name of the test
949        @param dargs: The ** arguments passed to run_test. And arguments
950            consumed by this method will be removed from the dictionary.
951
952        @return: A 3-tuple of the full name of the test, the subdirectory it
953            should be stored in, and the full tag of the subdir.
954        """
955        tag_parts = []
956
957        # build up the parts of the tag used for the test name
958        main_testpath = dargs.get('main_testpath', "")
959        base_tag = dargs.pop('tag', None)
960        if base_tag:
961            tag_parts.append(str(base_tag))
962        if self.use_sequence_number:
963            tag_parts.append('_%02d_' % self._sequence_number)
964            self._sequence_number += 1
965        if self.automatic_test_tag:
966            tag_parts.append(self.automatic_test_tag)
967        full_testname = '.'.join([testname] + tag_parts)
968
969        # build up the subdir and tag as well
970        subdir_tag = dargs.pop('subdir_tag', None)
971        if subdir_tag:
972            tag_parts.append(subdir_tag)
973        subdir = '.'.join([testname] + tag_parts)
974        subdir = os.path.join(main_testpath, subdir)
975        tag = '.'.join(tag_parts)
976
977        return full_testname, subdir, tag
978
979
980    def _make_test_outputdir(self, subdir):
981        """Creates an output directory for a test to run it.
982
983        @param subdir: The subdirectory of the test. Generally computed by
984            _build_tagged_test_name.
985
986        @return: A job_directory instance corresponding to the outputdir of
987            the test.
988        @raise TestError: If the output directory is invalid.
989        """
990        # explicitly check that this subdirectory is new
991        path = os.path.join(self.resultdir, subdir)
992        if os.path.exists(path):
993            msg = ('%s already exists; multiple tests cannot run with the '
994                   'same subdirectory' % subdir)
995            raise error.TestError(msg)
996
997        # create the outputdir and raise a TestError if it isn't valid
998        try:
999            outputdir = self._job_directory(path, True)
1000            return outputdir
1001        except self._job_directory.JobDirectoryException as e:
1002            logging.exception('%s directory creation failed with %s',
1003                              subdir, e)
1004            raise error.TestError('%s directory creation failed' % subdir)
1005
1006
1007    def record(self, status_code, subdir, operation, status='',
1008               optional_fields=None):
1009        """Record a job-level status event.
1010
1011        Logs an event noteworthy to the Autotest job as a whole. Messages will
1012        be written into a global status log file, as well as a subdir-local
1013        status log file (if subdir is specified).
1014
1015        @param status_code: A string status code describing the type of status
1016            entry being recorded. It must pass log.is_valid_status to be
1017            considered valid.
1018        @param subdir: A specific results subdirectory this also applies to, or
1019            None. If not None the subdirectory must exist.
1020        @param operation: A string describing the operation that was run.
1021        @param status: An optional human-readable message describing the status
1022            entry, for example an error message or "completed successfully".
1023        @param optional_fields: An optional dictionary of addtional named fields
1024            to be included with the status message. Every time timestamp and
1025            localtime entries are generated with the current time and added
1026            to this dictionary.
1027        """
1028        entry = status_log_entry(status_code, subdir, operation, status,
1029                                 optional_fields)
1030        self.record_entry(entry)
1031
1032
1033    def record_entry(self, entry, log_in_subdir=True):
1034        """Record a job-level status event, using a status_log_entry.
1035
1036        This is the same as self.record but using an existing status log
1037        entry object rather than constructing one for you.
1038
1039        @param entry: A status_log_entry object
1040        @param log_in_subdir: A boolean that indicates (when true) that subdir
1041                logs should be written into the subdirectory status log file.
1042        """
1043        self._get_status_logger().record_entry(entry, log_in_subdir)
1044