1import json
2import math
3import os
4import re
5
6import common
7from autotest_lib.tko import models
8from autotest_lib.tko import status_lib
9from autotest_lib.tko import utils as tko_utils
10from autotest_lib.tko.parsers import base
11from autotest_lib.tko.parsers import version_0
12
13
class job(version_0.job):
    """Represents a job."""

    def exit_status(self):
        """Returns the string exit status of this job.

        Reads the exit code recorded on the second line of the
        .autoserv_execute file in the top-level job directory.

        @return 'GOOD' if autoserv exited normally with status 0, 'FAIL' if
            it exited normally with a non-zero status, or 'ABORT' if the
            status file is missing/unreadable or autoserv died to a signal.
        """
        # Find the .autoserv_execute path.
        top_dir = tko_utils.find_toplevel_job_dir(self.dir)
        if not top_dir:
            return 'ABORT'
        execute_path = os.path.join(top_dir, '.autoserv_execute')

        # If for some reason we can't read the status code, assume disaster.
        if not os.path.exists(execute_path):
            return 'ABORT'
        # Use a context manager so the file handle is not leaked.
        with open(execute_path) as execute_file:
            lines = execute_file.readlines()
        if len(lines) < 2:
            return 'ABORT'
        try:
            status_code = int(lines[1])
        except ValueError:
            return 'ABORT'

        if not os.WIFEXITED(status_code):
            # Looks like a signal - an ABORT.
            return 'ABORT'
        elif os.WEXITSTATUS(status_code) != 0:
            # Looks like a non-zero exit - a failure.
            return 'FAIL'
        else:
            # Looks like exit code == 0.
            return 'GOOD'
46
47
class kernel(models.kernel):
    """Represents a kernel."""

    def __init__(self, base, patches):
        # Without a base version there is nothing to hash; fall back to
        # placeholder values for everything.
        if not base:
            super(kernel, self).__init__('UNKNOWN', [], 'UNKNOWN')
            return
        parsed_patches = [patch(*spec.split()) for spec in patches]
        patch_hashes = [p.hash for p in parsed_patches]
        super(kernel, self).__init__(
            base, parsed_patches, self.compute_hash(base, patch_hashes))
61
62
class test(models.test):
    """Represents a test."""

    @staticmethod
    def load_iterations(keyval_path):
        """Load the iteration keyvals stored at the given path.

        Delegates to the version-1 iteration parser defined in this module.

        @param keyval_path: Path to a test iteration keyval file.

        @return Whatever iteration.load_from_keyval produces for the file
            (a sequence of parsed iterations).
        """
        return iteration.load_from_keyval(keyval_path)
69
70
class iteration(models.iteration):
    """Represents an iteration."""

    @staticmethod
    def parse_line_into_dicts(line, attr_dict, perf_dict):
        """Parse one iteration keyval line into the attr and/or perf dicts.

        Lines of the form 'key{attr}=value' populate attr_dict, lines of the
        form 'key{perf}=value' populate perf_dict, and untyped 'key=value'
        lines are assumed to be perf values.  A perf value may be either a
        plain float or 'mean+-deviation', in which case both the mean and a
        '<key>_dev' entry are stored.  Unparseable lines are reported via
        tko_utils.dprint and otherwise ignored.

        @param line: A string line from a test iteration keyval file.
        @param attr_dict: Dictionary receiving attribute keyvals (mutated).
        @param perf_dict: Dictionary receiving perf keyvals (mutated).
        """
        key, val_type, value = '', '', ''

        # Figure out what the key, value and keyval type are.  Raw strings
        # keep the regex escapes literal.
        typed_match = re.search(r'^([^=]*)\{(\w*)\}=(.*)$', line)
        if typed_match:
            key, val_type, value = typed_match.groups()
        else:
            # Old-fashioned untyped match, assume perf.
            untyped_match = re.search(r'^([^=]*)=(.*)$', line)
            if untyped_match:
                key, value = untyped_match.groups()
                val_type = 'perf'

        # Parse the actual value into a dict.
        try:
            if val_type == 'attr':
                attr_dict[key] = value
            elif val_type == 'perf':
                # First check if value is in the form of 'mean+-deviation'.
                # The decimal point is escaped so arbitrary characters are
                # not accepted inside the numbers.
                if isinstance(value, str):
                    r = re.compile(r'(\d+\.?\d*)\+-(\d+\.?\d*)')
                    match = r.match(value)
                    if match:
                        perf_dict[key] = float(match.group(1))
                        perf_dict['%s_dev' % key] = float(match.group(2))
                        return
                # Otherwise try to interpret as a regular float.
                perf_dict[key] = float(value)
            else:
                # Unknown keyval type; fall through to the warning below.
                raise ValueError
        except ValueError:
            msg = ('WARNING: line "%s" found in test '
                   'iteration keyval could not be parsed')
            msg %= line
            tko_utils.dprint(msg)
111
112
class perf_value_iteration(models.perf_value_iteration):
    """Represents a perf value iteration."""

    @staticmethod
    def parse_line_into_dict(line):
        """
        Parse a perf measurement text line into a dictionary.

        The line is assumed to be a JSON-formatted string containing key/value
        pairs, where each pair represents a piece of information associated
        with a measured perf metric:

            'description': a string description for the perf metric.
            'value': a numeric value, or list of numeric values.
            'units': the string units associated with the perf metric.
            'higher_is_better': a boolean whether a higher value is considered
                better.  If False, a lower value is considered better.
            'graph': a string indicating the name of the perf dashboard graph
                     on which the perf data will be displayed.

        The resulting dictionary will also have a standard deviation key/value
        pair, 'stddev'.  If the perf measurement value is a list of values
        instead of a single value, then the average and standard deviation of
        the list of values is computed and stored.  If a single value, the
        value itself is used, and is associated with a standard deviation of 0.

        @param line: A string line of JSON text from a perf measurements output
            file.

        @return A dictionary containing the parsed perf measurement information
            along with a computed standard deviation value (key 'stddev'), or
            an empty dictionary if the inputted line cannot be parsed.
        """
        try:
            perf_dict = json.loads(line)
        except ValueError:
            msg = 'Could not parse perf measurements line as json: "%s"' % line
            tko_utils.dprint(msg)
            return {}

        # A line without a 'value' field cannot be interpreted as a perf
        # measurement; honor the documented contract and return an empty
        # dictionary instead of raising KeyError.
        if 'value' not in perf_dict:
            msg = 'Perf measurements line missing "value" field: "%s"' % line
            tko_utils.dprint(msg)
            return {}

        def mean_and_standard_deviation(data):
            """
            Computes the mean and standard deviation of a list of numbers.

            @param data: A list of numbers.

            @return A 2-tuple (mean, standard_deviation) computed from the list
                of numbers.

            """
            n = len(data)
            if n == 0:
                return 0.0, 0.0
            if n == 1:
                # A single sample has no spread.
                return data[0], 0.0
            mean = float(sum(data)) / n
            # Divide by n-1 to compute "sample standard deviation".
            variance = sum([(elem - mean) ** 2 for elem in data]) / (n - 1)
            return mean, math.sqrt(variance)

        value = perf_dict['value']
        perf_dict['stddev'] = 0.0
        if isinstance(value, list):
            # Materialize the floats in a list so len() in the helper works
            # whether map() returns a list (Python 2) or iterator (Python 3).
            float_values = [float(elem) for elem in value]
            value, stddev = mean_and_standard_deviation(float_values)
            perf_dict['value'] = value
            perf_dict['stddev'] = stddev

        return perf_dict
181
182
class status_line(version_0.status_line):
    """Represents a status line."""

    def __init__(self, indent, status, subdir, testname, reason,
                 optional_fields):
        if status != 'INFO':
            # Everything other than INFO is backwards compatible.
            super(status_line, self).__init__(indent, status, subdir,
                                              testname, reason,
                                              optional_fields)
            return
        # INFO lines carry no status/subdir/testname/reason payload.
        self.type = 'INFO'
        self.indent = indent
        self.status = None
        self.subdir = None
        self.testname = None
        self.reason = None
        self.optional_fields = optional_fields


    def is_successful_reboot(self, current_status):
        """
        Checks whether the status represents a successful reboot.

        @param current_status: A string representing the current status.

        @return True, if the status represents a successful reboot, or False
            if not.

        """
        # A reboot succeeded only when this is a reboot line whose status is
        # better than FAIL.
        return (self.testname == 'reboot' and
                not status_lib.is_worse_than_or_equal_to(current_status,
                                                         'FAIL'))


    def get_kernel(self):
        """Build a kernel object from this line's optional fields."""
        fields = self.optional_fields
        # Strip the '-autotest' suffix from the base kernel version.
        base = re.sub('-autotest$', '', fields.get('kernel', ''))
        # Collect patch0, patch1, ... until the first missing index.
        patches = []
        index = 0
        while True:
            patch_key = 'patch%d' % index
            if patch_key not in fields:
                break
            patches.append(fields[patch_key])
            index += 1
        # Create a new kernel instance.
        return kernel(base, patches)


    def get_timestamp(self):
        """Return the timestamp recorded in the optional fields, if any."""
        return tko_utils.get_timestamp(self.optional_fields, 'timestamp')
239
240
# The version 0 patch implementation is sufficient for version 1 logs, so
# reuse it directly.
patch = version_0.patch
243
244
class parser(base.parser):
    """Represents a parser for version 1 status logs."""

    @staticmethod
    def make_job(dir):
        """Create a version 1 job object for the given results directory."""
        return job(dir)


    @staticmethod
    def make_dummy_abort(indent, subdir, testname, timestamp, reason):
        """
        Creates an abort string.

        @param indent: The number of indentation levels for the string.
        @param subdir: The subdirectory name.
        @param testname: The test name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        @return A string describing the abort.

        """
        indent = '\t' * indent
        # Missing subdir/testname fields are rendered as the '----'
        # placeholder used throughout status logs.
        if not subdir:
            subdir = '----'
        if not testname:
            testname = '----'

        # There is no guarantee that this will be set.
        timestamp_field = ''
        if timestamp:
            timestamp_field = '\ttimestamp=%s' % timestamp

        msg = indent + 'END ABORT\t%s\t%s%s\t%s'
        return msg % (subdir, testname, timestamp_field, reason)


    @staticmethod
    def put_back_line_and_abort(
        line_buffer, line, indent, subdir, testname, timestamp, reason):
        """
        Appends a line to the line buffer and aborts.

        The original line is pushed back so it can be reprocessed after the
        synthesized END ABORT line unwinds the current nesting level.

        @param line_buffer: A line_buffer object.
        @param line: A line to append to the line buffer.
        @param indent: The number of indentation levels.
        @param subdir: The subdirectory name.
        @param testname: The test name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        """
        tko_utils.dprint('Unexpected indent: aborting log parse')
        line_buffer.put_back(line)
        # put_back is LIFO, so the abort is processed before the saved line.
        abort = parser.make_dummy_abort(
            indent, subdir, testname, timestamp, reason)
        line_buffer.put_back(abort)


    def state_iterator(self, buffer):
        """
        Yields a list of tests out of the buffer.

        This is a generator-based state machine: each time the buffer is
        drained it yields the batch of tests parsed so far and waits to be
        resumed with more input.

        @param buffer: a buffer object

        """
        line = None
        new_tests = []
        job_count, boot_count = 0, 0
        # Stack depth belonging to the enclosing client job; a test is only
        # finished once the stack shrinks back to this size.
        min_stack_size = 0
        stack = status_lib.status_stack()
        current_kernel = kernel("", [])  # UNKNOWN
        # statuses[-1] is the least severe status; worse ones override it.
        current_status = status_lib.statuses[-1]
        current_reason = None
        # Parallel stacks tracking per-nesting-level context.
        started_time_stack = [None]
        subdir_stack = [None]
        testname_stack = [None]
        running_test = None
        running_reasons = set()
        ignored_lines = []
        yield []   # We're ready to start running.

        def print_ignored_lines():
            """
            Prints the ignored_lines using tko_utils.dprint method.

            Only the first and last ignored lines are printed when more than
            two were collected, to keep the debug output bounded.
            """
            num_lines = len(ignored_lines)
            if num_lines > 2:
                tko_utils.dprint('The following %s lines were ignored:' %
                                 num_lines)
                tko_utils.dprint('%r' % ignored_lines[0])
                tko_utils.dprint('...')
                tko_utils.dprint('%r' % ignored_lines[-1])
            elif num_lines == 2:
                tko_utils.dprint('The following %s lines were ignored:' %
                                 num_lines)
                tko_utils.dprint('%r' % ignored_lines[0])
                tko_utils.dprint('%r' % ignored_lines[-1])
            elif num_lines == 1:
                tko_utils.dprint('The following line was ignored:')
                tko_utils.dprint('%r' % ignored_lines[0])

        # Create a RUNNING SERVER_JOB entry to represent the entire test.
        running_job = test.parse_partial_test(self.job, '----', 'SERVER_JOB',
                                              '', current_kernel,
                                              self.job.started_time)
        new_tests.append(running_job)

        while True:
            # Are we finished with parsing?
            if buffer.size() == 0 and self.finished:
                if ignored_lines:
                    print_ignored_lines()
                    ignored_lines = []
                if stack.size() == 0:
                    break
                # We have status lines left on the stack;
                # we need to implicitly abort them first.
                tko_utils.dprint('\nUnexpected end of job, aborting')
                # Work on a copy so subdir_stack itself is left intact.
                abort_subdir_stack = list(subdir_stack)
                if self.job.aborted_by:
                    reason = 'Job aborted by %s' % self.job.aborted_by
                    reason += self.job.aborted_on.strftime(
                        ' at %b %d %H:%M:%S')
                else:
                    reason = 'Job aborted unexpectedly'

                # NOTE(review): this assumes at least one status line was
                # parsed ('line' is not None here) — verify for truncated logs.
                timestamp = line.optional_fields.get('timestamp')
                # Synthesize aborts from the innermost level outward.
                for i in reversed(xrange(stack.size())):
                    if abort_subdir_stack:
                        subdir = abort_subdir_stack.pop()
                    else:
                        subdir = None
                    abort = self.make_dummy_abort(
                        i, subdir, subdir, timestamp, reason)
                    buffer.put(abort)

            # Stop processing once the buffer is empty.
            # Yield the batch parsed so far and wait for more input.
            if buffer.size() == 0:
                yield new_tests
                new_tests = []
                continue

            # Reinitialize the per-iteration state.
            started_time = None
            finished_time = None

            # Get the next line.
            raw_line = status_lib.clean_raw_line(buffer.get())
            line = status_line.parse_line(raw_line)
            if line is None:
                ignored_lines.append(raw_line)
                continue
            elif ignored_lines:
                print_ignored_lines()
                ignored_lines = []

            # Do an initial sanity check of the indentation.
            expected_indent = stack.size()
            if line.type == 'END':
                # END lines are indented one level less than their START.
                expected_indent -= 1
            if line.indent < expected_indent:
                # ABORT the current level if indentation was unexpectedly low.
                self.put_back_line_and_abort(
                    buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                    testname_stack[-1], line.optional_fields.get('timestamp'),
                    line.reason)
                continue
            elif line.indent > expected_indent:
                # Ignore the log if the indent was unexpectedly high.
                tko_utils.dprint('ignoring line because of extra indentation')
                continue

            # Initial line processing.
            if line.type == 'START':
                stack.start()
                started_time = line.get_timestamp()
                testname = None
                if (line.testname is None and line.subdir is None
                    and not running_test):
                    # We just started a client; all tests are relative to here.
                    min_stack_size = stack.size()
                    # Start a "RUNNING" CLIENT_JOB entry.
                    job_name = 'CLIENT_JOB.%d' % job_count
                    running_client = test.parse_partial_test(self.job, None,
                                                             job_name,
                                                             '', current_kernel,
                                                             started_time)
                    msg = 'RUNNING: %s\n%s\n'
                    msg %= (running_client.status, running_client.testname)
                    tko_utils.dprint(msg)
                    new_tests.append(running_client)
                    testname = running_client.testname
                elif stack.size() == min_stack_size + 1 and not running_test:
                    # We just started a new test; insert a running record.
                    running_reasons = set()
                    if line.reason:
                        running_reasons.add(line.reason)
                    running_test = test.parse_partial_test(self.job,
                                                           line.subdir,
                                                           line.testname,
                                                           line.reason,
                                                           current_kernel,
                                                           started_time)
                    msg = 'RUNNING: %s\nSubdir: %s\nTestname: %s\n%s'
                    msg %= (running_test.status, running_test.subdir,
                            running_test.testname, running_test.reason)
                    tko_utils.dprint(msg)
                    new_tests.append(running_test)
                    testname = running_test.testname
                # Push the new nesting level's context.
                started_time_stack.append(started_time)
                subdir_stack.append(line.subdir)
                testname_stack.append(testname)
                continue
            elif line.type == 'INFO':
                fields = line.optional_fields
                # Update the current kernel if one is defined in the info.
                if 'kernel' in fields:
                    current_kernel = line.get_kernel()
                # Update the SERVER_JOB reason if one was logged for an abort.
                if 'job_abort_reason' in fields:
                    running_job.reason = fields['job_abort_reason']
                    new_tests.append(running_job)
                continue
            elif line.type == 'STATUS':
                # Update the stacks.
                if line.subdir and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
                    testname_stack[-1] = line.testname
                # Update the status, start and finished times.
                stack.update(line.status)
                if status_lib.is_worse_than_or_equal_to(line.status,
                                                        current_status):
                    if line.reason:
                        # Update the status of a currently running test.
                        if running_test:
                            running_reasons.add(line.reason)
                            running_reasons = tko_utils.drop_redundant_messages(
                                    running_reasons)
                            sorted_reasons = sorted(running_reasons)
                            running_test.reason = ', '.join(sorted_reasons)
                            current_reason = running_test.reason
                            new_tests.append(running_test)
                            msg = 'update RUNNING reason: %s' % line.reason
                            tko_utils.dprint(msg)
                        else:
                            current_reason = line.reason
                    current_status = stack.current_status()
                started_time = None
                finished_time = line.get_timestamp()
                # If this is a non-test entry there's nothing else to do.
                if line.testname is None and line.subdir is None:
                    continue
            elif line.type == 'END':
                # Grab the current subdir off of the subdir stack, or, if this
                # is the end of a job, just pop it off.
                if (line.testname is None and line.subdir is None
                    and not running_test):
                    min_stack_size = stack.size() - 1
                    subdir_stack.pop()
                    testname_stack.pop()
                else:
                    line.subdir = subdir_stack.pop()
                    testname_stack.pop()
                    # Propagate the subdir outward if the enclosing level has
                    # none of its own.
                    if not subdir_stack[-1] and stack.size() > min_stack_size:
                        subdir_stack[-1] = line.subdir
                # Update the status, start and finished times.
                stack.update(line.status)
                current_status = stack.end()
                if stack.size() > min_stack_size:
                    # Fold this level's result into the enclosing level.
                    stack.update(current_status)
                    current_status = stack.current_status()
                started_time = started_time_stack.pop()
                finished_time = line.get_timestamp()
                # Update the current kernel.
                if line.is_successful_reboot(current_status):
                    current_kernel = line.get_kernel()
                # Adjust the testname if this is a reboot.
                if line.testname == 'reboot' and line.subdir is None:
                    line.testname = 'boot.%d' % boot_count
            else:
                # parse_line only produces START/INFO/STATUS/END lines.
                assert False

            # Have we just finished a test?
            if stack.size() <= min_stack_size:
                # If there was no testname, just use the subdir.
                if line.testname is None:
                    line.testname = line.subdir
                # If there was no testname or subdir, use 'CLIENT_JOB'.
                if line.testname is None:
                    line.testname = 'CLIENT_JOB.%d' % job_count
                    # NOTE(review): 'running_client' is only bound in the
                    # client START branch above — an END with no matching
                    # START would raise here; confirm logs guarantee pairing.
                    running_test = running_client
                    job_count += 1
                    if not status_lib.is_worse_than_or_equal_to(
                        current_status, 'ABORT'):
                        # A job hasn't really failed just because some of the
                        # tests it ran have.
                        current_status = 'GOOD'

                if not current_reason:
                    current_reason = line.reason
                new_test = test.parse_test(self.job,
                                           line.subdir,
                                           line.testname,
                                           current_status,
                                           current_reason,
                                           current_kernel,
                                           started_time,
                                           finished_time,
                                           running_test)
                # Reset the per-test accumulators for the next test.
                running_test = None
                current_status = status_lib.statuses[-1]
                current_reason = None
                if new_test.testname == ('boot.%d' % boot_count):
                    boot_count += 1
                msg = 'ADD: %s\nSubdir: %s\nTestname: %s\n%s'
                msg %= (new_test.status, new_test.subdir,
                        new_test.testname, new_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(new_test)

        # The job is finished; produce the final SERVER_JOB entry and exit.
        final_job = test.parse_test(self.job, '----', 'SERVER_JOB',
                                    self.job.exit_status(), running_job.reason,
                                    current_kernel,
                                    self.job.started_time,
                                    self.job.finished_time,
                                    running_job)
        new_tests.append(final_job)
        yield new_tests
575