import json, math, os, re

from autotest_lib.tko import models, status_lib, utils as tko_utils
from autotest_lib.tko.parsers import base, version_0


class job(version_0.job):
    """Represents a job."""

    def exit_status(self):
        """Returns the string exit status of this job."""

        # Find the .autoserv_execute path.
        top_dir = tko_utils.find_toplevel_job_dir(self.dir)
        if not top_dir:
            return 'ABORT'
        execute_path = os.path.join(top_dir, '.autoserv_execute')

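        # .autoserv_execute is expected to carry the raw wait()-style exit
        # status of the autoserv process on its second line; only that line
        # is inspected here.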
        # If for some reason we can't read the status code, assume disaster.
        if not os.path.exists(execute_path):
            return 'ABORT'
        with open(execute_path) as execute_file:
            lines = execute_file.readlines()
        if len(lines) < 2:
            return 'ABORT'
        try:
            status_code = int(lines[1])
        except ValueError:
            return 'ABORT'

        if not os.WIFEXITED(status_code):
            # Looks like a signal - an ABORT.
            return 'ABORT'
        elif os.WEXITSTATUS(status_code) != 0:
            # Looks like a non-zero exit - a failure.
            return 'FAIL'
        else:
            # Looks like exit code == 0.
            return 'GOOD'


class kernel(models.kernel):
    """Represents a kernel."""

    def __init__(self, base, patches):
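        # `patches` arrives as a list of raw strings; each one is assumed to
        # be a whitespace-separated record whose fields match the version-0
        # patch constructor.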
        if base:
            patches = [patch(*p.split()) for p in patches]
            hashes = [p.hash for p in patches]
            kernel_hash = self.compute_hash(base, hashes)
        else:
            base = 'UNKNOWN'
            patches = []
            kernel_hash = 'UNKNOWN'
        super(kernel, self).__init__(base, patches, kernel_hash)


class test(models.test):
    """Represents a test."""

    @staticmethod
    def load_iterations(keyval_path):
        return iteration.load_from_keyval(keyval_path)


    @staticmethod
    def load_perf_values(perf_values_file):
        return perf_value_iteration.load_from_perf_values_file(
                perf_values_file)


class iteration(models.iteration):
    """Represents an iteration."""

    @staticmethod
    def parse_line_into_dicts(line, attr_dict, perf_dict):
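        # Two keyval formats are handled here (example lines are illustrative,
        # not taken from a real keyval file):
        #   cpu_model{attr}=some string value
        #   throughput{perf}=123.4
        #   legacy_metric=56.7    (untyped lines are assumed to be perf values)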
        key, val_type, value = "", "", ""

        # Figure out what the key, value and keyval type are.
        typed_match = re.search(r'^([^=]*)\{(\w*)\}=(.*)$', line)
        if typed_match:
            key, val_type, value = typed_match.groups()
        else:
            # Old-fashioned untyped match, assume perf.
            untyped_match = re.search(r'^([^=]*)=(.*)$', line)
            if untyped_match:
                key, value = untyped_match.groups()
                val_type = 'perf'

        # Parse the actual value into a dict.
        try:
            if val_type == 'attr':
                attr_dict[key] = value
            elif val_type == 'perf':
                perf_dict[key] = float(value)
            else:
                raise ValueError
        except ValueError:
            msg = ('WARNING: line "%s" found in test '
                   'iteration keyval could not be parsed')
            msg %= line
            tko_utils.dprint(msg)


class perf_value_iteration(models.perf_value_iteration):
    """Represents a perf value iteration."""

    @staticmethod
    def parse_line_into_dict(line):
        """
        Parse a perf measurement text line into a dictionary.

        The line is assumed to be a JSON-formatted string containing key/value
        pairs, where each pair represents a piece of information associated
        with a measured perf metric:

            'description': a string description for the perf metric.
            'value': a numeric value, or list of numeric values.
            'units': the string units associated with the perf metric.
            'higher_is_better': a boolean indicating whether a higher value is
                considered better.  If False, a lower value is considered
                better.
            'graph': a string indicating the name of the perf dashboard graph
                     on which the perf data will be displayed.

        The resulting dictionary will also have a standard deviation key/value
        pair, 'stddev'.  If the perf measurement value is a list of values
        instead of a single value, then the average and standard deviation of
        the list of values are computed and stored.  If it is a single value,
        the value itself is used and is associated with a standard deviation
        of 0.

        @param line: A string line of JSON text from a perf measurements output
            file.

        @return A dictionary containing the parsed perf measurement information
            along with a computed standard deviation value (key 'stddev'), or
            an empty dictionary if the input line cannot be parsed.
        """
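        # An input line might look like this (values are illustrative only):
        #   {"description": "page_load_time", "value": [1.2, 1.4, 1.3],
        #    "units": "ms", "higher_is_better": false, "graph": "loading"}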
        try:
            perf_dict = json.loads(line)
        except ValueError:
            msg = 'Could not parse perf measurements line as json: "%s"' % line
            tko_utils.dprint(msg)
            return {}

        def mean_and_standard_deviation(data):
            """
            Computes the mean and standard deviation of a list of numbers.

            @param data: A list of numbers.

            @return A 2-tuple (mean, standard_deviation) computed from the list
                of numbers.

            """
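            # For example, mean_and_standard_deviation([1.0, 2.0, 3.0])
            # returns (2.0, 1.0), using the sample (n - 1) standard deviation.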
            n = len(data)
            if n == 0:
                return 0.0, 0.0
            if n == 1:
                return data[0], 0.0
            mean = float(sum(data)) / n
            # Divide by n-1 to compute "sample standard deviation".
            variance = sum([(elem - mean) ** 2 for elem in data]) / (n - 1)
            return mean, math.sqrt(variance)

        value = perf_dict['value']
        perf_dict['stddev'] = 0.0
        if isinstance(value, list):
            value, stddev = mean_and_standard_deviation(map(float, value))
            perf_dict['value'] = value
            perf_dict['stddev'] = stddev

        return perf_dict


class status_line(version_0.status_line):
    """Represents a status line."""

    def __init__(self, indent, status, subdir, testname, reason,
                 optional_fields):
        # Handle INFO fields.
        if status == 'INFO':
            self.type = 'INFO'
            self.indent = indent
            self.status = self.subdir = self.testname = self.reason = None
            self.optional_fields = optional_fields
        else:
            # Everything else is backwards compatible.
            super(status_line, self).__init__(indent, status, subdir,
                                              testname, reason,
                                              optional_fields)

    def is_successful_reboot(self, current_status):
        """
        Checks whether the status represents a successful reboot.

        @param current_status: A string representing the current status.

        @return True if the status represents a successful reboot, False
            otherwise.

        """
        # Make sure this is a reboot line.
        if self.testname != 'reboot':
            return False

        # Make sure this was not a failure.
        if status_lib.is_worse_than_or_equal_to(current_status, 'FAIL'):
            return False

        # It must have been a successful reboot.
        return True


    def get_kernel(self):
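        # Optional fields consulted here (values are illustrative only):
        #   kernel=3.10.0-autotest  patch0=...  patch1=...  timestamp=1475544217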
        # Get the base kernel version.
        fields = self.optional_fields
        base = re.sub('-autotest$', '', fields.get('kernel', ''))
        # Get a list of patches.
        patches = []
        patch_index = 0
        while ('patch%d' % patch_index) in fields:
            patches.append(fields['patch%d' % patch_index])
            patch_index += 1
        # Create a new kernel instance.
        return kernel(base, patches)


    def get_timestamp(self):
        return tko_utils.get_timestamp(self.optional_fields, 'timestamp')


# The default implementations from version 0 will do for now.
patch = version_0.patch


class parser(base.parser):
    """Represents a parser."""

    @staticmethod
    def make_job(dir):
        return job(dir)


    @staticmethod
    def make_dummy_abort(indent, subdir, testname, timestamp, reason):
        """
        Creates an abort string.

        @param indent: The number of indentation levels for the string.
        @param subdir: The subdirectory name.
        @param testname: The test name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        @return A string describing the abort.

        """
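        # Illustrative result (values made up), with indent=2:
        #   '\t\tEND ABORT\t----\t----\ttimestamp=1475544217\tsome reason'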
        indent = '\t' * indent
        if not subdir:
            subdir = '----'
        if not testname:
            testname = '----'

        # There is no guarantee that this will be set.
        timestamp_field = ''
        if timestamp:
            timestamp_field = '\ttimestamp=%s' % timestamp

        msg = indent + 'END ABORT\t%s\t%s%s\t%s'
        return msg % (subdir, testname, timestamp_field, reason)

    @staticmethod
    def put_back_line_and_abort(
        line_buffer, line, indent, subdir, timestamp, reason):
        """
        Puts the line back into the line buffer and queues a dummy abort.

        @param line_buffer: A line_buffer object.
        @param line: A line to put back into the line buffer.
        @param indent: The number of indentation levels.
        @param subdir: The subdirectory name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        """
        tko_utils.dprint('Unexpected indent: aborting log parse')
        line_buffer.put_back(line)
        abort = parser.make_dummy_abort(
            indent, subdir, subdir, timestamp, reason)
        line_buffer.put_back(abort)


    def state_iterator(self, buffer):
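        # This is a generator-based state machine: the caller feeds raw status
        # log lines into `buffer` and pulls a (possibly empty) list of newly
        # parsed test objects from each yield.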
        line = None
        new_tests = []
        job_count, boot_count = 0, 0
        min_stack_size = 0
        stack = status_lib.status_stack()
        current_kernel = kernel("", [])  # UNKNOWN
        current_status = status_lib.statuses[-1]
        current_reason = None
        started_time_stack = [None]
        subdir_stack = [None]
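        # started_time_stack and subdir_stack mirror the status stack: every
        # START pushes an entry and every END pops one.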
        running_test = None
        running_reasons = set()
        ignored_lines = []
        yield []   # We're ready to start running.

        def print_ignored_lines():
            tko_utils.dprint('The following lines were ignored:')
            for line in ignored_lines:
                tko_utils.dprint(line)
            tko_utils.dprint('---------------------------------')

        # Create a RUNNING SERVER_JOB entry to represent the entire job.
        running_job = test.parse_partial_test(self.job, '----', 'SERVER_JOB',
                                              '', current_kernel,
                                              self.job.started_time)
        new_tests.append(running_job)

        while True:
            # Are we finished with parsing?
            if buffer.size() == 0 and self.finished:
                if ignored_lines:
                    print_ignored_lines()
                    ignored_lines = []
                if stack.size() == 0:
                    break
                # We have status lines left on the stack;
                # we need to implicitly abort them first.
                tko_utils.dprint('\nUnexpected end of job, aborting')
                abort_subdir_stack = list(subdir_stack)
                if self.job.aborted_by:
                    reason = 'Job aborted by %s' % self.job.aborted_by
                    reason += self.job.aborted_on.strftime(
                        ' at %b %d %H:%M:%S')
                else:
                    reason = 'Job aborted unexpectedly'

                timestamp = line.optional_fields.get('timestamp')
                for i in reversed(xrange(stack.size())):
                    if abort_subdir_stack:
                        subdir = abort_subdir_stack.pop()
                    else:
                        subdir = None
                    abort = self.make_dummy_abort(
                        i, subdir, subdir, timestamp, reason)
                    buffer.put(abort)

            # The buffer is empty; yield the tests parsed so far and wait for
            # more input.
            if buffer.size() == 0:
                yield new_tests
                new_tests = []
                continue

            # Reinitialize the per-iteration state.
            started_time = None
            finished_time = None

            # Get the next line.
            raw_line = status_lib.clean_raw_line(buffer.get())
            line = status_line.parse_line(raw_line)
            if line is None:
                ignored_lines.append(raw_line)
                continue
            elif ignored_lines:
                print_ignored_lines()
                ignored_lines = []

            # Do an initial sanity check of the indentation.
            expected_indent = stack.size()
            if line.type == 'END':
                expected_indent -= 1
            if line.indent < expected_indent:
                # ABORT the current level if indentation was unexpectedly low.
                self.put_back_line_and_abort(
                    buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                    line.optional_fields.get('timestamp'), line.reason)
                continue
            elif line.indent > expected_indent:
                # Ignore the line if the indentation was unexpectedly high.
                tko_utils.dprint('ignoring line because of extra indentation')
                continue

            # Initial line processing.
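            # Each parsed line is one of START, INFO, STATUS or END; anything
            # else trips the assert at the end of this chain.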
            if line.type == 'START':
                stack.start()
                started_time = line.get_timestamp()
                if (line.testname is None and line.subdir is None
                    and not running_test):
                    # We just started a client; all tests are relative to here.
                    min_stack_size = stack.size()
                    # Start a "RUNNING" CLIENT_JOB entry.
                    job_name = 'CLIENT_JOB.%d' % job_count
                    running_client = test.parse_partial_test(self.job, None,
                                                             job_name,
                                                             '', current_kernel,
                                                             started_time)
                    msg = 'RUNNING: %s\n%s\n'
                    msg %= (running_client.status, running_client.testname)
                    tko_utils.dprint(msg)
                    new_tests.append(running_client)
                elif stack.size() == min_stack_size + 1 and not running_test:
                    # We just started a new test; insert a running record.
                    running_reasons = set()
                    if line.reason:
                        running_reasons.add(line.reason)
                    running_test = test.parse_partial_test(self.job,
                                                           line.subdir,
                                                           line.testname,
                                                           line.reason,
                                                           current_kernel,
                                                           started_time)
                    msg = 'RUNNING: %s\nSubdir: %s\nTestname: %s\n%s'
                    msg %= (running_test.status, running_test.subdir,
                            running_test.testname, running_test.reason)
                    tko_utils.dprint(msg)
                    new_tests.append(running_test)
                started_time_stack.append(started_time)
                subdir_stack.append(line.subdir)
                continue
            elif line.type == 'INFO':
                fields = line.optional_fields
                # Update the current kernel if one is defined in the info.
                if 'kernel' in fields:
                    current_kernel = line.get_kernel()
                # Update the SERVER_JOB reason if one was logged for an abort.
                if 'job_abort_reason' in fields:
                    running_job.reason = fields['job_abort_reason']
                    new_tests.append(running_job)
                continue
            elif line.type == 'STATUS':
                # Update the stacks.
                if line.subdir and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
                # Update the status, start and finished times.
                stack.update(line.status)
                if status_lib.is_worse_than_or_equal_to(line.status,
                                                        current_status):
                    if line.reason:
                        # Update the status of a currently running test.
                        if running_test:
                            running_reasons.add(line.reason)
                            running_reasons = tko_utils.drop_redundant_messages(
                                    running_reasons)
                            sorted_reasons = sorted(running_reasons)
                            running_test.reason = ', '.join(sorted_reasons)
                            current_reason = running_test.reason
                            new_tests.append(running_test)
                            msg = 'update RUNNING reason: %s' % line.reason
                            tko_utils.dprint(msg)
                        else:
                            current_reason = line.reason
                    current_status = stack.current_status()
                started_time = None
                finished_time = line.get_timestamp()
                # If this is a non-test entry, there's nothing else to do.
                if line.testname is None and line.subdir is None:
                    continue
            elif line.type == 'END':
                # Grab the current subdir off the subdir stack, or, if this
                # is the end of a job, just pop it off.
                if (line.testname is None and line.subdir is None
                    and not running_test):
                    min_stack_size = stack.size() - 1
                    subdir_stack.pop()
                else:
                    line.subdir = subdir_stack.pop()
                    if not subdir_stack[-1] and stack.size() > min_stack_size:
                        subdir_stack[-1] = line.subdir
                # Update the status, start and finished times.
                stack.update(line.status)
                current_status = stack.end()
                if stack.size() > min_stack_size:
                    stack.update(current_status)
                    current_status = stack.current_status()
                started_time = started_time_stack.pop()
                finished_time = line.get_timestamp()
                # Update the current kernel.
                if line.is_successful_reboot(current_status):
                    current_kernel = line.get_kernel()
                # Adjust the testname if this is a reboot.
                if line.testname == 'reboot' and line.subdir is None:
                    line.testname = 'boot.%d' % boot_count
            else:
                assert False

            # Have we just finished a test?
            if stack.size() <= min_stack_size:
                # If there was no testname, just use the subdir.
                if line.testname is None:
                    line.testname = line.subdir
                # If there was no testname or subdir, use 'CLIENT_JOB'.
                if line.testname is None:
                    line.testname = 'CLIENT_JOB.%d' % job_count
                    running_test = running_client
                    job_count += 1
                    if not status_lib.is_worse_than_or_equal_to(
                        current_status, 'ABORT'):
                        # A job hasn't really failed just because some of the
                        # tests it ran have.
                        current_status = 'GOOD'

                if not current_reason:
                    current_reason = line.reason
                new_test = test.parse_test(self.job,
                                           line.subdir,
                                           line.testname,
                                           current_status,
                                           current_reason,
                                           current_kernel,
                                           started_time,
                                           finished_time,
                                           running_test)
                running_test = None
                current_status = status_lib.statuses[-1]
                current_reason = None
                if new_test.testname == ('boot.%d' % boot_count):
                    boot_count += 1
                msg = 'ADD: %s\nSubdir: %s\nTestname: %s\n%s'
                msg %= (new_test.status, new_test.subdir,
                        new_test.testname, new_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(new_test)

        # The job is finished; produce the final SERVER_JOB entry and exit.
        final_job = test.parse_test(self.job, '----', 'SERVER_JOB',
                                    self.job.exit_status(), running_job.reason,
                                    current_kernel,
                                    self.job.started_time,
                                    self.job.finished_time,
                                    running_job)
        new_tests.append(final_job)
        yield new_tests