# Lint as: python2, python3
# Copyright (c) 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Utility class to parse the output of a gtest suite run."""

import re


class gtest_parser(object):
    """This class knows how to understand GTest test output.

    The code was borrowed with minor changes from the Chrome utility
    gtest_command:
        http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/master/
        log_parser/gtest_command.py?view=markup
    """

    def __init__(self):
        # State tracking for log parsing
        self._current_test = ''
        self._failure_description = []
        self._current_suppression_hash = ''
        self._current_suppression = []

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self.internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout').
        # The description is a list of lines detailing the test's error, as
        # reported in the log.
        self._test_status = {}

        # Suppressions are stored here as 'hash': [suppression].
        self._suppressions = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self.disabled_tests = 0
        self.flaky_tests = 0

        # Regular expressions for parsing GTest logs. Test names look like
        #   SomeTestCase.SomeTest
        #   SomeName/SomeTestCase.SomeTest/1
        # This regexp also matches SomeName.SomeTest/1, which should be
        # harmless.
        test_name_regexp = r'((\w+/)?\w+\.\w+(\.\w+)?(/\d+)?)'
        self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
        self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
        self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
        self._test_timeout = re.compile(
            r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
        self._disabled = re.compile(r'  YOU HAVE (\d+) DISABLED TEST')
        self._flaky = re.compile(r'  YOU HAVE (\d+) FLAKY TEST')
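        # Example lines the regexps above are expected to match (formats
        # assumed from typical gtest output, shown only for illustration):
        #   [ RUN      ] FooTest.Bar
        #   [       OK ] FooTest.Bar (0 ms)
        #   [  FAILED  ] FooTest.Bar (12 ms)
        #   YOU HAVE 2 DISABLED TESTS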

        self._suppression_start = re.compile(
            r'Suppression \(error hash=#([0-9A-F]+)#\):')
        self._suppression_end = re.compile(r'^}\s*$')

        # TODO b:169251326 terms below are set outside of this codebase
        # and should be updated when possible. ("master" -> "main")
        self._main_name_re = re.compile(r'\[Running for master: "([^"]*)"')
        self.main_name = ''

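        # Regexps for the unittest-style error summary block that some
        # runners append after the run (format assumed from the patterns
        # below): a line of 70 '=' characters, a 'FAIL:'/'ERROR:' line
        # naming the test, the failure details, then a line of 70 '-'
        # characters.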
        self._error_logging_start_re = re.compile('=' * 70)
        self._error_logging_test_name_re = re.compile(
            '(?:FAIL|ERROR): ' + test_name_regexp)
        self._error_logging_end_re = re.compile('-' * 70)
        self._error_logging_first_dash_found = False

    def _TestsByStatus(self, status, include_fails, include_flaky):
        """Returns the list of tests with the given status.

        Args:
            status: test result status to search for.
            include_fails: If False, tests containing 'FAILS_' anywhere in
                their names will be excluded from the list.
            include_flaky: If False, tests containing 'FLAKY_' anywhere in
                their names will be excluded from the list.
        Returns:
            List of tests with the given status.
        """
        test_list = [x[0] for x in self._test_status.items()
                     if self._StatusOfTest(x[0]) == status]

        if not include_fails:
            test_list = [x for x in test_list if 'FAILS_' not in x]
        if not include_flaky:
            test_list = [x for x in test_list if 'FLAKY_' not in x]

        return test_list

    def _StatusOfTest(self, test):
        """Returns the status code for the given test, or 'not known'."""
        test_status = self._test_status.get(test, ('not known', []))
        return test_status[0]

    def _RecordError(self, line, reason):
        """Record a log line that produced a parsing error.

        Args:
            line: text of the line at which the error occurred.
            reason: a string describing the error.
        """
        self.internal_error_lines.append("%s: %s [%s]" % (self._line_number,
                                                          line.strip(),
                                                          reason))

    def TotalTests(self):
        """Returns the number of parsed tests."""
        return len(self._test_status)

    def PassedTests(self):
        """Returns list of tests that passed."""
        return self._TestsByStatus('OK', False, False)

    def FailedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that failed, timed out, or didn't finish.

        This list will be incorrect until the complete log has been processed,
        because it will show currently running tests as having failed.

        Args:
            include_fails: If False, tests with 'FAILS_' anywhere in their
                names will be excluded from the list.
            include_flaky: If False, tests with 'FLAKY_' anywhere in their
                names will be excluded from the list.
        Returns:
            List of failed tests.
        """
        return (self._TestsByStatus('failed', include_fails, include_flaky) +
                self._TestsByStatus('timeout', include_fails, include_flaky) +
                self._TestsByStatus('started', include_fails, include_flaky))

    def FailureDescription(self, test):
        """Returns a list containing the failure description for the given test.

        If the test didn't fail or time out, returns [].

        Args:
            test: name of the test whose failure description is wanted.
        Returns:
            List of lines describing the test's failure, as reported in the
            log.
        """
        test_status = self._test_status.get(test, ('', []))
        return test_status[1]

    def SuppressionHashes(self):
        """Returns list of suppression hashes found in the log."""
        return list(self._suppressions.keys())

    def Suppression(self, suppression_hash):
        """Returns a list containing the suppression for a given hash.

        If the suppression hash doesn't exist, returns [].

        Args:
            suppression_hash: hash identifying the suppression.
        Returns:
            List of lines making up the suppression.
        """
        return self._suppressions.get(suppression_hash, [])

    def ProcessLogLine(self, line):
        """This is called once with each line of the test log."""

        # Track line number for error messages.
        self._line_number += 1

        if not self.main_name:
            results = self._main_name_re.search(line)
            if results:
                self.main_name = results.group(1)

        # Is it a line reporting disabled tests?
        results = self._disabled.search(line)
        if results:
            try:
                disabled = int(results.group(1))
            except ValueError:
                disabled = 0
            if disabled > 0 and isinstance(self.disabled_tests, int):
                self.disabled_tests += disabled
            else:
                # If we can't parse the line, at least give a heads-up. This is
                # a safety net for a case that shouldn't happen but isn't a
                # fatal error.
                self.disabled_tests = 'some'
            return

        # Is it a line reporting flaky tests?
        results = self._flaky.search(line)
        if results:
            try:
                flaky = int(results.group(1))
            except ValueError:
                flaky = 0
            if flaky > 0 and isinstance(self.flaky_tests, int):
                self.flaky_tests = flaky
            else:
                # If we can't parse the line, at least give a heads-up. This is
                # a safety net for a case that shouldn't happen but isn't a
                # fatal error.
                self.flaky_tests = 'some'
            return

        # Is it the start of a test?
        results = self._test_start.search(line)
        if results:
            test_name = results.group(1)
            if test_name in self._test_status:
                self._RecordError(line, 'test started more than once')
                return
            if self._current_test:
                status = self._StatusOfTest(self._current_test)
                if status in ('OK', 'failed', 'timeout'):
                    self._RecordError(line, 'start while in status %s' % status)
                    return
                # The previous test never finished; mark it as failed.
                self._test_status[self._current_test] = (
                    'failed', self._failure_description)
            self._test_status[test_name] = ('started', ['Did not complete.'])
            self._current_test = test_name
            self._failure_description = []
            return

        # Is it a test success line?
        results = self._test_ok.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status != 'started':
                self._RecordError(line, 'success while in status %s' % status)
                return
            self._test_status[test_name] = ('OK', [])
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test failure line?
        results = self._test_fail.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed', 'timeout'):
                self._RecordError(line, 'failure while in status %s' % status)
                return
            # Don't overwrite the failure description when a failing test is
            # listed a second time in the summary, or if it was already
            # recorded as timing out.
            if status not in ('failed', 'timeout'):
                self._test_status[test_name] = ('failed',
                                                self._failure_description)
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test timeout line?
        results = self._test_timeout.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed'):
                self._RecordError(line, 'timeout while in status %s' % status)
                return
            self._test_status[test_name] = (
                'timeout', self._failure_description + ['Killed (timed out).'])
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the start of a new valgrind suppression?
        results = self._suppression_start.search(line)
        if results:
            suppression_hash = results.group(1)
            if suppression_hash in self._suppressions:
                self._RecordError(line, 'suppression reported more than once')
                return
            self._suppressions[suppression_hash] = []
            self._current_suppression_hash = suppression_hash
            self._current_suppression = [line]
            return

        # Is it the end of a valgrind suppression?
        results = self._suppression_end.search(line)
        if results and self._current_suppression_hash:
            self._current_suppression.append(line)
            self._suppressions[self._current_suppression_hash] = (
                self._current_suppression)
            self._current_suppression_hash = ''
            self._current_suppression = []
            return

        # Is it the start of a test summary error message?
        results = self._error_logging_test_name_re.search(line)
        if results:
            test_name = results.group(1)
            self._test_status[test_name] = ('failed', ['Output not found.'])
            self._current_test = test_name
            self._failure_description = []
            self._error_logging_first_dash_found = False
            return

        # Is it the start of the next test summary, signaling the end
        # of the previous message?
        results = self._error_logging_start_re.search(line)
        if results and self._current_test:
            self._test_status[self._current_test] = ('failed',
                                                     self._failure_description)
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the end of the extra test failure summaries?
        results = self._error_logging_end_re.search(line)
        if results and self._current_test:
            if self._error_logging_first_dash_found:
                self._test_status[self._current_test] = (
                    'failed', self._failure_description)
                self._failure_description = []
                self._current_test = ''
            self._error_logging_first_dash_found = True
            return

        # Random line: if we're in a suppression, collect it. Suppressions are
        # generated after all tests are finished, so this should always belong
        # to the current suppression hash.
        if self._current_suppression_hash:
            self._current_suppression.append(line)
            return

        # Random line: if we're in a test, collect it for the failure
        # description. Tests may run simultaneously, so this might be off, but
        # it's worth a try.
        if self._current_test:
            self._failure_description.append(line)

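
if __name__ == '__main__':
    # Illustrative sketch only: run a few hand-written, gtest-style log lines
    # through the parser and print a summary. The sample lines below are
    # assumptions made up for demonstration, not output from a real run.
    _SAMPLE_LOG = [
        '[ RUN      ] FooTest.Bar',
        '[       OK ] FooTest.Bar (0 ms)',
        '[ RUN      ] FooTest.Baz',
        'Expected true, got false.',
        '[  FAILED  ] FooTest.Baz (1 ms)',
    ]
    parser = gtest_parser()
    for log_line in _SAMPLE_LOG:
        parser.ProcessLogLine(log_line)
    print('Total tests parsed: %d' % parser.TotalTests())
    print('Passed: %s' % parser.PassedTests())
    for failed_test in parser.FailedTests():
        print('Failed: %s (%s)' % (failed_test,
                                   parser.FailureDescription(failed_test)))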