1#!/usr/bin/python
2# Copyright 2015 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6# pylint: disable-msg=W0311
7
8from collections import namedtuple
9import argparse
10import glob
11import json
12import os
13import pprint
14import re
15import subprocess
16
17_EXPECTATIONS_DIR = 'expectations'
18_AUTOTEST_RESULT_TEMPLATE = 'gs://chromeos-autotest-results/%s-chromeos-test/chromeos*/graphics_dEQP/debug/graphics_dEQP.DEBUG'
19# Use this template for tryjob results:
20#_AUTOTEST_RESULT_TEMPLATE = 'gs://chromeos-autotest-results/%s-ihf/*/graphics_dEQP/debug/graphics_dEQP.DEBUG'
21_BOARD_REGEX = re.compile(r'ChromeOS BOARD = (.+)')
22_CPU_FAMILY_REGEX = re.compile(r'ChromeOS CPU family = (.+)')
23_GPU_FAMILY_REGEX = re.compile(r'ChromeOS GPU family = (.+)')
24_TEST_FILTER_REGEX = re.compile(r'dEQP test filter = (.+)')
25_HASTY_MODE_REGEX = re.compile(r'\'hasty\': \'True\'|Running in hasty mode.')
26
27#04/23 07:30:21.624 INFO |graphics_d:0240| TestCase: dEQP-GLES3.functional.shaders.operator.unary_operator.bitwise_not.highp_ivec3_vertex
28#04/23 07:30:21.840 INFO |graphics_d:0261| Result: Pass
29_TEST_RESULT_REGEX = re.compile(r'TestCase: (.+?)$\n.+? Result: (.+?)$',
30                                re.MULTILINE)
31_HASTY_TEST_RESULT_REGEX = re.compile(
32    r'\[stdout\] Test case \'(.+?)\'..$\n'
33    r'.+?\[stdout\]   (Pass|Fail|QualityWarning) \((.+)\)', re.MULTILINE)
34Logfile = namedtuple('Logfile', 'job_id name gs_path')
35
36
37def execute(cmd_list):
38  sproc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE)
39  return sproc.communicate()[0]
40
41
42def get_metadata(s):
43  cpu = re.search(_CPU_FAMILY_REGEX, s).group(1)
44  gpu = re.search(_GPU_FAMILY_REGEX, s).group(1)
45  board = re.search(_BOARD_REGEX, s).group(1)
46  filter = re.search(_TEST_FILTER_REGEX, s).group(1)
47  hasty = False
48  if re.search(_HASTY_MODE_REGEX, s):
49    hasty = True
50  print('Found results from %s for GPU = %s, filter = %s and hasty = %r.' %
51        (board, gpu, filter, hasty))
52  return board, gpu, filter, hasty
53
54
55def get_logs_from_gs(autotest_result_path):
56  logs = []
57  gs_paths = execute(['gsutil', 'ls', autotest_result_path]).splitlines()
58  for gs_path in gs_paths:
59    job_id = gs_path.split('/')[3].split('-')[0]
60    # DEBUG logs have more information than INFO logs, especially for hasty.
61    name = os.path.join('logs', job_id + '_graphics_dEQP.DEBUG')
62    logs.append(Logfile(job_id, name, gs_path))
63  for log in logs:
64    execute(['gsutil', 'cp', log.gs_path, log.name])
65  return logs
66
67
68def get_local_logs():
69  logs = []
70  for name in glob.glob(os.path.join('logs', '*_graphics_dEQP.INFO')):
71    job_id = name.split('_')[0]
72    logs.append(Logfile(job_id, name, name))
73  for name in glob.glob(os.path.join('logs', '*_graphics_dEQP.DEBUG')):
74    job_id = name.split('_')[0]
75    logs.append(Logfile(job_id, name, name))
76  return logs
77
78
79def get_all_tests(text):
80  tests = []
81  for test, result in re.findall(_TEST_RESULT_REGEX, text):
82    tests.append((test, result))
83  for test, result, details in re.findall(_HASTY_TEST_RESULT_REGEX, text):
84    tests.append((test, result))
85  return tests
86
87
88def get_not_passing_tests(text):
89  not_passing = []
90  for test, result in re.findall(_TEST_RESULT_REGEX, text):
91    if not (result == 'Pass' or result == 'NotSupported'):
92      not_passing.append((test, result))
93  for test, result, details in re.findall(_HASTY_TEST_RESULT_REGEX, text):
94    if result != 'Pass':
95      not_passing.append((test, result))
96  return not_passing
97
98
99def load_expectation_dict(json_file):
100  data = {}
101  if os.path.isfile(json_file):
102    print('Loading file ' + json_file)
103    with open(json_file, 'r') as f:
104      text = f.read()
105      data = json.loads(text)
106  return data
107
108
109def load_expectations(json_file):
110  data = load_expectation_dict(json_file)
111  expectations = {}
112  # Convert from dictionary of lists to dictionary of sets.
113  for key in data:
114    expectations[key] = set(data[key])
115  return expectations
116
117
118def expectation_list_to_dict(tests):
119  data = {}
120  tests = list(set(tests))
121  for test, result in tests:
122    if data.has_key(result):
123      new_list = list(set(data[result].append(test)))
124      data.pop(result)
125      data[result] = new_list
126    else:
127      data[result] = [test]
128  return data
129
130
131def save_expectation_dict(expectation_path, expectation_dict):
132  # Clean up obsolete expectations.
133  for file_name in glob.glob(expectation_path + '.*'):
134    if not '.hasty.' in file_name or '.hasty' in expectation_path:
135      os.remove(file_name)
136  # Dump json for next iteration.
137  with open(expectation_path + '.json', 'w') as f:
138    json.dump(expectation_dict,
139              f,
140              sort_keys=True,
141              indent=4,
142              separators=(',', ': '))
143  # Dump plain text for autotest.
144  for key in expectation_dict:
145    if expectation_dict[key]:
146      with open(expectation_path + '.' + key, 'w') as f:
147        for test in expectation_dict[key]:
148          f.write(test)
149          f.write('\n')
150
151
152# Figure out duplicates and move them to Flaky result set/list.
153def process_flaky(status_dict):
154  """Figure out duplicates and move them to Flaky result set/list."""
155  clean_dict = {}
156  flaky = set([])
157  if status_dict.has_key('Flaky'):
158    flaky = status_dict['Flaky']
159
160  # FLaky tests are tests with 2 distinct results.
161  for key1 in status_dict.keys():
162    for key2 in status_dict.keys():
163      if key1 != key2:
164        flaky |= status_dict[key1] & status_dict[key2]
165
166  # Remove Flaky tests from other status and convert to dict of list.
167  for key in status_dict.keys():
168    if key != 'Flaky':
169      not_flaky = list(status_dict[key] - flaky)
170      not_flaky.sort()
171      print('Number of "%s" is %d.' % (key, len(not_flaky)))
172      clean_dict[key] = not_flaky
173
174  # And finally process flaky list/set.
175  flaky_list = list(flaky)
176  flaky_list.sort()
177  clean_dict['Flaky'] = flaky_list
178
179  return clean_dict
180
181
182def merge_expectation_list(expectation_path, tests):
183  status_dict = {}
184  expectation_json = expectation_path + '.json'
185  if os.access(expectation_json, os.R_OK):
186    status_dict = load_expectations(expectation_json)
187  else:
188    print 'Could not load', expectation_json
189  for test, result in tests:
190    if status_dict.has_key(result):
191      new_set = status_dict[result]
192      new_set.add(test)
193      status_dict.pop(result)
194      status_dict[result] = new_set
195    else:
196      status_dict[result] = set([test])
197  clean_dict = process_flaky(status_dict)
198  save_expectation_dict(expectation_path, clean_dict)
199
200
201def load_log(name):
202  """Load test log and clean it from stderr spew."""
203  with open(name) as f:
204    lines = f.read().splitlines()
205  text = ''
206  for line in lines:
207    if ('dEQP test filter =' in line or 'ChromeOS BOARD = ' in line or
208        'ChromeOS CPU family =' in line or 'ChromeOS GPU family =' in line or
209        'TestCase: ' in line or 'Result: ' in line or
210        'Test Options: ' in line or 'Running in hasty mode.' in line or
211        # For hasty logs we have:
212        ' Pass (' in line or ' Fail (' in line or 'QualityWarning (' in line or
213        ' Test case \'' in line):
214      text += line + '\n'
215  # TODO(ihf): Warn about or reject log files missing the end marker.
216  return text
217
218
219def process_logs(logs):
220  for log in logs:
221    text = load_log(log.name)
222    if text:
223      print('================================================================')
224      print('Loading %s...' % log.name)
225      _, gpu, filter, hasty = get_metadata(text)
226      tests = get_all_tests(text)
227      print('Found %d test results.' % len(tests))
228      if tests:
229        # GPU family goes first in path to simplify adding/deleting families.
230        output_path = os.path.join(_EXPECTATIONS_DIR, gpu)
231        if not os.access(output_path, os.R_OK):
232          os.makedirs(output_path)
233        expectation_path = os.path.join(output_path, filter)
234        if hasty:
235          expectation_path = os.path.join(output_path, filter + '.hasty')
236        merge_expectation_list(expectation_path, tests)
237
238
239argparser = argparse.ArgumentParser(
240    description='Download from GS and process dEQP logs into expectations.')
241argparser.add_argument(
242    'result_ids',
243    metavar='result_id',
244    nargs='*',  # Zero or more result_ids specified.
245    help='List of result log IDs (wildcards for gsutil like 5678* are ok).')
246args = argparser.parse_args()
247
248print pprint.pformat(args)
249# This is somewhat optional. Remove existing expectations to start clean, but
250# feel free to process them incrementally.
251execute(['rm', '-rf', _EXPECTATIONS_DIR])
252for id in args.result_ids:
253  gs_path = _AUTOTEST_RESULT_TEMPLATE % id
254  logs = get_logs_from_gs(gs_path)
255
256# This will include the just downloaded logs from GS as well.
257logs = get_local_logs()
258process_logs(logs)
259