1#!/usr/bin/python3
2#
3# Copyright (C) 2023 The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License"); you may not
6# use this file except in compliance with the License. You may obtain a copy of
7# the License at
8#
9#      http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14# License for the specific language governing permissions and limitations under
15# the License.
16#
17"""Compare failed tests in CTS/VTS test_result.xml.
18
Given two or more report files, this script compares them in one of three modes:
20  One-way mode: For all the failed tests in A, list the tests and the results in
21                both reports.
22  Two-way mode: For all the tests in A and B, list the tests and the results in
23                both reports. If a test only exists in one report, show null
24                in another one.
25  N-way mode: Summarize each module in all reports. The module with the lowest
26              pass rate among all reports will be listed at top.
27
28Usage example:
29  ./compare_cts_reports.py -r test_result_1.xml test_result_2.xml
30  -r test_result_3.xml -m 1 -d output_dir [-o]
  For this command line, the script aggregates test_result_1.xml and
  test_result_2.xml as one report, and then compares it with test_result_3.xml
33  under one-way mode. The comparison result is written into output_dir/diff.csv.
34
35  ./compare_cts_reports.py -f parsed_result -r test_result.xml -m n -d tmp/
  For this command line, the script loads the report from the directory
  parsed_result/, and then summarizes the comparison between this report and
38  test_result.xml.
39"""
40
41import argparse
42import csv
43import json
44import os
45import re
46import tempfile
47
48import aggregate_cts_reports
49import parse_cts_report
50import constant
51
52
def one_way_compare(reports, diff_csv):
  """Compare two reports in One-way Mode.

  Walk every test recorded in report A and, for each one that failed in A,
  write a row showing its result in both A and B.

  Args:
    reports: list of reports; index 0 is report A, index 1 is report B
    diff_csv: path to csv which stores comparison results
  """

  report_a, report_b = reports[0], reports[1]

  with open(diff_csv, 'w') as diff_csvfile:
    writer = csv.writer(diff_csvfile)
    writer.writerow(['module_name', 'abi', 'class_name', 'test_name',
                     'result in A', 'result in B'])

    for module_name, abi, class_name, test_name in report_a.gen_keys_list():
      status_a = report_a.get_test_status(
          module_name, abi, class_name, test_name
      )

      # Only failures in A are of interest in one-way mode.
      if not parse_cts_report.CtsReport.is_fail(status_a):
        continue

      status_b = report_b.get_test_status(
          module_name, abi, class_name, test_name
      )
      writer.writerow(
          [module_name, abi, class_name, test_name, status_a, status_b]
      )
86
87
def two_way_compare(reports, diff_csv):
  """Compare two reports in Two-way Mode.

  Collect every test appearing in either report (A or B) along with its result
  on both sides; a test absent from one report is treated as NO_DATA there.
  Only tests whose two results differ are written to diff_csv.

  Args:
    reports: list of reports; index 0 is report A, index 1 is report B
    diff_csv: path to csv which stores comparison results
  """

  diff = {}

  for side, report in enumerate(reports):
    for module_name, abi, class_name, test_name in report.gen_keys_list():
      # Nested mapping: module -> abi -> class -> test -> [result_a, result_b]
      tests = (
          diff.setdefault(module_name, {})
          .setdefault(abi, {})
          .setdefault(class_name, {})
      )
      results = tests.setdefault(
          test_name, [constant.NO_DATA, constant.NO_DATA]
      )
      results[side] = report.get_test_status(
          module_name, abi, class_name, test_name
      )

  with open(diff_csv, 'w') as diff_csvfile:
    writer = csv.writer(diff_csvfile)
    writer.writerow(['module_name', 'abi', 'class_name', 'test_name',
                     'result in A', 'result in B'])

    for module_name, abis in diff.items():
      for abi, test_classes in abis.items():
        for class_name, tests in test_classes.items():
          for test_name, results in tests.items():
            if results[0] == results[1]:
              continue
            writer.writerow(
                [module_name, abi, class_name, test_name] + results
            )
129
130
def gen_summary_row(reports, module_with_abi, item):
  """Generate one row of diff.csv.

  According to module_with_abi and item, find the value of each report and
  return as a list.

  Args:
    reports: list of CtsReport object
    module_with_abi: combined module_name and abi, formatted as 'name[abi]'
    item: the attribute to find in report

  Returns:
    row: list to write into output file

  Raises:
    ValueError: if module_with_abi is not of the form 'name[abi]', or if item
      is not a recognized attribute.
  """

  # module_with_abi is built as f'{module_name}[{abi}]' (see n_way_compare);
  # split it back apart. Note: the original character class had a stray '^'
  # ([^\[^\]]) which also excluded literal '^' from abi names.
  abi_match = re.search(r'\[([^\[\]]+)\]$', module_with_abi)
  if abi_match is None:
    raise ValueError(
        f"Invalid value '{module_with_abi}' for argument 'module_with_abi'"
    )

  abi = abi_match.group(1)
  module_name = module_with_abi[:abi_match.start()]

  row = []

  for report in reports:
    module_summary = report.module_summaries.get(module_name, {})
    summary = module_summary.get(abi)

    if not summary:
      # Module/abi absent from this report: pass rate 0.0, all counts 0.
      row.append(0.0 if item == constant.PASS_RATE else 0)
    elif item == constant.TESTED_ITEMS:
      row.append(summary.tested_items)
    elif item == constant.PASS_RATE:
      row.append(summary.pass_rate)
    elif item in parse_cts_report.CtsReport.STATUS_ORDER:
      row.append(summary.counter[item])
    else:
      raise ValueError(f"Invalid value '{item}' for argument 'item'")

  return row
174
175
def n_way_compare(reports, diff_csv):
  """Compare multiple reports in N-way Mode.

  Summarize each module's results across all the given reports and write the
  summary into diff_csv, ordering modules so that the one with the lowest
  pass rate among all reports comes first.

  Args:
    reports: list of reports
    diff_csv: path to csv which stores comparison results
  """

  min_pass_rates = {}
  report_titles = []

  for idx, report in enumerate(reports):
    # Column title: prefer the device name, fall back to the build id.
    if 'build_device' in report.info:
      label = f'device_{report.info["build_device"]}'
    else:
      label = f'build_id_{report.info["build_id"]}'
    report_titles.append(f'{idx}_{label}')

    for module_name, abis in report.module_summaries.items():
      for abi, summary in abis.items():
        key = f'{module_name}[{abi}]'
        rate = summary.pass_rate

        if key not in min_pass_rates:
          # A module first seen in a later report was missing from an earlier
          # one, so its minimum pass rate is 0.0.
          min_pass_rates[key] = rate if idx == 0 else 0.0
        elif rate < min_pass_rates[key]:
          min_pass_rates[key] = rate

  # Lowest minimum pass rate first.
  module_order = sorted(min_pass_rates, key=min_pass_rates.get)

  items = parse_cts_report.CtsReport.STATUS_ORDER + [
      constant.TESTED_ITEMS,
      constant.PASS_RATE,
  ]

  with open(diff_csv, 'w') as diff_csvfile:
    writer = csv.writer(diff_csvfile)
    writer.writerow(['module_with_abi', 'item'] + report_titles)

    for module_with_abi in module_order:
      for item in items:
        summary_row = gen_summary_row(reports, module_with_abi, item)
        writer.writerow([module_with_abi, item] + summary_row)
228
229
def load_parsed_report(report_dir, ignore_abi=False):
  """Load CtsReport() from a directory that stores a parsed report.

  Expects report_dir to contain 'info.json' (report metadata) and
  'result.csv' (per-test results).

  Args:
    report_dir: directory holding the intermediate files of a parsed report
    ignore_abi: whether to drop the tests' ABI while loading

  Returns:
    the reconstructed CtsReport object

  Raises:
    FileNotFoundError: if report_dir is not a directory or a required file
      is missing.
  """

  if not os.path.isdir(report_dir):
    raise FileNotFoundError(f'{report_dir} is not a directory')

  info_path = os.path.join(report_dir, 'info.json')
  result_path = os.path.join(report_dir, 'result.csv')

  missing = [p for p in (info_path, result_path) if not os.path.exists(p)]
  if missing:
    raise FileNotFoundError(f"file {missing[0]} doesn't exist.")

  with open(info_path, 'r') as info_jsonfile:
    report = parse_cts_report.CtsReport(json.load(info_jsonfile))

  with open(result_path, 'r') as result_csvfile:
    report.load_from_csv(result_csvfile, ignore_abi)

  return report
252
253
def main():
  """Parse the command line, load all reports, and run the chosen comparison."""
  parser = argparse.ArgumentParser()

  parser.add_argument('-r', '--report', nargs='+',
                      dest='cts_reports', action='append',
                      help=('Path to cts reports. Each flag -r is followed by '
                            'a group of files to be aggregated as one report.'))
  parser.add_argument('-f', '--folder',
                      dest='cts_reports', action='append',
                      help=('Path to folder that stores intermediate files '
                            'of parsed reports.'))
  parser.add_argument('--mode', '-m', required=True, choices=['1', '2', 'n'],
                      help=('Comparison mode. 1: One-way mode. '
                            '2: Two-way mode. n: N-way mode.'))
  parser.add_argument('--output-dir', '-d', required=True,
                      help='Directory to store output files.')
  parser.add_argument('--csv', default='diff.csv', help='Path to csv output.')
  parser.add_argument('--output-files', '-o', action='store_true',
                      help='Output parsed csv files.')
  parser.add_argument('--ignore-abi', action='store_true',
                      help='Ignore the tests ABI while comparing.')

  args = parser.parse_args()

  reports = args.cts_reports

  if args.mode in ('1', '2') and len(reports) != 2:
    raise UserWarning(
        'Two sets of reports are required for one-way and two-way mode.'
    )

  output_dir = args.output_dir
  if not os.path.exists(output_dir):
    raise FileNotFoundError(f'Output directory {output_dir} does not exist.')

  diff_csv = os.path.join(output_dir, args.csv)

  ctsreports = []
  for i, report_path in enumerate(reports):
    # An entry from `--report` is a list of xml files; one from `--folder`
    # is a single directory path.
    from_xml_files = isinstance(report_path, list)

    if from_xml_files:
      report = aggregate_cts_reports.aggregate_cts_reports(
          report_path, constant.ALL_TEST_ABIS, args.ignore_abi)
    else:
      report = load_parsed_report(report_path, args.ignore_abi)

    if from_xml_files and args.output_files:
      device_name = report.info['build_device']
      sub_dir_name = tempfile.mkdtemp(
          prefix=f'{i}_{device_name}_', dir=output_dir
      )
      report.output_files(sub_dir_name)

    ctsreports.append(report)

  # Dispatch on the (argparse-validated) comparison mode.
  compare_funcs = {
      '1': one_way_compare,
      '2': two_way_compare,
      'n': n_way_compare,
  }
  if args.mode not in compare_funcs:
    raise ValueError(f'Unexpected argument for --mode: {args.mode}')
  compare_funcs[args.mode](ctsreports, diff_csv)
320
321
# Allow importing this module (e.g. from other scripts) without running the CLI.
if __name__ == '__main__':
  main()
324