# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import optparse
import os
import sys

from catapult_base import cloud_storage  # pylint: disable=import-error

from telemetry.core import util
from telemetry.internal.results import buildbot_output_formatter
from telemetry.internal.results import chart_json_output_formatter
from telemetry.internal.results import csv_pivot_table_output_formatter
from telemetry.internal.results import gtest_progress_reporter
from telemetry.internal.results import html_output_formatter
from telemetry.internal.results import json_output_formatter
from telemetry.internal.results import page_test_results
from telemetry.internal.results import progress_reporter

# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'gtest', 'json',
                          'chartjson', 'csv-pivot-table', 'none')


# Filenames to use for given output formats.
_OUTPUT_FILENAME_LOOKUP = {
    'html': 'results.html',
    'json': 'results.json',
    'chartjson': 'results-chart.json',
    'csv-pivot-table': 'results-pivot-table.csv'
}


def AddResultsOptions(parser):
  """Adds the results-related command line options to the given parser."""
  group = optparse.OptionGroup(parser, 'Results options')
  group.add_option('--output-format', action='append', dest='output_formats',
                   choices=_OUTPUT_FORMAT_CHOICES, default=[],
                   help='Output format. Defaults to "%%default". '
                   'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
  group.add_option('-o', '--output',
                   dest='output_file',
                   default=None,
                   help='Redirects output to a file. Defaults to stdout.')
  group.add_option('--output-dir', default=util.GetBaseDir(),
                   help='Where to save output data after the run.')
  group.add_option('--output-trace-tag',
                   default='',
                   help='Append a tag to the key of each result trace. Use '
                   'with html, buildbot, csv-pivot-table output formats.')
  group.add_option('--reset-results', action='store_true',
                   help='Delete all stored results.')
  group.add_option('--upload-results', action='store_true',
                   help='Upload the results to cloud storage.')
  group.add_option('--upload-bucket', default='output',
                   choices=cloud_storage.BUCKET_ALIAS_NAMES,
                   help='Storage bucket to use for the uploaded results. ' +
                   'Defaults to output bucket. Supported values are: ' +
                   ', '.join(cloud_storage.BUCKET_ALIAS_NAMES) + '.')
  group.add_option('--results-label',
                   default=None,
                   help='Optional label to use for the results of a run.')
  group.add_option('--suppress_gtest_report',
                   default=False,
                   help='Whether to suppress GTest progress report.')
  parser.add_option_group(group)


def ProcessCommandLineArgs(parser, args):
  """Validates the results options and prepares the output directory."""
  # TODO(ariblue): Delete this flag entirely at some future date, when the
  # existence of such a flag has been long forgotten.
  if args.output_file:
    parser.error('This flag is deprecated. Please use --output-dir instead.')

  args.output_dir = os.path.expanduser(args.output_dir)
  try:
    os.makedirs(args.output_dir)
  except OSError:
    # Do nothing if the output directory already exists. Existing files will
    # get overwritten.
    pass


def _GetOutputStream(output_format, output_dir):
  """Returns an open stream for the given output format to write to."""
  assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
  assert output_format not in ('gtest', 'none'), (
      'Cannot set stream for \'gtest\' or \'none\' output formats.')

  if output_format == 'buildbot':
    return sys.stdout

  assert output_format in _OUTPUT_FILENAME_LOOKUP, (
      'No known filename for the \'%s\' output format' % output_format)
  output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])

  # TODO(eakuefner): Factor this hack out after we rewrite HTMLOutputFormatter.
  if output_format == 'html':
    open(output_file, 'a').close()  # Create file if it doesn't exist.
    return open(output_file, 'r+')
  else:
    return open(output_file, 'w+')


def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
  """Returns a progress reporter; gtest-style output unless suppressed."""
  if suppress_gtest_report:
    return progress_reporter.ProgressReporter()

  return gtest_progress_reporter.GTestProgressReporter(
      sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)


def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v, is_first: True):
  """Creates a PageTestResults object configured per the given options.

  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'buildbot':
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      # TODO(chrishenry): We show buildbot output so that users can grep
      # through the results easily without needing to open the html
      # file. Another option for this is to output the results directly
      # in gtest-style results (via some sort of progress reporter),
      # as we plan to enable gtest-style output for all output formatters.
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              sys.stdout, trace_tag=options.output_trace_tag))
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, benchmark_metadata, options.reset_results,
          options.upload_results, options.browser_type,
          options.results_label))
    elif output_format == 'json':
      output_formatters.append(json_output_formatter.JsonOutputFormatter(
          output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats

  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate)
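
# A minimal usage sketch (illustrative only; in practice the Telemetry
# benchmark runner wires these calls together, and `benchmark_metadata` below
# stands in for a real BenchmarkMetadata instance):
#
#   parser = optparse.OptionParser()
#   AddResultsOptions(parser)
#   options, _ = parser.parse_args(['--output-format', 'json'])
#   ProcessCommandLineArgs(parser, options)
#   results = CreateResults(benchmark_metadata, options)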