1# Copyright 2013 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import re
6import sys
7
8import json
9import math
10
11from telemetry.util import perf_result_data_type
12
13
# Mapping from result type to the prefix emitted before each test output line.
# Judging by the constant names, the '*'-prefixed variants mark the important
# (default) results while the unstarred ones are unimportant; INFORMATIONAL
# results carry no prefix at all. The prefixes are consumed downstream by the
# buildbot log parser referenced in the PrintPerfResult docstring.
RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
                perf_result_data_type.DEFAULT: '*RESULT ',
                perf_result_data_type.INFORMATIONAL: '',
                perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
                perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}
20
21
22def _EscapePerfResult(s):
23  """Escapes |s| for use in a perf result."""
24  return re.sub(r'[\:|=/#&,]', '_', s)
25
26
def FlattenList(values):
  """Returns a simple list without sub-lists.

  Nested lists are expanded recursively, preserving element order; any
  non-list entry is kept as-is.
  """
  flattened = []
  for item in values:
    flattened.extend(FlattenList(item) if isinstance(item, list) else [item])
  return flattened
36
37
def GeomMeanAndStdDevFromHistogram(histogram_json):
  """Computes the geometric mean and standard deviation of a JSON histogram.

  Args:
    histogram_json: JSON string holding a 'buckets' list; each bucket carries
        'low', 'count' and, for bounded buckets, 'high'.

  Returns:
    A (geometric mean, standard deviation) pair. Returns (0.0, 0.0) when the
    histogram has no buckets or no samples with a positive bucket value.
  """
  histogram = json.loads(histogram_json)
  # Handle empty histograms gracefully.
  if 'buckets' not in histogram:
    return 0.0, 0.0
  buckets = histogram['buckets']
  # Represent each bucket by its midpoint, or by 'low' for open-ended buckets.
  for bucket in buckets:
    if 'high' in bucket:
      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
    else:
      bucket['mean'] = bucket['low']
  # Only buckets with a positive representative value contribute: log() is
  # undefined for the rest.
  positive = [b for b in buckets if b['mean'] > 0]
  count = sum(b['count'] for b in positive)
  if count == 0:
    return 0.0, 0.0
  sum_of_logs = sum(math.log(b['mean']) * b['count'] for b in positive)
  geom_mean = math.exp(sum_of_logs / count)
  sum_of_squares = sum(
      (b['mean'] - geom_mean) ** 2 * b['count'] for b in positive)
  return geom_mean, math.sqrt(sum_of_squares / count)
63
64
65def _ValueToString(v):
66  # Special case for floats so we don't print using scientific notation.
67  if isinstance(v, float):
68    return '%f' % v
69  else:
70    return str(v)
71
72
def _MeanAndStdDevFromList(values):
  """Returns a (display value, mean, sample std deviation) triple for |values|.

  With more than one entry the display value is a bracketed, comma-joined
  string; the mean and standard deviation are computed only when every entry
  converts to float, and stay None otherwise. A single entry is returned
  unformatted with (None, None) statistics.
  """
  avg = None
  sd = None
  if len(values) <= 1:
    # A lone value passes through untouched; statistics are undefined for it.
    return values[0], avg, sd
  value = '[%s]' % ','.join(_ValueToString(v) for v in values)
  try:
    as_floats = [float(v) for v in values]
  except ValueError:
    # Non-numeric entries: fall back to a plain comma-separated string.
    return ', '.join(values), avg, sd
  avg = sum(as_floats) / len(as_floats)
  # Sample (Bessel-corrected) variance, hence the n-1 divisor.
  variance = sum((f - avg) ** 2 for f in as_floats) / (len(as_floats) - 1)
  sd = math.sqrt(variance)
  return value, avg, sd
88
89
def PrintPages(page_list):
  """Prints a list of pages to stdout in the format required by perf tests.

  Args:
    page_list: Iterable of page name strings; each is escaped via
        _EscapePerfResult before printing.
  """
  # print() with a single argument behaves identically under Python 2 and
  # Python 3, unlike the former `print expr` statement form.
  print('Pages: [%s]' % ','.join(_EscapePerfResult(p) for p in page_list))
93
94
def PrintPerfResult(measurement, trace, values, units,
                    result_type=perf_result_data_type.DEFAULT,
                    print_to_stdout=True):
  """Formats numerical data in the format required by perf tests.

  The string args may be empty but they must not contain any colons (:) or
  equals signs (=).
  This is parsed by the buildbot using:
  http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py

  Args:
    measurement: A description of the quantity being measured, e.g. "vm_peak".
        On the dashboard, this maps to a particular graph. Mandatory.
    trace: A description of the particular data point, e.g. "reference".
        On the dashboard, this maps to a particular "line" in the graph.
        Mandatory.
    values: A list of numeric measured values. An N-dimensional list will be
        flattened and treated as a simple list.
    units: A description of the units of measure, e.g. "bytes".
    result_type: Accepts values of perf_result_data_type.ALL_TYPES.
    print_to_stdout: If True, also prints the output to stdout; the formatted
        string is returned to the caller in either case.

  Returns:
    String of the formatted perf result.
  """
  assert perf_result_data_type.IsValidType(result_type), \
         'result type: %s is invalid' % result_type

  trace_name = _EscapePerfResult(trace)

  if result_type in (perf_result_data_type.UNIMPORTANT,
                     perf_result_data_type.DEFAULT,
                     perf_result_data_type.INFORMATIONAL):
    assert isinstance(values, list)
    assert '/' not in measurement
    flattened_values = FlattenList(values)
    assert len(flattened_values)
    value, avg, sd = _MeanAndStdDevFromList(flattened_values)
    output = '%s%s: %s%s%s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        # Do not show equal sign if the trace is empty. Usually it happens when
        # measurement is enough clear to describe the result.
        '= ' if trace_name else '',
        value,
        units)
  else:
    assert perf_result_data_type.IsHistogram(result_type)
    assert isinstance(values, list)
    # The histograms can only be printed individually, there's no computation
    # across different histograms.
    assert len(values) == 1
    value = values[0]
    output = '%s%s: %s= %s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        value,
        units)
    avg, sd = GeomMeanAndStdDevFromHistogram(value)

  # NOTE(review): a mean or stddev of exactly 0.0 is suppressed here along
  # with None; this truthiness test matches the original behavior and is kept
  # deliberately.
  if avg:
    output += '\nAvg %s: %f%s' % (measurement, avg, units)
  if sd:
    output += '\nSd  %s: %f%s' % (measurement, sd, units)
  if print_to_stdout:
    # print() with a single argument behaves identically under Python 2 and
    # Python 3, unlike the former `print expr` statement form.
    print(output)
    sys.stdout.flush()
  return output
166