#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


from __future__ import print_function
import bench_util
import getopt
import httplib
import itertools
import json
import os
import re
import sys
import urllib
import urllib2
import xml.sax.saxutils

# Maximum number of characters we expect in an svn revision.
MAX_SVN_REV_LENGTH = 5

# Indices for getting elements from bench expectation files.
# See bench_expectations_<builder>.txt for details.
EXPECTED_IDX = -3
LB_IDX = -2
UB_IDX = -1

# Indices into the tuple of dictionaries containing slower and faster alerts.
SLOWER = 0
FASTER = 1

# URL prefix for the bench dashboard page. Shows the most recent 15 days of
# data.
DASHBOARD_URL_PREFIX = 'http://go/skpdash/#15'

def usage():
    """Prints simple usage information."""

    print('-a <representation_alg> bench representation algorithm to use. ')
    print(' Defaults to "25th". See bench_util.py for details.')
    print('-b <builder> name of the builder whose bench data we are checking.')
    print('-d <dir> a directory containing bench_<revision>_<scalar> files.')
    print('-e <file> file containing expected bench builder values/ranges.')
    print(' Will raise exception if actual bench values are out of range.')
    print(' See bench_expectations_<builder>.txt for data format / examples.')
    print('-r <revision> the git commit hash or svn revision for checking ')
    print(' bench values.')


class Label:
    """The information in a label.

    (str, str, str, {str:str})"""
    def __init__(self, bench, config, time_type, settings):
        self.bench = bench
        self.config = config
        self.time_type = time_type
        self.settings = settings

    def __repr__(self):
        return "Label(%s, %s, %s, %s)" % (
            str(self.bench),
            str(self.config),
            str(self.time_type),
            str(self.settings),
        )

    def __str__(self):
        return "%s_%s_%s_%s" % (
            str(self.bench),
            str(self.config),
            str(self.time_type),
            str(self.settings),
        )

    def __eq__(self, other):
        return (self.bench == other.bench and
                self.config == other.config and
                self.time_type == other.time_type and
                self.settings == other.settings)

    def __hash__(self):
        return (hash(self.bench) ^
                hash(self.config) ^
                hash(self.time_type) ^
                hash(frozenset(self.settings.iteritems())))

def create_bench_dict(revision_data_points):
    """Convert current revision data into a dictionary of line data.
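
    For example (hypothetical values, for illustration only): a data point
    with bench 'desk_amazon.skp', config 'gpu', time_type '' and settings {}
    becomes the key Label('desk_amazon.skp', 'gpu', '', {}), mapped to that
    point's measured time.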

    Args:
        revision_data_points: a list of bench data points.

    Returns:
        a dictionary of this form:
            keys = Label objects
            values = the corresponding bench value
    """
    bench_dict = {}
    for point in revision_data_points:
        point_name = Label(point.bench, point.config, point.time_type,
                           point.settings)
        if point_name not in bench_dict:
            bench_dict[point_name] = point.time
        else:
            raise Exception('Duplicate bench entry: ' + str(point_name))

    return bench_dict

def read_expectations(expectations, filename):
    """Reads expectations data from a file into the expectations dict."""
    for expectation in open(filename).readlines():
        elements = expectation.strip().split(',')
        if not elements[0] or elements[0].startswith('#'):
            continue
        if len(elements) != 5:
            raise Exception("Invalid expectation line format: %s" %
                            expectation)
        bench_entry = elements[0] + ',' + elements[1]
        if bench_entry in expectations:
            raise Exception("Duplicate entries for bench expectation %s" %
                            bench_entry)
        # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB, EXPECTED)
        expectations[bench_entry] = (float(elements[LB_IDX]),
                                     float(elements[UB_IDX]),
                                     float(elements[EXPECTED_IDX]))

def check_expectations(lines, expectations, key_suffix):
    """Checks if any bench results are outside of the expected range.

    For each input line in lines, checks the expectations dictionary to see if
    the bench is out of the given range.

    Args:
        lines: dictionary mapping Label objects to the bench values.
        expectations: dictionary returned by read_expectations().
        key_suffix: string of <Platform>-<Alg> containing the bot platform and
            the bench representation algorithm.

    Returns:
        No return value.

    Raises:
        Exception containing bench data that are out of range, if any.
    """
    # The platform for this bot, to pass to the dashboard plot.
    platform = key_suffix[ : key_suffix.rfind('-')]
    # Tuple of dictionaries recording exceptions that are slower and faster,
    # respectively. Each dictionary maps off_ratio (ratio of actual to
    # expected) to a list of corresponding exception messages.
    exceptions = ({}, {})
    for line in lines:
        line_str = str(line)
        line_str = line_str[ : line_str.find('_{')]
        # Extracts bench and config from line_str, which is in the format
        # <bench-picture-name>.skp_<config>_
        bench, config = line_str.strip('_').split('.skp_')
        bench_platform_key = line_str + ',' + key_suffix
        if bench_platform_key not in expectations:
            continue
        this_bench_value = lines[line]
        this_min, this_max, this_expected = expectations[bench_platform_key]
        if this_bench_value < this_min or this_bench_value > this_max:
            off_ratio = this_bench_value / this_expected
            exception = 'Bench %s out of range [%s, %s] (%s vs %s, %s%%).' % (
                bench_platform_key, this_min, this_max, this_bench_value,
                this_expected, (off_ratio - 1) * 100)
            exception += '\n' + '~'.join([
                DASHBOARD_URL_PREFIX, bench, platform, config])
            if off_ratio > 1:  # Bench is slower.
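                # Group messages by off_ratio so they can be sorted by how
                # far each bench deviates from its expectation when reported.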
                exceptions[SLOWER].setdefault(off_ratio, []).append(exception)
            else:
                exceptions[FASTER].setdefault(off_ratio, []).append(exception)
    outputs = []
    for i in [SLOWER, FASTER]:
        if exceptions[i]:
            ratios = exceptions[i].keys()
            ratios.sort(reverse=True)
            li = []
            for ratio in ratios:
                li.extend(exceptions[i][ratio])
            header = '%s benches got slower (sorted by %% difference):' % len(li)
            if i == FASTER:
                header = header.replace('slower', 'faster')
            outputs.extend(['', header] + li)

    if outputs:
        # Directly raising Exception will have stderr outputs tied to the line
        # number of the script, so use sys.stderr.write() instead.
        # Add a trailing newline to suppress new line checking errors.
        sys.stderr.write('\n'.join(['Exception:'] + outputs + ['\n']))
        sys.exit(1)


def main():
    """Parses command line and checks bench expectations."""
    try:
        opts, _ = getopt.getopt(sys.argv[1:],
                                "a:b:d:e:r:",
                                "default-setting=")
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(2)

    directory = None
    bench_expectations = {}
    rep = '25th'  # bench representation algorithm, defaults to 25th
    rev = None  # git commit hash or svn revision number
    bot = None

    try:
        for option, value in opts:
            if option == "-a":
                rep = value
            elif option == "-b":
                bot = value
            elif option == "-d":
                directory = value
            elif option == "-e":
                read_expectations(bench_expectations, value)
            elif option == "-r":
                rev = value
            else:
                usage()
                assert False, "unhandled option"
    except ValueError:
        usage()
        sys.exit(2)

    if directory is None or bot is None or rev is None:
        usage()
        sys.exit(2)

    platform_and_alg = bot + '-' + rep

    data_points = bench_util.parse_skp_bench_data(directory, rev, rep)

    bench_dict = create_bench_dict(data_points)

    if bench_expectations:
        check_expectations(bench_dict, bench_expectations, platform_and_alg)


if __name__ == "__main__":
    main()
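
# A sketch of how this script is typically invoked and what an expectations
# file line looks like. The builder name, paths, revision, and numbers below
# are hypothetical and for illustration only.
#
#   python <path/to/this/script> -a 25th -b Perf-Ubuntu-GCC \
#       -d /path/to/bench_data -e bench_expectations_Perf-Ubuntu-GCC.txt \
#       -r <git-hash-or-svn-revision>
#
# Each non-comment line in the expectations file has five comma-separated
# fields, <Bench_Config_TimeType>,<Platform-Alg>,<expected>,<lower>,<upper>,
# which read_expectations() stores as (lower, upper, expected) keyed on the
# first two fields, e.g.:
#
#   desk_amazon.skp_gpu_,Perf-Ubuntu-GCC-25th,1.25,0.5,2.0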