# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses the command line, discovers the appropriate benchmarks, and runs them.

Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""
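
# Embedders typically call main() with a project configuration object (see
# project_config.ProjectConfig below) and exit with its return value, e.g.:
#
#   sys.exit(main(my_project_config))
#
# where my_project_config is a placeholder for the embedder's own config.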

import argparse
import hashlib
import json
import logging
import os
import sys


# We need to set the logging format here to make sure that any other modules
# imported by telemetry don't set the logging format before this, which would
# make this call a no-op.
# (See: https://docs.python.org/2/library/logging.html#logging.basicConfig)
logging.basicConfig(
    format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d  '
           '%(message)s')


from telemetry import benchmark
from telemetry.core import discover
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.internal.util import command_line
from telemetry.internal.util import ps_util
from telemetry.util import matching
from telemetry import project_config


# TODO(aiolos): Remove this once clients move over to project_config version.
ProjectConfig = project_config.ProjectConfig


def _IsBenchmarkEnabled(benchmark_class, possible_browser):
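  """Returns True if |benchmark_class| is a Benchmark that is enabled for
  |possible_browser|."""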
  return (issubclass(benchmark_class, benchmark.Benchmark) and
          not benchmark_class.ShouldDisable(possible_browser) and
          decorators.IsEnabled(benchmark_class, possible_browser)[0])


def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
  """Prints the given benchmarks, sorted by name.

  If a possible_browser is given, benchmarks that are disabled for it are
  listed separately at the end.

  Args:
    benchmarks: the list of benchmark classes to be printed.
    possible_browser: the possible_browser instance used to check which
      benchmarks are enabled, or None to list all benchmarks as available.
    output_pipe: the stream to which the benchmark list is printed.
  """
  if not benchmarks:
    print >> output_pipe, 'No benchmarks found!'
    return
  b = None  # Needed to stop pylint complaining about an undefined variable.
  if any(not issubclass(b, benchmark.Benchmark) for b in benchmarks):
    assert False, '|benchmarks| param contains non-benchmark class: %s' % b

  # Align the benchmark names to the longest one.
  format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
  disabled_benchmarks = []

  print >> output_pipe, 'Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else '')

  # Sort the benchmarks by benchmark name.
  benchmarks = sorted(benchmarks, key=lambda b: b.Name())
  for b in benchmarks:
    if not possible_browser or _IsBenchmarkEnabled(b, possible_browser):
      print >> output_pipe, format_string % (b.Name(), b.Description())
    else:
      disabled_benchmarks.append(b)

  if disabled_benchmarks:
    print >> output_pipe, (
        '\nDisabled benchmarks for %s are (force run with -d):' %
        possible_browser.browser_type)
    for b in disabled_benchmarks:
      print >> output_pipe, format_string % (b.Name(), b.Description())
  print >> output_pipe, (
      'Pass --browser to list benchmarks for another browser.\n')


class Help(command_line.OptparseCommand):
  """Display help information about a command"""

  usage = '[command]'

  def __init__(self, commands):
    self._all_commands = commands

  def Run(self, args):
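    """Prints detailed help for a single matching command, or a usage
    summary of all available commands otherwise."""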
    if len(args.positional_args) == 1:
      commands = _MatchingCommands(args.positional_args[0], self._all_commands)
      if len(commands) == 1:
        command = commands[0]
        parser = command.CreateParser()
        command.AddCommandLineArgs(parser, None)
        parser.print_help()
        return 0

    print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
    print >> sys.stderr, 'Available commands are:'
    for command in self._all_commands:
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    print >> sys.stderr, ('"%s help <command>" to see usage information '
                          'for a specific command.' % _ScriptName())
    return 0


class List(command_line.OptparseCommand):
  """Lists the available benchmarks"""

  usage = '[benchmark_name] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    parser.add_option('-j', '--json-output-file', type='string')
    parser.add_option('-n', '--num-shards', type='int', default=1)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    if not args.positional_args:
      args.benchmarks = _Benchmarks(environment)
    elif len(args.positional_args) == 1:
      args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
                                            environment, exact_matches=False)
    else:
      parser.error('Must provide at most one benchmark name.')

  def Run(self, args):
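    """Prints the benchmark list, or writes it as buildbot JSON when
    --json-output-file is given."""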
    possible_browser = browser_finder.FindBrowser(args)
    if args.browser_type in (
        'release', 'release_x64', 'debug', 'debug_x64', 'canary',
        'android-chromium', 'android-chrome'):
      args.browser_type = 'reference'
      possible_reference_browser = browser_finder.FindBrowser(args)
    else:
      possible_reference_browser = None
    if args.json_output_file:
      with open(args.json_output_file, 'w') as f:
        f.write(_GetJsonBenchmarkList(possible_browser,
                                      possible_reference_browser,
                                      args.benchmarks, args.num_shards))
    else:
      PrintBenchmarkList(args.benchmarks, possible_browser)
    return 0


class Run(command_line.OptparseCommand):
  """Run one or more benchmarks (default)"""

  usage = 'benchmark_name [page_set] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    benchmark.AddCommandLineArgs(parser)

    # Allow benchmarks to add their own command line options.
    matching_benchmarks = []
    for arg in sys.argv[1:]:
      matching_benchmarks += _MatchBenchmarkName(arg, environment)

    if matching_benchmarks:
      # TODO(dtu): After move to argparse, add command-line args for all
      # benchmarks to subparser. Using subparsers will avoid duplicate
      # arguments.
      matching_benchmark = matching_benchmarks.pop()
      matching_benchmark.AddCommandLineArgs(parser)
      # The benchmark's options override the defaults!
      matching_benchmark.SetArgumentDefaults(parser)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
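    """Resolves the benchmark named on the command line.

    Exits after printing the available or suggested benchmarks if the name is
    missing, unknown, or ambiguous; otherwise stores the matching benchmark
    class for Run() to instantiate.
    """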
    all_benchmarks = _Benchmarks(environment)
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
    if not matching_benchmarks:
      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
      print >> sys.stderr
      most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
          all_benchmarks, input_benchmark_name, lambda x: x.Name())
      if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Did you mean any of the benchmarks below?'
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    if len(matching_benchmarks) > 1:
      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                            input_benchmark_name)
      print >> sys.stderr, 'Did you mean one of these?'
      print >> sys.stderr
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    cls._benchmark = benchmark_class

  def Run(self, args):
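    # Exit statuses above 255 are not portable (the shell reports them modulo
    # 256), so clamp the benchmark's return value.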
    return min(255, self._benchmark().Run(args))


def _ScriptName():
  return os.path.basename(sys.argv[0])


def _MatchingCommands(string, commands):
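  """Returns the commands whose names start with |string|."""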
  return [command for command in commands
          if command.Name().startswith(string)]


@decorators.Cache
def _Benchmarks(environment):
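  """Returns all Benchmark subclasses found in |environment.benchmark_dirs|.

  The result is cached by @decorators.Cache, so the directories are only
  scanned once.
  """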
  benchmarks = []
  for search_dir in environment.benchmark_dirs:
    benchmarks += discover.DiscoverClasses(search_dir,
                                           environment.top_level_dir,
                                           benchmark.Benchmark,
                                           index_by_class_name=True).values()
  return benchmarks


def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
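  """Returns the benchmark classes matching |input_benchmark_name|.

  With exact_matches, only a benchmark whose name (or registered alias)
  matches exactly is returned; otherwise every benchmark whose name, or any
  dot-separated part of it, starts with the input is returned.
  """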
  def _Matches(input_string, search_string):
    if search_string.startswith(input_string):
      return True
    for part in search_string.split('.'):
      if part.startswith(input_string):
        return True
    return False

  # Exact matching.
  if exact_matches:
    # Don't add aliases to the search dict; only allow exact matching for them.
    if input_benchmark_name in environment.benchmark_aliases:
      exact_match = environment.benchmark_aliases[input_benchmark_name]
    else:
      exact_match = input_benchmark_name

    for benchmark_class in _Benchmarks(environment):
      if exact_match == benchmark_class.Name():
        return [benchmark_class]
    return []

  # Fuzzy matching.
  return [benchmark_class for benchmark_class in _Benchmarks(environment)
          if _Matches(input_benchmark_name, benchmark_class.Name())]


def GetBenchmarkByName(name, environment):
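  """Returns the benchmark class named |name|, or None if there is none."""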
  matched = _MatchBenchmarkName(name, environment, exact_matches=True)
  # With exact_matches, len(matched) is either 0 or 1.
  if len(matched) == 0:
    return None
  return matched[0]


def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
  """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format:
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  output = {
    'version': 1,
    'steps': {
    }
  }
  for benchmark_class in benchmark_classes:
    if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
      continue

    base_name = benchmark_class.Name()
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=chartjson', '--upload-results',
                base_name]
    perf_dashboard_id = base_name

    # Based on the current timings, we shift the result of the hash function to
    # achieve better load balancing. These shift values should be revised when
    # necessary. The shift value is calculated such that the total cycle time
    # is minimized.
    hash_shift = {
      2 : 47,  # for old desktop configurations with 2 slaves
      5 : 56,  # for new desktop configurations with 5 slaves
      21 : 43  # for Android configurations with 3 slaves and 7 devices each
    }
    shift = hash_shift.get(num_shards, 0)
    base_name_hash = hashlib.sha1(base_name).hexdigest()
    device_affinity = (int(base_name_hash, 16) >> shift) % num_shards
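    # For example, with num_shards=5 the 160-bit SHA-1 digest is shifted right
    # by 56 bits before being reduced modulo 5, so a benchmark's shard depends
    # only on its name.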

    output['steps'][base_name] = {
      'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
      'device_affinity': device_affinity,
      'perf_dashboard_id': perf_dashboard_id,
    }
    if (possible_reference_browser and
        _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
      output['steps'][base_name + '.reference'] = {
        'cmd': ' '.join(base_cmd + [
              '--browser=reference', '--output-trace-tag=_ref']),
        'device_affinity': device_affinity,
        'perf_dashboard_id': perf_dashboard_id,
      }

  return json.dumps(output, indent=2, sort_keys=True)


def main(environment, extra_commands=None):
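  """Parses the command line, then runs the selected command.

  Args:
    environment: the project configuration (typically a
      project_config.ProjectConfig) describing where benchmarks live.
    extra_commands: optional list of additional command classes to expose
      alongside Help, List and Run.

  Returns:
    The command's exit code.
  """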
  ps_util.EnableListingStrayProcessesUponExitHook()

  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  if extra_commands is None:
    extra_commands = []
  all_commands = [Help, List, Run] + extra_commands

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name, all_commands)
  if len(commands) > 1:
    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                          % (command_name, _ScriptName()))
    for command in commands:
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    return 1
  if commands:
    command = commands[0]
  else:
    command = Run

  binary_manager.InitDependencyManager(environment.client_config)

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)

  # Set the default chrome root variable.
  parser.set_defaults(chrome_root=environment.default_chrome_root)

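  # During the migration to argparse, commands may supply either an argparse
  # or an optparse parser; handle the two calling conventions separately.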
  if isinstance(parser, argparse.ArgumentParser):
    commandline_args = sys.argv[1:]
    options, args = parser.parse_known_args(commandline_args[1:])
    command.ProcessCommandLineArgs(parser, options, args, environment)
  else:
    options, args = parser.parse_args()
    if commands:
      args = args[1:]
    options.positional_args = args
    command.ProcessCommandLineArgs(parser, options, environment)

  if command == Help:
    command_instance = command(all_commands)
  else:
    command_instance = command()
  if isinstance(command_instance, command_line.OptparseCommand):
    return command_instance.Run(options)
  else:
    return command_instance.Run(options, args)