#!/usr/bin/env python2
"""Generate summary report for ChromeOS toolchain waterfalls."""

# Desired future features (to be added):
# - arguments to allow generating only the main waterfall report,
#   or only the rotating builder reports, or only the failures
#   report; or the waterfall reports without the failures report.
# - Better way of figuring out which dates/builds to generate
#   reports for: probably an argument specifying a date or a date
#   range, then use something like the new buildbot utils to
#   query the build logs to find the right build numbers for the
#   builders for the specified dates.
# - Store/get the json/data files in mobiletc-prebuild's x20 area.
# - Update data in json file to reflect, for each testsuite, which
#   tests are not expected to run on which boards; update this
#   script to use that data appropriately.
# - Make sure user's prodaccess is up-to-date before trying to use
#   this script.
# - Add some nice formatting/highlighting to reports.

from __future__ import print_function

import argparse
import getpass
import json
import os
import re
import shutil
import sys
import time

from cros_utils import command_executer

# All the test suites whose data we might want for the reports.
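# Each entry is (suite_name, test_family); test_family is the buildbot step
# name prefix (e.g. 'HWTest') used when fetching that step's log in
# DownloadLogFile.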
TESTS = (('bvt-inline', 'HWTest'), ('bvt-cq', 'HWTest'), ('security', 'HWTest'),
         ('kernel_daily_regression', 'HWTest'),
         ('kernel_daily_benchmarks', 'HWTest'))

# The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
# LISTED IN THE REPORT.
WATERFALL_BUILDERS = [
    'amd64-llvm-next-toolchain',
    'arm-llvm-next-toolchain',
    'arm64-llvm-next-toolchain',
]

DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
ARCHIVE_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-reports/'
DOWNLOAD_DIR = '/tmp/waterfall-logs'
MAX_SAVE_RECORDS = 7
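# NOTE: MAX_SAVE_RECORDS is also used as a day offset subtracted directly from
# integer YYYYMMDD dates (see PruneOldFailures and RecordFailures), which is
# only an approximation near month boundaries.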
BUILD_DATA_FILE = '%s/build-data.txt' % DATA_DIR
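# Each line of BUILD_DATA_FILE is 'builder,build_number', recording the last
# build for which a report was generated (see UpdateBuilds and GetBuilds).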
GCC_ROTATING_BUILDER = 'gcc_toolchain'
LLVM_ROTATING_BUILDER = 'llvm_next_toolchain'
ROTATING_BUILDERS = [GCC_ROTATING_BUILDER, LLVM_ROTATING_BUILDER]

# For int-to-string date conversion.  Note, the index of the month in this
# list needs to correspond to the month's integer value, i.e. 'Sep' must
# be MONTHS[9].
MONTHS = [
    '', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
    'Nov', 'Dec'
]


def format_date(int_date):
  """Convert an integer date to a string date. YYYYMMDD -> YYYY-MMM-DD"""

  if int_date == 0:
    return 'today'

  tmp_date = int_date
  day = tmp_date % 100
  tmp_date = tmp_date // 100
  month = tmp_date % 100
  year = tmp_date // 100

  month_str = MONTHS[month]
  date_str = '%d-%s-%d' % (year, month_str, day)
  return date_str


def EmailReport(report_file, report_type, date, email_to):
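  """Email the given report file using sendgmr."""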
  subject = '%s Waterfall Summary report, %s' % (report_type, date)
  sendgmr_path = '/google/data/ro/projects/gws-sre/sendgmr'
  command = ('%s --to=%s --subject="%s" --body_file=%s' %
             (sendgmr_path, email_to, subject, report_file))
  command_executer.GetCommandExecuter().RunCommand(command)


def PruneOldFailures(failure_dict, int_date):
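  """Remove failure records dated on or before int_date - MAX_SAVE_RECORDS.

  failure_dict is nested as {suite: {test: {error_msg: [[int_date, platform,
  builder, build_num], ...]}}} (see RecordFailures).  Tests and error messages
  whose record lists become empty are removed as well.
  """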
  earliest_date = int_date - MAX_SAVE_RECORDS
  for suite in failure_dict:
    suite_dict = failure_dict[suite]
    test_keys_to_remove = []
    for test in suite_dict:
      test_dict = suite_dict[test]
      msg_keys_to_remove = []
      for msg in test_dict:
        fails = test_dict[msg]
        i = 0
        while i < len(fails) and fails[i][0] <= earliest_date:
          i += 1
        new_fails = fails[i:]
        test_dict[msg] = new_fails
        if len(new_fails) == 0:
          msg_keys_to_remove.append(msg)

      for k in msg_keys_to_remove:
        del test_dict[k]

      suite_dict[test] = test_dict
      if len(test_dict) == 0:
        test_keys_to_remove.append(test)

    for k in test_keys_to_remove:
      del suite_dict[k]

    failure_dict[suite] = suite_dict


def GetBuildID(build_bot, date):
  """Return the list of build numbers for build_bot on the given date."""
  day = '{day:02d}'.format(day=date % 100)
  mon = MONTHS[date // 100 % 100]
  date_string = mon + ' ' + day
  if build_bot in WATERFALL_BUILDERS:
    url = ('https://uberchromegw.corp.google.com/i/chromeos/'
           'builders/%s?numbuilds=200' % build_bot)
  elif build_bot in ROTATING_BUILDERS:
    url = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver/'
           'builders/%s?numbuilds=200' % build_bot)
  command = 'sso_client %s' % url
  retval = 1
  retry_time = 3
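  # Fetch the builder page with sso_client, making up to three attempts until
  # the command succeeds (retval == 0).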
  while retval and retry_time:
    ce = command_executer.GetCommandExecuter()
    retval, output, _ = ce.RunCommandWOutput(command, print_to_console=False)
    retry_time -= 1

  if retval:
    return []

  out = output.split('\n')
  line_num = 0
  build_id = []
  # Parse output lines that look like this:
  # <td>Dec 14 10:55</td>
  # <td class="revision">??</td>
  # <td failure</td><td><a href="../builders/gcc_toolchain/builds/109">#109</a>
  while line_num < len(out):
    if date_string in out[line_num]:
      if line_num + 2 < len(out):
        build_num_line = out[line_num + 2]
        raw_num = re.findall(r'builds/\d+', build_num_line)
        # raw_num is ['builds/109'] in the example.
        if raw_num:
          build_id.append(int(raw_num[0].split('/')[1]))
    line_num += 1
  return build_id


def GenerateFailuresReport(fail_dict, date):
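  """Write a test-failure summary report and return its filename."""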
  filename = 'waterfall_report.failures.%s.txt' % date
  date_string = format_date(date)
  with open(filename, 'w') as out_file:
    # Write failure report section.
    out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string)

    # Collect data about all of the errors so that we can sort them and output
    # them in descending order of frequency.
    error_groups = []
    for suite in fail_dict:
      suite_dict = fail_dict[suite]
      if suite_dict:
        for test in suite_dict:
          test_dict = suite_dict[test]
          for err_msg in test_dict:
            err_list = test_dict[err_msg]
            sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True)
            err_group = [len(sorted_list), suite, test, err_msg, sorted_list]
            error_groups.append(err_group)

    # Sort the error groups by failure count, most frequent first, and write
    # them out in that order.
    sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True)
    for err_group in sorted_errors:
      _, suite, test, err_msg, err_list = err_group
      out_file.write('Suite: %s\n' % suite)
      out_file.write('    %s (%d failures)\n' % (test, len(err_list)))
      out_file.write('    (%s)\n' % err_msg)
      for err in err_list:
        out_file.write('        %s, %s, %s\n' %
                       (format_date(err[0]), err[1], err[2]))
      out_file.write('\n')

  print('Report generated in %s.' % filename)
  return filename


def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date,
                            omit_failures):
  """Write out the actual formatted report."""

  filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date)

  date_string = ', '.join(report_dict['date'])

  if waterfall_type == 'main':
    report_list = WATERFALL_BUILDERS
  else:
    report_list = report_dict.keys()

  with open(filename, 'w') as out_file:
    # Write Report Header
    out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' %
                   (waterfall_type, date_string))
    out_file.write('                                                          '
                   '                kernel       kernel\n')
    out_file.write('                         Build    bvt-         bvt-cq     '
                   ' security       daily        daily\n')
    out_file.write('                         status  inline                   '
                   '              regression   benchmarks\n')
    out_file.write('                               [P/ F/ DR]*   [P/ F/ DR]*  '
                   '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\n\n')

    # Write daily waterfall status section.
    for builder in report_list:
      if builder == 'date':
        continue

      if builder not in report_dict:
        out_file.write('Unable to find information for %s.\n\n' % builder)
        continue

      build_dict = report_dict[builder]
      status = build_dict.get('build_status', 'bad')
      inline = build_dict.get('bvt-inline', '[??/ ?? /??]')
      cq = build_dict.get('bvt-cq', '[??/ ?? /??]')
      inline_color = build_dict.get('bvt-inline-color', '')
      cq_color = build_dict.get('bvt-cq-color', '')
      if 'x86' not in builder:
        security = build_dict.get('security', '[??/ ?? /??]')
        security_color = build_dict.get('security-color', '')
        if 'gcc' in builder:
          regression = build_dict.get('kernel_daily_regression', '[??/ ?? /??]')
          bench = build_dict.get('kernel_daily_benchmarks', '[??/ ?? /??]')
          regression_color = build_dict.get('kernel_daily_regression-color', '')
          bench_color = build_dict.get('kernel_daily_benchmarks-color', '')
          out_file.write('                                  %6s        %6s'
                         '      %6s      %6s      %6s\n' %
                         (inline_color, cq_color, security_color,
                          regression_color, bench_color))
          out_file.write('%25s %3s  %s %s %s %s %s\n' %
                         (builder, status, inline, cq, security, regression,
                          bench))
        else:
          out_file.write('                                  %6s        %6s'
                         '      %6s\n' % (inline_color, cq_color,
                                          security_color))
          out_file.write('%25s %3s  %s %s %s\n' % (builder, status, inline, cq,
                                                   security))
      else:
        out_file.write('                                  %6s        %6s\n' %
                       (inline_color, cq_color))
        out_file.write('%25s %3s  %s %s\n' % (builder, status, inline, cq))
      if 'build_link' in build_dict:
        out_file.write('%s\n\n' % build_dict['build_link'])

    out_file.write('\n\n*P = Number of tests in suite that Passed; F = '
                   'Number of tests in suite that Failed; DR = Number of tests'
                   ' in suite that Didn\'t Run.\n')

    if omit_failures:
      print('Report generated in %s.' % filename)
      return filename

    # Write failure report section.
    out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string)

    # Collect data about all of the errors so that we can sort them and output
    # them in descending order of frequency.
    error_groups = []
    for suite in fail_dict:
      suite_dict = fail_dict[suite]
      if suite_dict:
        for test in suite_dict:
          test_dict = suite_dict[test]
          for err_msg in test_dict:
            err_list = test_dict[err_msg]
            sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True)
            err_group = [len(sorted_list), suite, test, err_msg, sorted_list]
            error_groups.append(err_group)

    # Sort the error groups by failure count, most frequent first, and write
    # them out in that order.
    sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True)
    for err_group in sorted_errors:
      _, suite, test, err_msg, err_list = err_group
      out_file.write('Suite: %s\n' % suite)
      out_file.write('    %s (%d failures)\n' % (test, len(err_list)))
      out_file.write('    (%s)\n' % err_msg)
      for err in err_list:
        out_file.write('        %s, %s, %s\n' %
                       (format_date(err[0]), err[1], err[2]))
      out_file.write('\n')

  print('Report generated in %s.' % filename)
  return filename


def UpdateReport(report_dict, builder, test, report_date, build_link,
                 test_summary, board, color):
  """Update the data in our report dictionary with current test's data."""

  if 'date' not in report_dict:
    report_dict['date'] = [report_date]
  elif report_date not in report_dict['date']:
    # It is possible that some of the builders started/finished on different
    # days, so we allow for multiple dates in the reports.
    report_dict['date'].append(report_date)

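  # The rotating builders cover multiple boards, so key their results by
  # board; the main waterfall builders are already board-specific.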
  build_key = ''
  if builder == GCC_ROTATING_BUILDER:
    build_key = '%s-gcc-toolchain' % board
  elif builder == LLVM_ROTATING_BUILDER:
    build_key = '%s-llvm-next-toolchain' % board
  else:
    build_key = builder

  if build_key not in report_dict:
    build_dict = dict()
  else:
    build_dict = report_dict[build_key]

  if 'build_link' not in build_dict:
    build_dict['build_link'] = build_link

  if 'date' not in build_dict:
    build_dict['date'] = report_date

  if 'board' in build_dict and build_dict['board'] != board:
    raise RuntimeError(
        'Error: Two different boards (%s,%s) in one build (%s)!' %
        (board, build_dict['board'], build_link))
  build_dict['board'] = board

  color_key = '%s-color' % test
  build_dict[color_key] = color

  # Check to see if we already have a build status for this build_key.
  status = ''
  if 'build_status' in build_dict:
    # Use current build_status, unless current test failed (see below).
    status = build_dict['build_status']

  if not test_summary:
    # Current test data was not available, so something was bad with build.
    build_dict['build_status'] = 'bad'
    build_dict[test] = '[  no data  ]'
  else:
    build_dict[test] = test_summary
    if not status:
      # Current test ok; no other data, so assume build was ok.
      build_dict['build_status'] = 'ok'

  report_dict[build_key] = build_dict


def UpdateBuilds(builds):
  """Update the data in our build-data.txt file."""

  # The build data file records the last build number for which we
  # generated a report.  When we generate the next report, we read
  # this data and increment it to get the new data; when we finish
  # generating the reports, we write the updated values into this file.
  # NOTE: One side effect of doing this at the end: if the script
  # fails in the middle of generating a report, this data does not get
  # updated.
  with open(BUILD_DATA_FILE, 'w') as fp:
    gcc_max = 0
    llvm_max = 0
    for b in builds:
      if b[0] == GCC_ROTATING_BUILDER:
        gcc_max = max(gcc_max, b[1])
      elif b[0] == LLVM_ROTATING_BUILDER:
        llvm_max = max(llvm_max, b[1])
      else:
        fp.write('%s,%d\n' % (b[0], b[1]))
    if gcc_max > 0:
      fp.write('%s,%d\n' % (GCC_ROTATING_BUILDER, gcc_max))
    if llvm_max > 0:
      fp.write('%s,%d\n' % (LLVM_ROTATING_BUILDER, llvm_max))


def GetBuilds(date=0):
  """Return the (builder, build_number) pairs to generate reports for."""

  # If date is set, get the build ids from the waterfall pages.
  builds = []

  if date:
    for builder in WATERFALL_BUILDERS + ROTATING_BUILDERS:
      build_ids = GetBuildID(builder, date)
      for build_id in build_ids:
        builds.append((builder, build_id))
    return builds

  # If date is not set, we try to get the most recent builds.
  # Read the values of the last builds used to generate a report, and
  # increment them appropriately, to get values for generating the
  # current report.  (See comments in UpdateBuilds.)
  with open(BUILD_DATA_FILE, 'r') as fp:
    lines = fp.readlines()

  for l in lines:
    l = l.rstrip()
    words = l.split(',')
    builder = words[0]
    build = int(words[1])
    builds.append((builder, build + 1))
    # NOTE: We are assuming here that there are always 2 daily builds in
    # each of the rotating builders.  I am not convinced this is a valid
    # assumption.
    if builder in ROTATING_BUILDERS:
      builds.append((builder, build + 2))

  return builds


def RecordFailures(failure_dict, platform, suite, builder, int_date, log_file,
                   build_num, failed):
  """Read and update the stored data about test failures."""

  # Get the dictionary for this particular test suite from the failures
  # dictionary.
  suite_dict = failure_dict[suite]

  # Read in the entire log file for this test/build.
  with open(log_file, 'r') as in_file:
    lines = in_file.readlines()

  # Update the entries in the failure dictionary for each test within this suite
  # that failed.
  for test in failed:
    # Check to see if there is already an entry in the suite dictionary for this
    # test; if so use that, otherwise create a new entry.
    if test in suite_dict:
      test_dict = suite_dict[test]
    else:
      test_dict = dict()
    # Parse the lines from the log file, looking for lines that indicate this
    # test failed.
    msg = ''
    for l in lines:
      words = l.split()
      if len(words) < 3:
        continue
      if ((words[0] == test and words[1] == 'ERROR:') or
          (words[0] == 'provision' and words[1] == 'FAIL:')):
        words = words[2:]
        # Get the error message for the failure.
        msg = ' '.join(words)
    if not msg:
      msg = 'Unknown_Error'

    # Look for an existing entry for this error message in the test dictionary.
    # If found use that, otherwise create a new entry for this error message.
    if msg in test_dict:
      error_list = test_dict[msg]
    else:
      error_list = list()
    # Create an entry for this new failure, and add it to the error list if
    # it's not already there.
    new_item = [int_date, platform, builder, build_num]
    if new_item not in error_list:
      error_list.append(new_item)
    # Sort the error list by date.
    error_list.sort(key=lambda x: x[0])
    # Calculate the earliest date to save; delete records for older failures.
    earliest_date = int_date - MAX_SAVE_RECORDS
    i = 0
    while i < len(error_list) and error_list[i][0] <= earliest_date:
      i += 1
    if i > 0:
      error_list = error_list[i:]
    # Save the error list in the test's dictionary, keyed on error_msg.
    test_dict[msg] = error_list

    # Save the updated test dictionary in the test_suite dictionary.
    suite_dict[test] = test_dict

  # Save the updated test_suite dictionary in the failure dictionary.
  failure_dict[suite] = suite_dict


def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder,
                 build_num, build_link):
  """Parse the log file from the given builder, build_num and test.

     Also adds the results for this test to our test results dictionary,
     and calls RecordFailures, to update our test failure data.
  """

  print('Parsing file %s' % log_file)
  lines = []
  with open(log_file, 'r') as infile:
    lines = infile.readlines()

  passed = {}
  failed = {}
  not_run = {}
  date = ''
  status = ''
  board = ''
  num_provision_errors = 0
  build_ok = True
  afe_line = ''

  for line in lines:
    if line.rstrip() == '<title>404 Not Found</title>':
      print('Warning: File for %s (build number %d), %s was not found.' %
            (builder, build_num, test))
      build_ok = False
      break
    if '[ PASSED ]' in line:
      test_name = line.split()[0]
      if test_name != 'Suite':
        passed[test_name] = True
    elif '[ FAILED ]' in line:
      test_name = line.split()[0]
      if test_name == 'provision':
        num_provision_errors += 1
        not_run[test_name] = True
      elif test_name != 'Suite':
        failed[test_name] = True
    elif line.startswith('started: '):
      date = line.rstrip()
      date = date[9:]
      date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y')
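      # Encode the start date as an integer YYYYMMDD; this is the key format
      # used for the build records and by format_date().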
      int_date = (
          date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday)
      date = time.strftime('%a %b %d %Y', date_obj)
    elif not status and line.startswith('status: '):
      status = line.rstrip()
      words = status.split(':')
      status = words[-1]
    elif line.find('Suite passed with a warning') != -1:
      status = 'WARNING'
    elif line.startswith('@@@STEP_LINK@Link to suite@'):
      afe_line = line.rstrip()
      words = afe_line.split('@')
      for w in words:
        if w.startswith('http'):
          afe_line = w
          afe_line = afe_line.replace('&amp;', '&')
    elif 'INFO: RunCommand:' in line:
      words = line.split()
      for i in range(0, len(words) - 1):
        if words[i] == '--board':
          board = words[i + 1]

  test_dict = test_data_dict[test]
  test_list = test_dict['tests']

  if not build_ok:
    return [], date, board, 0, '     '

  for t in test_list:
    if t not in passed and t not in failed:
      not_run[t] = True

  total_pass = len(passed)
  total_fail = len(failed)
  total_notrun = len(not_run)

  build_dict = dict()
  build_dict['id'] = build_num
  build_dict['builder'] = builder
  build_dict['date'] = date
  build_dict['build_link'] = build_link
  build_dict['total_pass'] = total_pass
  build_dict['total_fail'] = total_fail
  build_dict['total_not_run'] = total_notrun
  build_dict['afe_job_link'] = afe_line
  build_dict['provision_errors'] = num_provision_errors
  if status.strip() == 'SUCCESS':
    build_dict['color'] = 'green '
  elif status.strip() == 'FAILURE':
    build_dict['color'] = ' red  '
  elif status.strip() == 'WARNING':
    build_dict['color'] = 'orange'
  else:
    build_dict['color'] = '      '

  # Use YYYYMMDD (integer) as the build record key.
  if board in test_dict:
    board_dict = test_dict[board]
  else:
    board_dict = dict()
  board_dict[int_date] = build_dict

  # Only keep the most recent MAX_SAVE_RECORDS records (based on date).
  keys_list = board_dict.keys()
  if len(keys_list) > MAX_SAVE_RECORDS:
    min_key = min(keys_list)
    del board_dict[min_key]

  # Make sure changes get back into the main dictionary.
  test_dict[board] = board_dict
  test_data_dict[test] = test_dict

  if len(failed) > 0:
    RecordFailures(failure_dict, board, test, builder, int_date, log_file,
                   build_num, failed)

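  # Summarize the counts in the '[P/ F/ DR]' layout referenced in the report
  # header.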
  summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun)

  return summary_result, date, board, int_date, build_dict['color']


def DownloadLogFile(builder, buildnum, test, test_family):
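  """Download the stdio log for one test step using sso_client.

  Returns (local_path, build_link), or ('', '') if the download fails.
  """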

  ce = command_executer.GetCommandExecuter()
  os.system('mkdir -p %s/%s/%s' % (DOWNLOAD_DIR, builder, test))
  if builder in ROTATING_BUILDERS:
    source = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
              '/builders/%s/builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' %
              (builder, buildnum, test_family, test))
    build_link = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
                  '/builders/%s/builds/%d' % (builder, buildnum))
  else:
    source = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s/'
              'builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' %
              (builder, buildnum, test_family, test))
    build_link = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s'
                  '/builds/%d' % (builder, buildnum))

  target = '%s/%s/%s/%d' % (DOWNLOAD_DIR, builder, test, buildnum)
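  # Skip the download if a non-empty copy of this log is already present.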
  if not os.path.isfile(target) or os.path.getsize(target) == 0:
    cmd = 'sso_client %s > %s' % (source, target)
    status = ce.RunCommand(cmd)
    if status != 0:
      return '', ''

  return target, build_link


# Check for prodaccess.
def CheckProdAccess():
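  """Return True if the user appears to have valid prod credentials."""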
  status, output, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
      'prodcertstatus')
  if status != 0:
    return False
  # The status output mentions 'expires' only while the certificate is still
  # valid.
  return 'expires' in output


def ValidOptions(parser, options):
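  """Check the parsed options for conflicts and a valid email address."""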
  too_many_options = False
  if options.main:
    if options.rotating or options.failures_report:
      too_many_options = True
  elif options.rotating and options.failures_report:
    too_many_options = True

  if too_many_options:
    parser.error('Can only specify one of --main, --rotating or'
                 ' --failures_report.')

  conflicting_failure_options = False
  if options.failures_report and options.omit_failures:
    conflicting_failure_options = True
    parser.error('Cannot specify both --failures_report and --omit_failures.')

  email_ok = True
  if options.email and options.email.find('@') == -1:
    email_ok = False
    parser.error('"%s" is not a valid email address; it must contain "@..."' %
                 options.email)

  return not too_many_options and not conflicting_failure_options and email_ok


def Main(argv):
  """Main function for this script."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--main',
      dest='main',
      default=False,
      action='store_true',
      help='Generate report only for main waterfall '
      'builders.')
  parser.add_argument(
      '--rotating',
      dest='rotating',
      default=False,
      action='store_true',
      help='Generate report only for rotating builders.')
  parser.add_argument(
      '--failures_report',
      dest='failures_report',
      default=False,
      action='store_true',
      help='Only generate the failures section of the report.')
  parser.add_argument(
      '--omit_failures',
      dest='omit_failures',
      default=False,
      action='store_true',
      help='Do not generate the failures section of the report.')
  parser.add_argument(
      '--no_update',
      dest='no_update',
      default=False,
      action='store_true',
      help='Run reports, but do not update the data files.')
  parser.add_argument(
      '--date',
      dest='date',
      default=0,
      type=int,
      help='The date (YYYYMMDD) for which to generate the report.')
  parser.add_argument(
      '--email',
      dest='email',
      default='',
      help='Email address to use for sending the report.')

  options = parser.parse_args(argv)

  if not ValidOptions(parser, options):
    return 1

  main_only = options.main
  rotating_only = options.rotating
  failures_report = options.failures_report
  omit_failures = options.omit_failures
  date = options.date

  test_data_dict = dict()
  failure_dict = dict()

  prod_access = CheckProdAccess()
  if not prod_access:
    print('ERROR: Please run prodaccess first.')
    return 1

  with open('%s/waterfall-test-data.json' % DATA_DIR, 'r') as input_file:
    test_data_dict = json.load(input_file)

  with open('%s/test-failure-data.json' % DATA_DIR, 'r') as fp:
    failure_dict = json.load(fp)

  builds = GetBuilds(date)

  waterfall_report_dict = dict()
  rotating_report_dict = dict()
  int_date = 0
  for test_desc in TESTS:
    test, test_family = test_desc
    for build in builds:
      (builder, buildnum) = build
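      # Skip combinations that are not tracked: kernel suites on the llvm
      # builders, and non-bvt suites on x86 builders.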
      if test.startswith('kernel') and 'llvm' in builder:
        continue
      if 'x86' in builder and not test.startswith('bvt'):
        continue
      target, build_link = DownloadLogFile(builder, buildnum, test, test_family)

      if os.path.exists(target):
        test_summary, report_date, board, tmp_date, color = ParseLogFile(
            target, test_data_dict, failure_dict, test, builder, buildnum,
            build_link)
        if not test_summary:
          continue

        if tmp_date != 0:
          int_date = tmp_date

        if builder in ROTATING_BUILDERS:
          UpdateReport(rotating_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)
        else:
          UpdateReport(waterfall_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)

  PruneOldFailures(failure_dict, int_date)

  if options.email:
    email_to = options.email
  else:
    email_to = getpass.getuser()

  if waterfall_report_dict and not rotating_only and not failures_report:
    main_report = GenerateWaterfallReport(waterfall_report_dict, failure_dict,
                                          'main', int_date, omit_failures)
    EmailReport(main_report, 'Main', format_date(int_date), email_to)
    shutil.copy(main_report, ARCHIVE_DIR)
  if rotating_report_dict and not main_only and not failures_report:
    rotating_report = GenerateWaterfallReport(
        rotating_report_dict, failure_dict, 'rotating', int_date, omit_failures)
    EmailReport(rotating_report, 'Rotating', format_date(int_date), email_to)
    shutil.copy(rotating_report, ARCHIVE_DIR)

  if failures_report:
    failures_file = GenerateFailuresReport(failure_dict, int_date)
    EmailReport(failures_file, 'Failures', format_date(int_date), email_to)
    shutil.copy(failures_file, ARCHIVE_DIR)

  if not options.no_update:
    with open('%s/waterfall-test-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(test_data_dict, out_file, indent=2)

    with open('%s/test-failure-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(failure_dict, out_file, indent=2)

    UpdateBuilds(builds)


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))