#!/usr/bin/env python2
"""Generate summary report for ChromeOS toolchain waterfalls."""

# Desired future features (to be added):
# - arguments to allow generating only the main waterfall report,
#   or only the rotating builder reports, or only the failures
#   report; or the waterfall reports without the failures report.
# - Better way of figuring out which dates/builds to generate
#   reports for: probably an argument specifying a date or a date
#   range, then use something like the new buildbot utils to
#   query the build logs to find the right build numbers for the
#   builders for the specified dates.
# - Store/get the json/data files in mobiletc-prebuild's x20 area.
# - Update data in json file to reflect, for each testsuite, which
#   tests are not expected to run on which boards; update this
#   script to use that data appropriately.
# - Make sure user's prodaccess is up-to-date before trying to use
#   this script.
# - Add some nice formatting/highlighting to reports.

from __future__ import print_function

import argparse
import getpass
import json
import os
import re
import shutil
import sys
import time

from cros_utils import command_executer

# All the test suites whose data we might want for the reports,
# as (suite_name, test_family) pairs.
TESTS = (
    ('bvt-inline', 'HWTest'),
    ('bvt-cq', 'HWTest'),
    ('security', 'HWTest'),
    ('kernel_daily_regression', 'HWTest'),
    ('kernel_daily_benchmarks', 'HWTest'),
)

# The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
# LISTED IN THE REPORT.
WATERFALL_BUILDERS = [
    'amd64-gcc-toolchain', 'arm-gcc-toolchain', 'arm64-gcc-toolchain',
    'x86-gcc-toolchain', 'amd64-llvm-toolchain', 'arm-llvm-toolchain',
    'arm64-llvm-toolchain', 'x86-llvm-toolchain', 'amd64-llvm-next-toolchain',
    'arm-llvm-next-toolchain', 'arm64-llvm-next-toolchain',
    'x86-llvm-next-toolchain'
]

DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
ARCHIVE_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-reports/'
DOWNLOAD_DIR = '/tmp/waterfall-logs'
# How many days of failure records to keep (see PruneOldFailures).
MAX_SAVE_RECORDS = 7
BUILD_DATA_FILE = '%s/build-data.txt' % DATA_DIR
GCC_ROTATING_BUILDER = 'gcc_toolchain'
LLVM_ROTATING_BUILDER = 'llvm_next_toolchain'
ROTATING_BUILDERS = [GCC_ROTATING_BUILDER, LLVM_ROTATING_BUILDER]

# For int-to-string date conversion.  Note, the index of the month in this
# list needs to correspond to the month's integer value.  i.e. 'Sep' must
# be at MONTHS[9].
MONTHS = [
    '', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
    'Nov', 'Dec'
]


def format_date(int_date):
  """Convert an integer date (YYYYMMDD) to a 'YYYY-MMM-DD' string.

  An int_date of 0 is the placeholder meaning 'today'.
  """
  if int_date == 0:
    return 'today'

  # Use floor division so this is correct on both Python 2 and Python 3;
  # with '/' the month value becomes a float on Python 3 and the MONTHS
  # index below raises a TypeError.
  day = int_date % 100
  month = int_date // 100 % 100
  year = int_date // 10000

  return '%d-%s-%d' % (year, MONTHS[month], day)


def EmailReport(report_file, report_type, date):
  """Mail the given report file to the current user via sendgmr."""
  subject = '%s Waterfall Summary report, %s' % (report_type, date)
  email_to = getpass.getuser()
  sendgmr_path = '/google/data/ro/projects/gws-sre/sendgmr'
  command = ('%s --to=%s@google.com --subject="%s" --body_file=%s' %
             (sendgmr_path, email_to, subject, report_file))
  command_executer.GetCommandExecuter().RunCommand(command)


def PruneOldFailures(failure_dict, int_date):
  """Drop failure records older than MAX_SAVE_RECORDS days.

  failure_dict is nested as {suite: {test: {error_msg: [records]}}} where
  each record is [int_date, platform, builder, build_num].  Empty tests and
  messages are removed entirely after pruning.
  """
  earliest_date = int_date - MAX_SAVE_RECORDS
  for suite in failure_dict:
    suite_dict = failure_dict[suite]
    test_keys_to_remove = []
    for test in suite_dict:
      test_dict = suite_dict[test]
      msg_keys_to_remove = []
      for msg in test_dict:
        fails = test_dict[msg]
        # The record lists are kept sorted by date, so only leading
        # entries can be stale.
        i = 0
        while i < len(fails) and fails[i][0] <= earliest_date:
          i += 1
        new_fails = fails[i:]
        test_dict[msg] = new_fails
        if not new_fails:
          msg_keys_to_remove.append(msg)

      for k in msg_keys_to_remove:
        del test_dict[k]

      if not test_dict:
        test_keys_to_remove.append(test)

    for k in test_keys_to_remove:
      del suite_dict[k]


def GetBuildID(build_bot, date):
  """Return the list of build ids for build_bot on the given integer date.

  Scrapes the builder's waterfall status page (via sso_client) and collects
  the build numbers of rows whose start time matches the date.  Returns []
  if the page cannot be fetched.
  """
  day = '{day:02d}'.format(day=date % 100)
  # Floor division: works on both Python 2 and 3 (see format_date).
  mon = MONTHS[date // 100 % 100]
  date_string = mon + ' ' + day
  if build_bot in WATERFALL_BUILDERS:
    url = ('https://uberchromegw.corp.google.com/i/chromeos/'
           'builders/%s?numbuilds=200' % build_bot)
  elif build_bot in ROTATING_BUILDERS:
    url = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver/'
           'builders/%s?numbuilds=200' % build_bot)
  else:
    # Previously an unknown builder fell through and raised a NameError
    # on 'url'; fail with a clear message instead.
    raise ValueError('Unknown build_bot: %s' % build_bot)

  command = 'sso_client %s' % url
  retval = 1
  retry_time = 3
  # sso_client can be flaky; retry a few times before giving up.
  while retval and retry_time:
    retval, output, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
        command, print_to_console=False)
    retry_time -= 1

  if retval:
    return []

  out = output.split('\n')
  line_num = 0
  build_id = []
  # Parse output that looks like this:
  #   <td>Dec 14 10:55</td>
  #   <td class="revision">??</td>
  #   <td failure</td><td><a href="../builders/gcc_toolchain/builds/109">#109</a>
  while line_num < len(out):
    if date_string in out[line_num]:
      if line_num + 2 < len(out):
        build_num_line = out[line_num + 2]
        raw_num = re.findall(r'builds/\d+', build_num_line)
        # raw_num is ['builds/109'] in the example above.
        if raw_num:
          build_id.append(int(raw_num[0].split('/')[1]))
    line_num += 1
  return build_id


def _WriteFailureSection(out_file, fail_dict, date_string):
  """Write the sorted test-failure section of a report to out_file.

  Shared by GenerateFailuresReport and GenerateWaterfallReport (the two
  previously contained identical copies of this logic).
  """
  out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string)

  # We want to sort the errors and output them in order of the ones that
  # occur most often.  So we have to collect the data about all of them,
  # then sort it.
  error_groups = []
  for suite in fail_dict:
    suite_dict = fail_dict[suite]
    if suite_dict:
      for test in suite_dict:
        test_dict = suite_dict[test]
        for err_msg in test_dict:
          err_list = test_dict[err_msg]
          sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True)
          error_groups.append(
              [len(sorted_list), suite, test, err_msg, sorted_list])

  # Output the error groups, most frequent first.
  sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True)
  for err_group in sorted_errors:
    _, suite, test, err_msg, err_list = err_group
    out_file.write('Suite: %s\n' % suite)
    out_file.write('  %s (%d failures)\n' % (test, len(err_list)))
    out_file.write('    (%s)\n' % err_msg)
    for err in err_list:
      # err is [int_date, platform, builder, build_num].
      out_file.write('      %s, %s, %s\n' % (format_date(err[0]), err[1],
                                             err[2]))
    out_file.write('\n')


def GenerateFailuresReport(fail_dict, date):
  """Write the stand-alone failures report; return its filename."""
  filename = 'waterfall_report.failures.%s.txt' % date
  with open(filename, 'w') as out_file:
    _WriteFailureSection(out_file, fail_dict, format_date(date))
  print('Report generated in %s.' % filename)
  return filename


def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date,
                            omit_failures):
  """Write out the actual formatted report; return its filename."""

  filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date)

  # Builders may have started/finished on different days, so the report may
  # cover several dates.
  date_string = ', '.join(report_dict['date'])

  if waterfall_type == 'main':
    report_list = WATERFALL_BUILDERS
  else:
    report_list = report_dict.keys()

  with open(filename, 'w') as out_file:
    # Write report header.
    out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' %
                   (waterfall_type, date_string))
    out_file.write('                                                       '
                   '          kernel       kernel\n')
    out_file.write('                         Build    bvt-         bvt-cq  '
                   '   security     daily        daily\n')
    out_file.write('                         status   inline               '
                   '                regression   benchmarks\n')
    out_file.write('                                 [P/ F/ DR]*  [P/ F /DR]* '
                   '[P/ F/ DR]*  [P/ F/ DR]*  [P/ F/ DR]*\n\n')

    # Write daily waterfall status section.
    for builder in report_list:
      if builder == 'date':
        continue

      if builder not in report_dict:
        out_file.write('Unable to find information for %s.\n\n' % builder)
        continue

      build_dict = report_dict[builder]
      status = build_dict.get('build_status', 'bad')
      inline = build_dict.get('bvt-inline', '[??/ ?? /??]')
      cq = build_dict.get('bvt-cq', '[??/ ?? /??]')
      inline_color = build_dict.get('bvt-inline-color', '')
      cq_color = build_dict.get('bvt-cq-color', '')
      if 'x86' not in builder:
        security = build_dict.get('security', '[??/ ?? /??]')
        security_color = build_dict.get('security-color', '')
        if 'gcc' in builder:
          # Only gcc builders run the kernel daily suites.
          regression = build_dict.get('kernel_daily_regression', '[??/ ?? /??]')
          bench = build_dict.get('kernel_daily_benchmarks', '[??/ ?? /??]')
          regression_color = build_dict.get('kernel_daily_regression-color', '')
          bench_color = build_dict.get('kernel_daily_benchmarks-color', '')
          out_file.write('                                  %6s       %6s'
                         '       %6s       %6s       %6s\n' %
                         (inline_color, cq_color, security_color,
                          regression_color, bench_color))
          out_file.write('%25s %3s %s %s %s %s %s\n' %
                         (builder, status, inline, cq, security, regression,
                          bench))
        else:
          out_file.write('                                  %6s       %6s'
                         '       %6s\n' % (inline_color, cq_color,
                                           security_color))
          out_file.write('%25s %3s %s %s %s\n' % (builder, status, inline, cq,
                                                  security))
      else:
        out_file.write('                                  %6s       %6s\n' %
                       (inline_color, cq_color))
        out_file.write('%25s %3s %s %s\n' % (builder, status, inline, cq))
      if 'build_link' in build_dict:
        out_file.write('%s\n\n' % build_dict['build_link'])

    out_file.write('\n\n*P = Number of tests in suite that Passed; F = '
                   'Number of tests in suite that Failed; DR = Number of tests'
                   ' in suite that Didn\'t Run.\n')

    if omit_failures:
      print('Report generated in %s.' % filename)
      return filename

    _WriteFailureSection(out_file, fail_dict, date_string)

  print('Report generated in %s.' % filename)
  return filename


def UpdateReport(report_dict, builder, test, report_date, build_link,
                 test_summary, board, color):
  """Update the data in our report dictionary with current test's data."""

  if 'date' not in report_dict:
    report_dict['date'] = [report_date]
  elif report_date not in report_dict['date']:
    # It is possible that some of the builders started/finished on different
    # days, so we allow for multiple dates in the reports.
    report_dict['date'].append(report_date)

  # Rotating builders are keyed by '<board>-<compiler>-toolchain', mirroring
  # the main waterfall builder names.
  if builder == GCC_ROTATING_BUILDER:
    build_key = '%s-gcc-toolchain' % board
  elif builder == LLVM_ROTATING_BUILDER:
    build_key = '%s-llvm-next-toolchain' % board
  else:
    build_key = builder

  build_dict = report_dict.get(build_key, dict())

  if 'build_link' not in build_dict:
    build_dict['build_link'] = build_link

  if 'date' not in build_dict:
    build_dict['date'] = report_date

  if 'board' in build_dict and build_dict['board'] != board:
    raise RuntimeError(
        'Error: Two different boards (%s,%s) in one build (%s)!' %
        (board, build_dict['board'], build_link))
  build_dict['board'] = board

  color_key = '%s-color' % test
  build_dict[color_key] = color

  # Check to see if we already have a build status for this build_key.
  # Use the existing build_status, unless the current test failed (below).
  status = build_dict.get('build_status', '')

  if not test_summary:
    # Current test data was not available, so something was bad with build.
    build_dict['build_status'] = 'bad'
    build_dict[test] = '[ no data ]'
  else:
    build_dict[test] = test_summary
    if not status:
      # Current test ok; no other data, so assume build was ok.
      build_dict['build_status'] = 'ok'

  report_dict[build_key] = build_dict


def UpdateBuilds(builds):
  """Update the data in our build-data.txt file.

  The build data file records the last build number for which we
  generated a report.  When we generate the next report, we read
  this data and increment it to get the new data; when we finish
  generating the reports, we write the updated values into this file.
  NOTE: One side effect of doing this at the end:  If the script
  fails in the middle of generating a report, this data does not get
  updated.
  """
  with open(BUILD_DATA_FILE, 'w') as fp:
    gcc_max = 0
    llvm_max = 0
    for b in builds:
      # The rotating builders can appear multiple times; record only the
      # highest build number for each.
      if b[0] == GCC_ROTATING_BUILDER:
        gcc_max = max(gcc_max, b[1])
      elif b[0] == LLVM_ROTATING_BUILDER:
        llvm_max = max(llvm_max, b[1])
      else:
        fp.write('%s,%d\n' % (b[0], b[1]))
    if gcc_max > 0:
      fp.write('%s,%d\n' % (GCC_ROTATING_BUILDER, gcc_max))
    if llvm_max > 0:
      fp.write('%s,%d\n' % (LLVM_ROTATING_BUILDER, llvm_max))


def GetBuilds(date=0):
  """Return the list of (builder, build_id) pairs to report on."""

  # If date is set, get the build ids from the waterfall pages.
  builds = []

  if date:
    for builder in WATERFALL_BUILDERS + ROTATING_BUILDERS:
      build_ids = GetBuildID(builder, date)
      for build_id in build_ids:
        builds.append((builder, build_id))
    return builds

  # If date is not set, we try to get the most recent builds.
  # Read the values of the last builds used to generate a report, and
  # increment them appropriately, to get values for generating the
  # current report.  (See comments in UpdateBuilds.)
  with open(BUILD_DATA_FILE, 'r') as fp:
    lines = fp.readlines()

  for l in lines:
    words = l.rstrip().split(',')
    builder = words[0]
    build = int(words[1])
    builds.append((builder, build + 1))
    # NOTE: We are assuming here that there are always 2 daily builds in
    # each of the rotating builders.  I am not convinced this is a valid
    # assumption.
    if builder in ROTATING_BUILDERS:
      builds.append((builder, build + 2))

  return builds


def RecordFailures(failure_dict, platform, suite, builder, int_date, log_file,
                   build_num, failed):
  """Read and update the stored data about test failures."""

  # Get the dictionary for this particular test suite from the failures
  # dictionary.
  suite_dict = failure_dict[suite]

  # Read in the entire log file for this test/build.
  with open(log_file, 'r') as in_file:
    lines = in_file.readlines()

  # Update the entries in the failure dictionary for each test within this
  # suite that failed.
  for test in failed:
    # Get the existing entry for this test, or start a new one.
    test_dict = suite_dict.get(test, dict())
    # Parse the lines from the log file, looking for lines that indicate
    # this test failed, and grab the error message.
    msg = ''
    for l in lines:
      words = l.split()
      if len(words) < 3:
        continue
      if ((words[0] == test and words[1] == 'ERROR:') or
          (words[0] == 'provision' and words[1] == 'FAIL:')):
        msg = ' '.join(words[2:])
    if not msg:
      msg = 'Unknown_Error'

    # Get the existing record list for this error message, or start a new
    # one.
    error_list = test_dict.get(msg, list())
    # Add this failure to the error list if it's not already there.
    new_item = [int_date, platform, builder, build_num]
    if new_item not in error_list:
      error_list.append(new_item)
    # Keep the list sorted by date.
    error_list.sort(key=lambda x: x[0])
    # Calculate the earliest date to save; delete records for older failures.
    earliest_date = int_date - MAX_SAVE_RECORDS
    i = 0
    while i < len(error_list) and error_list[i][0] <= earliest_date:
      i += 1
    if i > 0:
      error_list = error_list[i:]
    # Save the error list in the test's dictionary, keyed on error_msg.
    test_dict[msg] = error_list
    # Save the updated test dictionary in the test_suite dictionary.
    suite_dict[test] = test_dict

  # Save the updated test_suite dictionary in the failure dictionary.
  failure_dict[suite] = suite_dict


def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder,
                 build_num, build_link):
  """Parse the log file from the given builder, build_num and test.

  Also adds the results for this test to our test results dictionary,
  and calls RecordFailures, to update our test failure data.

  Returns (summary_string, date, board, int_date, color); an empty list is
  returned in place of the summary string when the log was a 404 page.
  """

  with open(log_file, 'r') as infile:
    lines = infile.readlines()

  passed = {}
  failed = {}
  not_run = {}
  date = ''
  status = ''
  board = ''
  # Initialize to 0 so a log without a 'started:' line cannot raise a
  # NameError below (int_date was previously assigned only when that line
  # was found, but is returned unconditionally).
  int_date = 0
  num_provision_errors = 0
  build_ok = True
  afe_line = ''

  for line in lines:
    if line.rstrip() == '<title>404 Not Found</title>':
      print('Warning: File for %s (build number %d), %s was not found.' %
            (builder, build_num, test))
      build_ok = False
      break
    if '[ PASSED ]' in line:
      test_name = line.split()[0]
      if test_name != 'Suite':
        passed[test_name] = True
    elif '[ FAILED ]' in line:
      test_name = line.split()[0]
      if test_name == 'provision':
        # Provision failures count as 'did not run', not as test failures.
        num_provision_errors += 1
        not_run[test_name] = True
      elif test_name != 'Suite':
        failed[test_name] = True
    elif line.startswith('started: '):
      date = line.rstrip()[9:]
      date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y')
      int_date = (
          date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday)
      date = time.strftime('%a %b %d %Y', date_obj)
    elif not status and line.startswith('status: '):
      # e.g. 'status: FAILURE' -> 'FAILURE'.
      status = line.rstrip().split(':')[-1]
    elif line.find('Suite passed with a warning') != -1:
      status = 'WARNING'
    elif line.startswith('@@@STEP_LINK@Link to suite@'):
      # Extract the http link to the AFE job from the annotator line.
      afe_line = line.rstrip()
      words = afe_line.split('@')
      for w in words:
        if w.startswith('http'):
          afe_line = w
          # The buildbot page HTML-escapes ampersands; undo that.
          # NOTE(review): the pasted source was garbled here; this assumes
          # the original replaced '&amp;' with '&'.
          afe_line = afe_line.replace('&amp;', '&')
    elif 'INFO: RunCommand:' in line:
      words = line.split()
      for i in range(0, len(words) - 1):
        if words[i] == '--board':
          board = words[i + 1]

  test_dict = test_data_dict[test]
  test_list = test_dict['tests']

  if not build_ok:
    # Nothing useful was parsed; return an empty result.  The six-space
    # color string keeps report columns aligned.
    return [], date, board, 0, '      '

  # Anything in the suite's expected test list that neither passed nor
  # failed did not run.
  for t in test_list:
    if t not in passed and t not in failed:
      not_run[t] = True

  total_pass = len(passed)
  total_fail = len(failed)
  total_notrun = len(not_run)

  build_dict = dict()
  build_dict['id'] = build_num
  build_dict['builder'] = builder
  build_dict['date'] = date
  build_dict['build_link'] = build_link
  build_dict['total_pass'] = total_pass
  build_dict['total_fail'] = total_fail
  build_dict['total_not_run'] = total_notrun
  build_dict['afe_job_link'] = afe_line
  build_dict['provision_errors'] = num_provision_errors
  # Six-character color strings keep the report columns aligned.
  if status.strip() == 'SUCCESS':
    build_dict['color'] = 'green '
  elif status.strip() == 'FAILURE':
    build_dict['color'] = ' red  '
  elif status.strip() == 'WARNING':
    build_dict['color'] = 'orange'
  else:
    build_dict['color'] = '      '

  # Use YYYYMMDD (integer) as the build record key.
  board_dict = test_dict.get(board, dict())
  board_dict[int_date] = build_dict

  # Only keep the last MAX_SAVE_RECORDS records (based on date).
  keys_list = board_dict.keys()
  if len(keys_list) > MAX_SAVE_RECORDS:
    min_key = min(keys_list)
    del board_dict[min_key]

  # Make sure changes get back into the main dictionary.
  test_dict[board] = board_dict
  test_data_dict[test] = test_dict

  if failed:
    RecordFailures(failure_dict, board, test, builder, int_date, log_file,
                   build_num, failed)

  summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun)

  return summary_result, date, board, int_date, build_dict['color']


def DownloadLogFile(builder, buildnum, test, test_family):
  """Download the buildbot stdio log for one test run.

  Returns (local_target_path, build_link); ('', '') if the download fails.
  """

  ce = command_executer.GetCommandExecuter()
  os.system('mkdir -p %s/%s/%s' % (DOWNLOAD_DIR, builder, test))
  if builder in ROTATING_BUILDERS:
    source = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
              '/builders/%s/builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' %
              (builder, buildnum, test_family, test))
    build_link = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
                  '/builders/%s/builds/%d' % (builder, buildnum))
  else:
    source = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s/'
              'builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' %
              (builder, buildnum, test_family, test))
    build_link = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s'
                  '/builds/%d' % (builder, buildnum))

  target = '%s/%s/%s/%d' % (DOWNLOAD_DIR, builder, test, buildnum)
  # Only download if we don't already have a non-empty copy.
  if not os.path.isfile(target) or os.path.getsize(target) == 0:
    cmd = 'sso_client %s > %s' % (source, target)
    status = ce.RunCommand(cmd)
    if status != 0:
      return '', ''

  return target, build_link


# Check for prodaccess.
def CheckProdAccess():
  """Return True if the user currently has valid prodaccess credentials."""
  status, output, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
      'prodcertstatus')
  if status != 0:
    return False
  # 'prodcertstatus' mentions an expiry time only while the cert is still
  # valid, so its presence means access has not expired.
  return 'expires' in output


def ValidOptions(parser, options):
  """Check the parsed options for conflicts.

  --main, --rotating and --failures_report are mutually exclusive, and
  --failures_report cannot be combined with --omit_failures.  Conflicts are
  reported through parser.error() (which raises SystemExit).
  """
  too_many_options = False
  if options.main:
    if options.rotating or options.failures_report:
      too_many_options = True
  elif options.rotating and options.failures_report:
    too_many_options = True

  if too_many_options:
    parser.error('Can only specify one of --main, --rotating or'
                 ' --failures_report.')

  conflicting_failure_options = False
  if options.failures_report and options.omit_failures:
    conflicting_failure_options = True
    parser.error('Cannot specify both --failures_report and --omit_failures.')

  return not too_many_options and not conflicting_failure_options


def Main(argv):
  """Main function for this script."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--main',
      dest='main',
      default=False,
      action='store_true',
      help='Generate report only for main waterfall builders.')
  parser.add_argument(
      '--rotating',
      dest='rotating',
      default=False,
      action='store_true',
      help='Generate report only for rotating builders.')
  parser.add_argument(
      '--failures_report',
      dest='failures_report',
      default=False,
      action='store_true',
      help='Only generate the failures section of the report.')
  parser.add_argument(
      '--omit_failures',
      dest='omit_failures',
      default=False,
      action='store_true',
      help='Do not generate the failures section of the report.')
  parser.add_argument(
      '--no_update',
      dest='no_update',
      default=False,
      action='store_true',
      help='Run reports, but do not update the data files.')
  parser.add_argument(
      '--date',
      dest='date',
      default=0,
      type=int,
      help='The date YYYYMMDD of waterfall report.')

  options = parser.parse_args(argv)

  if not ValidOptions(parser, options):
    return 1

  main_only = options.main
  rotating_only = options.rotating
  failures_report = options.failures_report
  omit_failures = options.omit_failures
  date = options.date

  test_data_dict = dict()
  failure_dict = dict()

  if not CheckProdAccess():
    print('ERROR: Please run prodaccess first.')
    # Previously returned None (exit success) here; signal the failure.
    return 1

  with open('%s/waterfall-test-data.json' % DATA_DIR, 'r') as input_file:
    test_data_dict = json.load(input_file)

  with open('%s/test-failure-data.json' % DATA_DIR, 'r') as fp:
    failure_dict = json.load(fp)

  builds = GetBuilds(date)

  waterfall_report_dict = dict()
  rotating_report_dict = dict()
  int_date = 0
  for test_desc in TESTS:
    test, test_family = test_desc
    for build in builds:
      (builder, buildnum) = build
      # Kernel suites do not run on llvm builders, and only the bvt suites
      # run on x86 builders.
      if test.startswith('kernel') and 'llvm' in builder:
        continue
      if 'x86' in builder and not test.startswith('bvt'):
        continue
      target, build_link = DownloadLogFile(builder, buildnum, test, test_family)

      if os.path.exists(target):
        test_summary, report_date, board, tmp_date, color = ParseLogFile(
            target, test_data_dict, failure_dict, test, builder, buildnum,
            build_link)

        if tmp_date != 0:
          int_date = tmp_date

        if builder in ROTATING_BUILDERS:
          UpdateReport(rotating_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)
        else:
          UpdateReport(waterfall_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)

  PruneOldFailures(failure_dict, int_date)

  if waterfall_report_dict and not rotating_only and not failures_report:
    main_report = GenerateWaterfallReport(waterfall_report_dict, failure_dict,
                                          'main', int_date, omit_failures)
    EmailReport(main_report, 'Main', format_date(int_date))
    shutil.copy(main_report, ARCHIVE_DIR)
  if rotating_report_dict and not main_only and not failures_report:
    rotating_report = GenerateWaterfallReport(
        rotating_report_dict, failure_dict, 'rotating', int_date, omit_failures)
    EmailReport(rotating_report, 'Rotating', format_date(int_date))
    shutil.copy(rotating_report, ARCHIVE_DIR)

  if failures_report:
    # Use a distinct name; this previously clobbered the failures_report
    # boolean with the generated filename.
    failures_report_file = GenerateFailuresReport(failure_dict, int_date)
    EmailReport(failures_report_file, 'Failures', format_date(int_date))
    shutil.copy(failures_report_file, ARCHIVE_DIR)

  if not options.no_update:
    with open('%s/waterfall-test-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(test_data_dict, out_file, indent=2)

    with open('%s/test-failure-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(failure_dict, out_file, indent=2)

    UpdateBuilds(builds)


if __name__ == '__main__':
  # Propagate Main's return code (previously the exit status was always 0,
  # even when Main reported an error).
  sys.exit(Main(sys.argv[1:]))