import json
import math
import os
import re

import common
from autotest_lib.tko import models
from autotest_lib.tko import status_lib
from autotest_lib.tko import utils as tko_utils
from autotest_lib.tko.parsers import base
from autotest_lib.tko.parsers import version_0


class job(version_0.job):
    """Represents a job."""

    def exit_status(self):
        """Returns the string exit status of this job."""

        # Find the .autoserv_execute path.
        top_dir = tko_utils.find_toplevel_job_dir(self.dir)
        if not top_dir:
            return 'ABORT'
        execute_path = os.path.join(top_dir, '.autoserv_execute')

        # If for some reason we can't read the status code, assume disaster.
        if not os.path.exists(execute_path):
            return 'ABORT'
        lines = open(execute_path).readlines()
        if len(lines) < 2:
            return 'ABORT'
        try:
            status_code = int(lines[1])
        except ValueError:
            return 'ABORT'

        if not os.WIFEXITED(status_code):
            # Looks like a signal - an ABORT.
            return 'ABORT'
        elif os.WEXITSTATUS(status_code) != 0:
            # Looks like a non-zero exit - a failure.
            return 'FAIL'
        else:
            # Looks like exit code == 0.
            return 'GOOD'

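
# For illustration only: the second line of .autoserv_execute read by
# job.exit_status() above is treated as an os.wait()-style status code (it is
# passed to os.WIFEXITED/os.WEXITSTATUS).  The example values below are
# invented for documentation and are not taken from the original source:
#
#     0    -> exited normally with code 0  -> 'GOOD'
#     256  -> exited normally with code 1  -> 'FAIL'
#     15   -> terminated by signal 15      -> 'ABORT'
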
class kernel(models.kernel):
    """Represents a kernel."""

    def __init__(self, base, patches):
        if base:
            patches = [patch(*p.split()) for p in patches]
            hashes = [p.hash for p in patches]
            kernel_hash = self.compute_hash(base, hashes)
        else:
            base = 'UNKNOWN'
            patches = []
            kernel_hash = 'UNKNOWN'
        super(kernel, self).__init__(base, patches, kernel_hash)


class test(models.test):
    """Represents a test."""

    @staticmethod
    def load_iterations(keyval_path):
        return iteration.load_from_keyval(keyval_path)


    @staticmethod
    def load_perf_values(perf_values_file):
        return perf_value_iteration.load_from_perf_values_file(
                perf_values_file)


class iteration(models.iteration):
    """Represents an iteration."""

    @staticmethod
    def parse_line_into_dicts(line, attr_dict, perf_dict):
        key, val_type, value = "", "", ""

        # Figure out what the key, value and keyval type are.
        typed_match = re.search('^([^=]*)\{(\w*)\}=(.*)$', line)
        if typed_match:
            key, val_type, value = typed_match.groups()
        else:
            # Old-fashioned untyped match, assume perf.
            untyped_match = re.search('^([^=]*)=(.*)$', line)
            if untyped_match:
                key, value = untyped_match.groups()
                val_type = 'perf'

        # Parse the actual value into a dict.
        try:
            if val_type == 'attr':
                attr_dict[key] = value
            elif val_type == 'perf':
                # first check if value is in the form of 'mean+-deviation'
                if isinstance(value, str):
                    r = re.compile('(\d+.?\d*)\+-(\d+.?\d*)')
                    match = r.match(value)
                    if match:
                        perf_dict[key] = float(match.group(1))
                        perf_dict['%s_dev' % key] = float(match.group(2))
                        return
                # otherwise try to interpret as a regular float
                perf_dict[key] = float(value)
            else:
                raise ValueError
        except ValueError:
            msg = ('WARNING: line "%s" found in test '
                   'iteration keyval could not be parsed')
            msg %= line
            tko_utils.dprint(msg)

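
# Illustrative keyval lines for iteration.parse_line_into_dicts() above.
# These examples are inferred from the regular expressions the method uses
# and are not taken from real test output:
#
#     'debug_level{attr}=2'      -> attr_dict['debug_level'] = '2'
#     'throughput{perf}=102.7'   -> perf_dict['throughput'] = 102.7
#     'latency{perf}=12.5+-0.8'  -> perf_dict['latency'] = 12.5,
#                                   perf_dict['latency_dev'] = 0.8
#     'iterations=10'            -> untyped, assumed perf:
#                                   perf_dict['iterations'] = 10.0
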
class perf_value_iteration(models.perf_value_iteration):
    """Represents a perf value iteration."""

    @staticmethod
    def parse_line_into_dict(line):
        """
        Parse a perf measurement text line into a dictionary.

        The line is assumed to be a JSON-formatted string containing key/value
        pairs, where each pair represents a piece of information associated
        with a measured perf metric:

            'description': a string description for the perf metric.
            'value': a numeric value, or list of numeric values.
            'units': the string units associated with the perf metric.
            'higher_is_better': a boolean indicating whether a higher value is
                considered better.  If False, a lower value is considered
                better.
            'graph': a string indicating the name of the perf dashboard graph
                on which the perf data will be displayed.

        The resulting dictionary will also have a standard deviation key/value
        pair, 'stddev'.  If the perf measurement value is a list of values
        instead of a single value, then the average and standard deviation of
        the list of values is computed and stored.  If a single value, the
        value itself is used, and is associated with a standard deviation of 0.

        @param line: A string line of JSON text from a perf measurements
            output file.

        @return A dictionary containing the parsed perf measurement
            information along with a computed standard deviation value
            (key 'stddev'), or an empty dictionary if the input line cannot
            be parsed.

        """
        try:
            perf_dict = json.loads(line)
        except ValueError:
            msg = 'Could not parse perf measurements line as json: "%s"' % line
            tko_utils.dprint(msg)
            return {}

        def mean_and_standard_deviation(data):
            """
            Computes the mean and standard deviation of a list of numbers.

            @param data: A list of numbers.

            @return A 2-tuple (mean, standard_deviation) computed from the
                list of numbers.

            """
            n = len(data)
            if n == 0:
                return 0.0, 0.0
            if n == 1:
                return data[0], 0.0
            mean = float(sum(data)) / n
            # Divide by n-1 to compute "sample standard deviation".
            variance = sum([(elem - mean) ** 2 for elem in data]) / (n - 1)
            return mean, math.sqrt(variance)

        value = perf_dict['value']
        perf_dict['stddev'] = 0.0
        if isinstance(value, list):
            value, stddev = mean_and_standard_deviation(map(float, value))
            perf_dict['value'] = value
            perf_dict['stddev'] = stddev

        return perf_dict

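
# An illustrative perf measurements line for
# perf_value_iteration.parse_line_into_dict() above.  The values are invented
# for documentation purposes:
#
#     {"description": "page_load_time", "value": [1.0, 2.0, 3.0],
#      "units": "ms", "higher_is_better": false, "graph": "loading"}
#
# parses into a dict whose 'value' becomes the mean (2.0) and whose 'stddev'
# becomes the sample standard deviation (1.0).
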
class status_line(version_0.status_line):
    """Represents a status line."""

    def __init__(self, indent, status, subdir, testname, reason,
                 optional_fields):
        # Handle INFO fields.
        if status == 'INFO':
            self.type = 'INFO'
            self.indent = indent
            self.status = self.subdir = self.testname = self.reason = None
            self.optional_fields = optional_fields
        else:
            # Everything else is backwards compatible.
            super(status_line, self).__init__(indent, status, subdir,
                                              testname, reason,
                                              optional_fields)


    def is_successful_reboot(self, current_status):
        """
        Checks whether the status represents a successful reboot.

        @param current_status: A string representing the current status.

        @return True if the status represents a successful reboot, False
            otherwise.

        """
        # Make sure this is a reboot line.
        if self.testname != 'reboot':
            return False

        # Make sure this was not a failure.
        if status_lib.is_worse_than_or_equal_to(current_status, 'FAIL'):
            return False

        # It must have been a successful reboot.
        return True


    def get_kernel(self):
        # Get the base kernel version.
        fields = self.optional_fields
        base = re.sub('-autotest$', '', fields.get('kernel', ''))
        # Get a list of patches.
        patches = []
        patch_index = 0
        while ('patch%d' % patch_index) in fields:
            patches.append(fields['patch%d' % patch_index])
            patch_index += 1
        # Create a new kernel instance.
        return kernel(base, patches)


    def get_timestamp(self):
        return tko_utils.get_timestamp(self.optional_fields, 'timestamp')


# The default implementations from version 0 will do for now.
patch = version_0.patch


class parser(base.parser):
    """Represents a parser."""

    @staticmethod
    def make_job(dir):
        return job(dir)


    @staticmethod
    def make_dummy_abort(indent, subdir, testname, timestamp, reason):
        """
        Creates an abort string.

        @param indent: The number of indentation levels for the string.
        @param subdir: The subdirectory name.
        @param testname: The test name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        @return A string describing the abort.

        """
        indent = '\t' * indent
        if not subdir:
            subdir = '----'
        if not testname:
            testname = '----'

        # There is no guarantee that this will be set.
        timestamp_field = ''
        if timestamp:
            timestamp_field = '\ttimestamp=%s' % timestamp

        msg = indent + 'END ABORT\t%s\t%s%s\t%s'
        return msg % (subdir, testname, timestamp_field, reason)

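
    # For illustration, a hypothetical call (values invented for this example,
    # not taken from the original source):
    #
    #     make_dummy_abort(2, None, None, 1400000000, 'harness lost')
    #
    # returns the status log line:
    #
    #     '\t\tEND ABORT\t----\t----\ttimestamp=1400000000\tharness lost'
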
    @staticmethod
    def put_back_line_and_abort(
            line_buffer, line, indent, subdir, testname, timestamp, reason):
        """
        Puts the line back into the line buffer along with a dummy abort line.

        @param line_buffer: A line_buffer object.
        @param line: A line to append to the line buffer.
        @param indent: The number of indentation levels.
        @param subdir: The subdirectory name.
        @param testname: The test name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        """
        tko_utils.dprint('Unexpected indent: aborting log parse')
        line_buffer.put_back(line)
        abort = parser.make_dummy_abort(
            indent, subdir, testname, timestamp, reason)
        line_buffer.put_back(abort)


    def state_iterator(self, buffer):
        """
        Yields a list of tests out of the buffer.

        @param buffer: a buffer object

        """
        line = None
        new_tests = []
        job_count, boot_count = 0, 0
        min_stack_size = 0
        stack = status_lib.status_stack()
        current_kernel = kernel("", [])  # UNKNOWN
        current_status = status_lib.statuses[-1]
        current_reason = None
        started_time_stack = [None]
        subdir_stack = [None]
        testname_stack = [None]
        running_test = None
        running_reasons = set()
        ignored_lines = []
        yield []  # We're ready to start running.

        def print_ignored_lines():
            """
            Prints the ignored lines using the tko_utils.dprint method.
            """
            tko_utils.dprint('The following lines were ignored:')
            for line in ignored_lines:
                tko_utils.dprint(line)
            tko_utils.dprint('---------------------------------')

        # Create a RUNNING SERVER_JOB entry to represent the entire test.
        running_job = test.parse_partial_test(self.job, '----', 'SERVER_JOB',
                                              '', current_kernel,
                                              self.job.started_time)
        new_tests.append(running_job)

        while True:
            # Are we finished with parsing?
            if buffer.size() == 0 and self.finished:
                if ignored_lines:
                    print_ignored_lines()
                    ignored_lines = []
                if stack.size() == 0:
                    break
                # We have status lines left on the stack;
                # we need to implicitly abort them first.
                tko_utils.dprint('\nUnexpected end of job, aborting')
                abort_subdir_stack = list(subdir_stack)
                if self.job.aborted_by:
                    reason = 'Job aborted by %s' % self.job.aborted_by
                    reason += self.job.aborted_on.strftime(
                        ' at %b %d %H:%M:%S')
                else:
                    reason = 'Job aborted unexpectedly'

                timestamp = line.optional_fields.get('timestamp')
                for i in reversed(xrange(stack.size())):
                    if abort_subdir_stack:
                        subdir = abort_subdir_stack.pop()
                    else:
                        subdir = None
                    abort = self.make_dummy_abort(
                        i, subdir, subdir, timestamp, reason)
                    buffer.put(abort)

            # Stop processing once the buffer is empty.
            if buffer.size() == 0:
                yield new_tests
                new_tests = []
                continue

            # Reinitialize the per-iteration state.
            started_time = None
            finished_time = None

            # Get the next line.
            raw_line = status_lib.clean_raw_line(buffer.get())
            line = status_line.parse_line(raw_line)
            if line is None:
                ignored_lines.append(raw_line)
                continue
            elif ignored_lines:
                print_ignored_lines()
                ignored_lines = []

            # Do an initial sanity check of the indentation.
            expected_indent = stack.size()
            if line.type == 'END':
                expected_indent -= 1
            if line.indent < expected_indent:
                # ABORT the current level if indentation was unexpectedly low.
                self.put_back_line_and_abort(
                    buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                    testname_stack[-1], line.optional_fields.get('timestamp'),
                    line.reason)
                continue
            elif line.indent > expected_indent:
                # Ignore the log if the indent was unexpectedly high.
                tko_utils.dprint('ignoring line because of extra indentation')
                continue

            # Initial line processing.
            if line.type == 'START':
                stack.start()
                started_time = line.get_timestamp()
                testname = None
                if (line.testname is None and line.subdir is None
                        and not running_test):
                    # We just started a client; all tests are relative to here.
                    min_stack_size = stack.size()
                    # Start a "RUNNING" CLIENT_JOB entry.
                    job_name = 'CLIENT_JOB.%d' % job_count
                    running_client = test.parse_partial_test(self.job, None,
                                                             job_name,
                                                             '', current_kernel,
                                                             started_time)
                    msg = 'RUNNING: %s\n%s\n'
                    msg %= (running_client.status, running_client.testname)
                    tko_utils.dprint(msg)
                    new_tests.append(running_client)
                    testname = running_client.testname
                elif stack.size() == min_stack_size + 1 and not running_test:
                    # We just started a new test; insert a running record.
                    running_reasons = set()
                    if line.reason:
                        running_reasons.add(line.reason)
                    running_test = test.parse_partial_test(self.job,
                                                           line.subdir,
                                                           line.testname,
                                                           line.reason,
                                                           current_kernel,
                                                           started_time)
                    msg = 'RUNNING: %s\nSubdir: %s\nTestname: %s\n%s'
                    msg %= (running_test.status, running_test.subdir,
                            running_test.testname, running_test.reason)
                    tko_utils.dprint(msg)
                    new_tests.append(running_test)
                    testname = running_test.testname
                started_time_stack.append(started_time)
                subdir_stack.append(line.subdir)
                testname_stack.append(testname)
                continue
            elif line.type == 'INFO':
                fields = line.optional_fields
                # Update the current kernel if one is defined in the info.
                if 'kernel' in fields:
                    current_kernel = line.get_kernel()
                # Update the SERVER_JOB reason if one was logged for an abort.
                if 'job_abort_reason' in fields:
                    running_job.reason = fields['job_abort_reason']
                    new_tests.append(running_job)
                continue
            elif line.type == 'STATUS':
                # Update the stacks.
                if line.subdir and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
                    testname_stack[-1] = line.testname
                # Update the status, start and finished times.
                stack.update(line.status)
                if status_lib.is_worse_than_or_equal_to(line.status,
                                                        current_status):
                    if line.reason:
                        # Update the status of a currently running test.
                        if running_test:
                            running_reasons.add(line.reason)
                            running_reasons = tko_utils.drop_redundant_messages(
                                running_reasons)
                            sorted_reasons = sorted(running_reasons)
                            running_test.reason = ', '.join(sorted_reasons)
                            current_reason = running_test.reason
                            new_tests.append(running_test)
                            msg = 'update RUNNING reason: %s' % line.reason
                            tko_utils.dprint(msg)
                        else:
                            current_reason = line.reason
                    current_status = stack.current_status()
                started_time = None
                finished_time = line.get_timestamp()
                # If this is a non-test entry there's nothing else to do.
                if line.testname is None and line.subdir is None:
                    continue
            elif line.type == 'END':
                # Grab the current subdir off of the subdir stack, or, if this
                # is the end of a job, just pop it off.
                if (line.testname is None and line.subdir is None
                        and not running_test):
                    min_stack_size = stack.size() - 1
                    subdir_stack.pop()
                    testname_stack.pop()
                else:
                    line.subdir = subdir_stack.pop()
                    testname_stack.pop()
                    if not subdir_stack[-1] and stack.size() > min_stack_size:
                        subdir_stack[-1] = line.subdir
                # Update the status, start and finished times.
                stack.update(line.status)
                current_status = stack.end()
                if stack.size() > min_stack_size:
                    stack.update(current_status)
                    current_status = stack.current_status()
                started_time = started_time_stack.pop()
                finished_time = line.get_timestamp()
                # Update the current kernel.
                if line.is_successful_reboot(current_status):
                    current_kernel = line.get_kernel()
                # Adjust the testname if this is a reboot.
                if line.testname == 'reboot' and line.subdir is None:
                    line.testname = 'boot.%d' % boot_count
            else:
                assert False

            # Have we just finished a test?
            if stack.size() <= min_stack_size:
                # If there was no testname, just use the subdir.
                if line.testname is None:
                    line.testname = line.subdir
                # If there was no testname or subdir, use 'CLIENT_JOB'.
                if line.testname is None:
                    line.testname = 'CLIENT_JOB.%d' % job_count
                    running_test = running_client
                    job_count += 1
                    if not status_lib.is_worse_than_or_equal_to(
                            current_status, 'ABORT'):
                        # A job hasn't really failed just because some of the
                        # tests it ran have.
                        current_status = 'GOOD'

                if not current_reason:
                    current_reason = line.reason
                new_test = test.parse_test(self.job,
                                           line.subdir,
                                           line.testname,
                                           current_status,
                                           current_reason,
                                           current_kernel,
                                           started_time,
                                           finished_time,
                                           running_test)
                running_test = None
                current_status = status_lib.statuses[-1]
                current_reason = None
                if new_test.testname == ('boot.%d' % boot_count):
                    boot_count += 1
                msg = 'ADD: %s\nSubdir: %s\nTestname: %s\n%s'
                msg %= (new_test.status, new_test.subdir,
                        new_test.testname, new_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(new_test)

        # The job is finished; produce the final SERVER_JOB entry and exit.
        final_job = test.parse_test(self.job, '----', 'SERVER_JOB',
                                    self.job.exit_status(), running_job.reason,
                                    current_kernel,
                                    self.job.started_time,
                                    self.job.finished_time,
                                    running_job)
        new_tests.append(final_job)
        yield new_tests
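
# A hypothetical status log fragment of the kind state_iterator() consumes.
# The shape is inferred from make_dummy_abort() above and the version_0
# status_line parser: fields are tab-separated, nesting is expressed with
# leading tabs, and real logs may carry additional optional fields:
#
#     START	----	----	timestamp=1400000000
#     	START	dummy	dummy_Pass	timestamp=1400000010
#     		GOOD	dummy	dummy_Pass	timestamp=1400000020	completed successfully
#     	END GOOD	dummy	dummy_Pass	timestamp=1400000021
#     END GOOD	----	----	timestamp=1400000030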