1# Copyright 2012 the V8 project authors. All rights reserved. 2# Redistribution and use in source and binary forms, with or without 3# modification, are permitted provided that the following conditions are 4# met: 5# 6# * Redistributions of source code must retain the above copyright 7# notice, this list of conditions and the following disclaimer. 8# * Redistributions in binary form must reproduce the above 9# copyright notice, this list of conditions and the following 10# disclaimer in the documentation and/or other materials provided 11# with the distribution. 12# * Neither the name of Google Inc. nor the names of its 13# contributors may be used to endorse or promote products derived 14# from this software without specific prior written permission. 15# 16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28 29import collections 30import os 31import re 32import shutil 33import sys 34import time 35 36from pool import Pool 37from . import commands 38from . import perfdata 39from . import statusfile 40from . import testsuite 41from . import utils 42from ..objects import output 43 44 45# Base dir of the v8 checkout. 
# Base dir of the v8 checkout: four levels up from this file's directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)))))
TEST_DIR = os.path.join(BASE_DIR, "test")


class Instructions(object):
  """Plain value object describing how to run one test attempt.

  Built on the worker side by _GetInstructions; carries the fully expanded
  command line, the test's numeric id, the (possibly scaled) timeout in
  seconds and the verbosity flag.
  """
  def __init__(self, command, test_id, timeout, verbose):
    self.command = command
    self.id = test_id
    self.timeout = timeout
    self.verbose = verbose


# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
    "process_context", ["suites", "context"])


def MakeProcessContext(context):
  """Generate a process-local context.

  This reloads all suites per process and stores the global context.

  Args:
    context: The global context from the test runner.
  """
  suite_paths = utils.GetSuitePaths(TEST_DIR)
  suites = {}
  for root in suite_paths:
    # Don't reinitialize global state as this is concurrently called from
    # different processes.
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(TEST_DIR, root), global_init=False)
    if suite:
      suites[suite.name] = suite
  return ProcessContext(suites, context)


def GetCommand(test, context):
  """Assemble the full command line (as a list) for executing one test.

  Order: command prefix, absolute shell path, d8-specific test flags,
  suite-provided per-test flags, then extra flags from the context.
  """
  d8testflag = []
  shell = test.shell()
  if shell == "d8":
    d8testflag = ["--test"]
  if utils.IsWindows():
    shell += ".exe"
  if context.random_seed:
    d8testflag += ["--random-seed=%s" % context.random_seed]
  cmd = (context.command_prefix +
         [os.path.abspath(os.path.join(context.shell_dir, shell))] +
         d8testflag +
         test.suite.GetFlagsForTestCase(test, context) +
         context.extra_flags)
  return cmd


def _GetInstructions(test, context):
  """Build the Instructions for one test run, scaling the base timeout.

  Timeout multipliers stack: x4 for --stress-opt (found in any flag source),
  x2 for --noenable-vfp3, and x2 for tests marked SLOW in the status file.
  """
  command = GetCommand(test, context)
  timeout = context.timeout
  if ("--stress-opt" in test.flags or
      "--stress-opt" in context.mode_flags or
      "--stress-opt" in context.extra_flags):
    timeout *= 4
  if "--noenable-vfp3" in context.extra_flags:
    timeout *= 2
  # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
  # the like.
  if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
    timeout *= 2
  return Instructions(command, test.id, timeout, context.verbose)


class Job(object):
  """Stores data to be sent over the multi-process boundary.

  All contained fields will be pickled/unpickled.
  """

  def Run(self, process_context):
    """Executes the job.

    Args:
      process_context: Process-local information that is initialized by the
        executing worker.
    """
    raise NotImplementedError()


def SetupProblem(exception, test):
  """Turn a worker-side setup exception into a synthetic failed result.

  Returns the same (test_id, Output, duration) triple shape that TestJob.Run
  produces on success, with exit code 1 and the exception text on stderr,
  so the main process handles it like any other failing test.
  """
  stderr = ">>> EXCEPTION: %s\n" % exception
  match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception))
  if match:
    # Extra debuging information when files are claimed missing.
    f = match.group(1)
    stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
  return test.id, output.Output(1, False, "", stderr, None), 0


class TestJob(Job):
  # Wraps a single test case for execution in a worker process. Only the
  # test object is pickled; its suite reference is re-attached worker-side.
  def __init__(self, test):
    self.test = test

  def _rename_coverage_data(self, output, context):
    """Rename coverage data.

    Rename files with PIDs to files with unique test IDs, because the number
    of tests might be higher than pid_max. E.g.:
    d8.1234.sancov -> d8.test.42.1.sancov, where 1234 was the process' PID,
    42 is the test ID and 1 is the attempt (the same test might be rerun on
    failures).
    """
    if context.sancov_dir and output.pid is not None:
      sancov_file = os.path.join(
          context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))

      # Some tests are expected to fail and don't produce coverage data.
      if os.path.exists(sancov_file):
        parts = sancov_file.split(".")
        new_sancov_file = ".".join(
            parts[:-2] +
            ["test", str(self.test.id), str(self.test.run)] +
            parts[-1:]
        )
        # Each (test id, run) pair must be unique, so the target must not
        # exist yet.
        assert not os.path.exists(new_sancov_file)
        os.rename(sancov_file, new_sancov_file)

  def Run(self, process_context):
    """Execute the wrapped test in this worker and return its result.

    Returns a (test_id, Output, duration_seconds) tuple; setup failures are
    converted by SetupProblem into the same tuple shape.
    """
    try:
      # Retrieve a new suite object on the worker-process side. The original
      # suite object isn't pickled.
      self.test.SetSuiteObject(process_context.suites)
      instr = _GetInstructions(self.test, process_context.context)
    except Exception, e:
      return SetupProblem(e, self.test)

    start_time = time.time()
    output = commands.Execute(instr.command, instr.verbose, instr.timeout)
    self._rename_coverage_data(output, process_context.context)
    return (instr.id, output, time.time() - start_time)


def RunTest(job, process_context):
  # Module-level trampoline so the pool can pickle the callable by name.
  return job.Run(process_context)


class Runner(object):
  """Drives all tests through a worker pool and aggregates the results.

  Keeps the pass/fail/crash statistics, feeds the progress indicator,
  persists per-test durations in the perf database and schedules reruns
  of failing tests (bounded by the context's rerun limits).
  """

  def __init__(self, suites, progress_indicator, context):
    self.datapath = os.path.join("out", "testrunner_data")
    self.perf_data_manager = perfdata.GetPerfDataManager(
        context, self.datapath)
    self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
    self.perf_failures = False
    self.printed_allocations = False
    self.tests = [ t for s in suites for t in s.tests ]
    if not context.no_sorting:
      for t in self.tests:
        # Tests without recorded perf data default to 1 second.
        t.duration = self.perfdata.FetchPerfData(t) or 1.0
      # Two stable sorts: slow-marked tests first within equal durations,
      # then longest-running first overall, so expensive tests start early.
      slow_key = lambda t: statusfile.IsSlow(t.outcomes)
      self.tests.sort(key=slow_key, reverse=True)
      self.tests.sort(key=lambda t: t.duration, reverse=True)
    self._CommonInit(suites, progress_indicator, context)

  def _CommonInit(self, suites, progress_indicator, context):
    """Assign sequential test ids and reset all run statistics."""
    self.total = 0
    for s in suites:
      for t in s.tests:
        t.id = self.total
        self.total += 1
    self.indicator = progress_indicator
    progress_indicator.SetRunner(self)
    self.context = context
    self.succeeded = 0
    self.remaining = self.total
    self.failed = []
    self.crashed = 0
    self.reran_tests = 0

  def _RunPerfSafe(self, fun):
    """Run a perf-database operation, downgrading any failure to a flag.

    Perf data is best-effort: on any exception the error is printed,
    perf_failures is set (the data is nuked later) and the run continues.
    """
    try:
      fun()
    except Exception, e:
      print("PerfData exception: %s" % e)
      self.perf_failures = True

  def _MaybeRerun(self, pool, test):
    """Queue another run of a failed test if the rerun limits allow it."""
    if test.run <= self.context.rerun_failures_count:
      # Possibly rerun this test if its run count is below the maximum per
      # test. <= as the flag controls reruns not including the first run.
      if test.run == 1:
        # Count the overall number of reran tests on the first rerun.
        if self.reran_tests < self.context.rerun_failures_max:
          self.reran_tests += 1
        else:
          # Don't rerun this if the overall number of rerun tests has been
          # reached.
          return
      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
        # Rerun slow tests at most once.
        return

      # Rerun this test. Clear the previous attempt's result and count the
      # rerun as an additional pending test.
      test.duration = None
      test.output = None
      test.run += 1
      pool.add([TestJob(test)])
      self.remaining += 1
      self.total += 1

  def _ProcessTestNormal(self, test, result, pool):
    """Record one finished run in normal mode.

    Args:
      test: The test object the result belongs to.
      result: The (id, output, duration) tuple from the worker.
      pool: The worker pool, used to schedule reruns.
    Returns:
      True iff the perf database should be updated (test passed).
    """
    test.output = result[1]
    test.duration = result[2]
    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
    if has_unexpected_output:
      self.failed.append(test)
      if test.output.HasCrashed():
        self.crashed += 1
    else:
      self.succeeded += 1
    self.remaining -= 1
    # For the indicator, everything that happens after the first run is treated
    # as unexpected even if it flakily passes in order to include it in the
    # output.
    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
    if has_unexpected_output:
      # Rerun test failures after the indicator has processed the results.
      self._VerbosePrint("Attempting to rerun test after failure.")
      self._MaybeRerun(pool, test)
    # Update the perf database if the test succeeded.
    return not has_unexpected_output

  def _ProcessTestPredictable(self, test, result, pool):
    """Record one finished run in predictable mode.

    Each test is run up to three times and the "### Allocations = " line of
    consecutive runs is compared; a difference is reported as a failure.
    Returns True unconditionally (duration is always stored).
    """
    def HasDifferentAllocations(output1, output2):
      def AllocationStr(stdout):
        # Scan from the end: the allocations line is expected near the tail
        # of stdout; remember that we saw one for the final sanity assert.
        for line in reversed((stdout or "").splitlines()):
          if line.startswith("### Allocations = "):
            self.printed_allocations = True
            return line
        return ""
      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

    # Always pass the test duration for the database update.
    test.duration = result[2]
    if test.run == 1 and result[1].HasTimedOut():
      # If we get a timeout in the first run, we are already in an
      # unpredictable state. Just report it as a failure and don't rerun.
      test.output = result[1]
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
    if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
      # From the second run on, check for different allocations. If a
      # difference is found, call the indicator twice to report both tests.
      # All runs of each test are counted as one for the statistic.
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      test.output = result[1]
      self.indicator.HasRun(test, True)
    elif test.run >= 3:
      # No difference on the third run -> report a success.
      self.remaining -= 1
      self.succeeded += 1
      test.output = result[1]
      self.indicator.HasRun(test, False)
    else:
      # No difference yet and less than three runs -> add another run and
      # remember the output for comparison.
      test.run += 1
      test.output = result[1]
      pool.add([TestJob(test)])
    # Always update the perf database.
    return True

  def Run(self, jobs):
    """Run all tests with the given number of parallel jobs.

    Returns 0 on full success, 1 if any test failed, 2 if tests remained
    unprocessed (e.g. the pool stopped early).
    """
    self.indicator.Starting()
    self._RunInternal(jobs)
    self.indicator.Done()
    if self.failed:
      return 1
    elif self.remaining:
      return 2
    return 0

  def _RunInternal(self, jobs):
    """Feed all tests through the process pool and consume results."""
    pool = Pool(jobs)
    test_map = {}
    # One-element list so the nested generator can assign to it (Python 2
    # has no 'nonlocal').
    queued_exception = [None]
    def gen_tests():
      for test in self.tests:
        assert test.id >= 0
        test_map[test.id] = test
        try:
          yield [TestJob(test)]
        except Exception, e:
          # If this failed, save the exception and re-raise it later (after
          # all other tests have had a chance to run).
          queued_exception[0] = e
          continue
    try:
      it = pool.imap_unordered(
          fn=RunTest,
          gen=gen_tests(),
          process_context_fn=MakeProcessContext,
          process_context_args=[self.context],
      )
      for result in it:
        # Heartbeat results carry no payload; they only keep the indicator
        # alive between real results.
        if result.heartbeat:
          self.indicator.Heartbeat()
          continue
        test = test_map[result.value[0]]
        if self.context.predictable:
          update_perf = self._ProcessTestPredictable(test, result.value, pool)
        else:
          update_perf = self._ProcessTestNormal(test, result.value, pool)
        if update_perf:
          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
    finally:
      self._VerbosePrint("Closing process pool.")
      pool.terminate()
      self._VerbosePrint("Closing database connection.")
      self._RunPerfSafe(lambda: self.perf_data_manager.close())
      if self.perf_failures:
        # Nuke perf data in case of failures. This might not work on windows as
        # some files might still be open.
        print "Deleting perf test data due to db corruption."
        shutil.rmtree(self.datapath)
    if queued_exception[0]:
      raise queued_exception[0]

    # Make sure that any allocations were printed in predictable mode (if we
    # ran any tests).
    assert (
        not self.total or
        not self.context.predictable or
        self.printed_allocations
    )

  def _VerbosePrint(self, text):
    # Flush immediately so progress is visible even with buffered stdout.
    if self.context.verbose:
      print text
      sys.stdout.flush()


class BreakNowException(Exception):
  """Raised to abort a test run; wraps an arbitrary value as the reason."""
  def __init__(self, value):
    self.value = value
  def __str__(self):
    return repr(self.value)