# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


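"""Local test execution for the V8 test runner.

Builds the command line for each test case and executes the tests in a pool
of worker processes, reporting results through a progress indicator.

A typical call sequence (a sketch; the exact call sites are the top-level
test runner scripts):

  runner = Runner(suites, progress_indicator, context)
  exit_code = runner.Run(jobs)
"""
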
import collections
import os
import shutil
import sys
import time

from . import commands
from . import perfdata
from . import statusfile
from . import testsuite
from . import utils
from .pool import Pool


# Base dir of the v8 checkout.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)))))
TEST_DIR = os.path.join(BASE_DIR, "test")


class Instructions(object):
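  """Holds everything a worker needs to execute one test case: the command
  line, an optional dependency command, the test id, the timeout and the
  verbosity flag.
  """
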
  def __init__(self, command, dep_command, test_id, timeout, verbose):
    self.command = command
    self.dep_command = dep_command
    self.id = test_id
    self.timeout = timeout
    self.verbose = verbose


# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
    "ProcessContext", ["suites", "context"])


def MakeProcessContext(context):
64  """Generate a process-local context.
65
66  This reloads all suites per process and stores the global context.
67
68  Args:
69    context: The global context from the test runner.
70  """
71  suite_paths = utils.GetSuitePaths(TEST_DIR)
72  suites = {}
73  for root in suite_paths:
74    # Don't reinitialize global state as this is concurrently called from
75    # different processes.
76    suite = testsuite.TestSuite.LoadTestSuite(
77        os.path.join(TEST_DIR, root), global_init=False)
78    if suite:
79      suites[suite.name] = suite
80  return ProcessContext(suites, context)
81
82
83def GetCommand(test, context):
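  """Constructs the full command line used to execute the given test."""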
  d8testflag = []
  shell = test.suite.shell()
  if shell == "d8":
    d8testflag = ["--test"]
  if utils.IsWindows():
    shell += ".exe"
  if context.random_seed:
    d8testflag += ["--random-seed=%s" % context.random_seed]
  cmd = (context.command_prefix +
         [os.path.abspath(os.path.join(context.shell_dir, shell))] +
         d8testflag +
         test.suite.GetFlagsForTestCase(test, context) +
         context.extra_flags)
  return cmd


def _GetInstructions(test, context):
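  """Bundles the command and a scaled timeout into an Instructions object.

  The base timeout is increased for --stress-opt runs, for --noenable-vfp3
  and for tests marked as SLOW in the status files.
  """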
  command = GetCommand(test, context)
  timeout = context.timeout
  if ("--stress-opt" in test.flags or
      "--stress-opt" in context.mode_flags or
      "--stress-opt" in context.extra_flags):
    timeout *= 4
  if "--noenable-vfp3" in context.extra_flags:
    timeout *= 2
  # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
  # the like.
  if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
    timeout *= 2
  if test.dependency is not None:
    dep_command = [c.replace(test.path, test.dependency) for c in command]
  else:
    dep_command = None
  return Instructions(
      command, dep_command, test.id, timeout, context.verbose)


class Job(object):
122  """Stores data to be sent over the multi-process boundary.
123
124  All contained fields will be pickled/unpickled.
125  """
126
127  def Run(self, process_context):
128    """Executes the job.
129
130    Args:
131      process_context: Process-local information that is initialized by the
132                       executing worker.
133    """
134    raise NotImplementedError()
135
136
137class TestJob(Job):
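  """Job that executes a single test case (and its dependency, if any)."""
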
  def __init__(self, test):
    self.test = test

  def Run(self, process_context):
    # Retrieve a new suite object on the worker-process side. The original
    # suite object isn't pickled.
    self.test.SetSuiteObject(process_context.suites)
    instr = _GetInstructions(self.test, process_context.context)

    start_time = time.time()
    if instr.dep_command is not None:
      dep_output = commands.Execute(
          instr.dep_command, instr.verbose, instr.timeout)
      # TODO(jkummerow): We approximate the test suite specific function
      # IsFailureOutput() by just checking the exit code here. Currently
      # only cctests define dependencies, for which this simplification is
      # correct.
      if dep_output.exit_code != 0:
        return (instr.id, dep_output, time.time() - start_time)
    output = commands.Execute(instr.command, instr.verbose, instr.timeout)
    return (instr.id, output, time.time() - start_time)


def RunTest(job, process_context):
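  """Executes a job inside a worker process.

  Kept as a module-level function so it can be pickled for the pool.
  """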
  return job.Run(process_context)


class Runner(object):
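  """Schedules all tests onto a worker pool and collects the results.

  Also maintains the perf database used to sort tests by expected duration
  and handles reruns of failed tests.
  """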

  def __init__(self, suites, progress_indicator, context):
    self.datapath = os.path.join("out", "testrunner_data")
    self.perf_data_manager = perfdata.GetPerfDataManager(
        context, self.datapath)
    self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
    self.perf_failures = False
    self.printed_allocations = False
    self.tests = [t for s in suites for t in s.tests]
    if not context.no_sorting:
      for t in self.tests:
        t.duration = self.perfdata.FetchPerfData(t) or 1.0
      slow_key = lambda t: statusfile.IsSlow(t.outcomes)
      self.tests.sort(key=slow_key, reverse=True)
      self.tests.sort(key=lambda t: t.duration, reverse=True)
    self._CommonInit(suites, progress_indicator, context)

  def _CommonInit(self, suites, progress_indicator, context):
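    """Assigns consecutive ids to all tests and resets the run statistics."""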
    self.total = 0
    for s in suites:
      for t in s.tests:
        t.id = self.total
        self.total += 1
    self.indicator = progress_indicator
    progress_indicator.SetRunner(self)
    self.context = context
    self.succeeded = 0
    self.remaining = self.total
    self.failed = []
    self.crashed = 0
    self.reran_tests = 0

  def _RunPerfSafe(self, fun):
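    """Runs fun(), flagging perf-data failures instead of propagating them."""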
    try:
      fun()
    except Exception as e:
      print("PerfData exception: %s" % e)
      self.perf_failures = True

  def _MaybeRerun(self, pool, test):
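    """Adds a failed test back to the pool unless a rerun limit is hit.

    Reruns are bounded per test (rerun_failures_count) and globally
    (rerun_failures_max); slow tests are rerun at most once.
    """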
    if test.run <= self.context.rerun_failures_count:
      # Possibly rerun this test if its run count is below the maximum per
      # test. <= is correct as the flag counts reruns, excluding the first
      # run.
      if test.run == 1:
        # Count this test toward the overall rerun budget on its first rerun.
        if self.reran_tests < self.context.rerun_failures_max:
          self.reran_tests += 1
        else:
          # Don't rerun this if the overall number of rerun tests has been
          # reached.
          return
      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
        # Rerun slow tests at most once.
        return

      # Rerun this test.
      test.duration = None
      test.output = None
      test.run += 1
      pool.add([TestJob(test)])
      self.remaining += 1
      self.total += 1

  def _ProcessTestNormal(self, test, result, pool):
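    """Records a test result and possibly schedules a rerun.

    Returns True iff the perf database should be updated, i.e. the test
    produced the expected output.
    """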
    self.indicator.AboutToRun(test)
    test.output = result[1]
    test.duration = result[2]
    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
    if has_unexpected_output:
      self.failed.append(test)
      if test.output.HasCrashed():
        self.crashed += 1
    else:
      self.succeeded += 1
    self.remaining -= 1
    # For the indicator, everything after the first run is treated as
    # unexpected, even a flaky pass, so that reruns show up in the output.
    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
    if has_unexpected_output:
      # Rerun test failures after the indicator has processed the results.
      self._VerbosePrint("Attempting to rerun test after failure.")
      self._MaybeRerun(pool, test)
    # Only update the perf database if the test succeeded.
    return not has_unexpected_output

  def _ProcessTestPredictable(self, test, result, pool):
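    """Processes a result in predictable mode.

    Each test is run up to three times; the "### Allocations = ..." lines of
    consecutive runs are compared and any difference is reported as a
    failure. Always returns True so the perf database is updated.
    """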
    def HasDifferentAllocations(output1, output2):
      def AllocationStr(stdout):
        for line in reversed((stdout or "").splitlines()):
          if line.startswith("### Allocations = "):
            self.printed_allocations = True
            return line
        return ""
      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

    # Always pass the test duration for the database update.
    test.duration = result[2]
    if test.run == 1 and result[1].HasTimedOut():
      # If we get a timeout in the first run, we are already in an
      # unpredictable state. Just report it as a failure and don't rerun.
      self.indicator.AboutToRun(test)
      test.output = result[1]
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
    elif test.run > 1 and HasDifferentAllocations(test.output, result[1]):
      # From the second run on, check for different allocations. If a
      # difference is found, call the indicator twice to report both tests.
      # All runs of each test are counted as one for the statistic.
      self.indicator.AboutToRun(test)
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      self.indicator.AboutToRun(test)
      test.output = result[1]
      self.indicator.HasRun(test, True)
    elif test.run >= 3:
      # No difference on the third run -> report a success.
      self.indicator.AboutToRun(test)
      self.remaining -= 1
      self.succeeded += 1
      test.output = result[1]
      self.indicator.HasRun(test, False)
    else:
      # No difference yet and fewer than three runs -> add another run and
      # remember the output for comparison.
      test.run += 1
      test.output = result[1]
      pool.add([TestJob(test)])
    # Always update the perf database.
    return True

  def Run(self, jobs):
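    """Runs all tests on the given number of parallel jobs.

    Returns:
      0 if all tests passed, 1 if there were failures and 2 if tests
      remained unprocessed.
    """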
    self.indicator.Starting()
    self._RunInternal(jobs)
    self.indicator.Done()
    if self.failed:
      return 1
    elif self.remaining:
      return 2
    return 0

  def _RunInternal(self, jobs):
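    """Feeds all tests to the worker pool and dispatches the results."""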
    pool = Pool(jobs)
    test_map = {}
    queued_exception = [None]
    def gen_tests():
      for test in self.tests:
        assert test.id >= 0
        test_map[test.id] = test
        try:
          yield [TestJob(test)]
        except Exception as e:
          # If this failed, save the exception and re-raise it later (after
          # all other tests have had a chance to run).
          queued_exception[0] = e
          continue
    try:
      it = pool.imap_unordered(
          fn=RunTest,
          gen=gen_tests(),
          process_context_fn=MakeProcessContext,
          process_context_args=[self.context],
      )
      for result in it:
        if result.heartbeat:
          self.indicator.Heartbeat()
          continue
        test = test_map[result.value[0]]
        if self.context.predictable:
          update_perf = self._ProcessTestPredictable(test, result.value, pool)
        else:
          update_perf = self._ProcessTestNormal(test, result.value, pool)
        if update_perf:
          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
    finally:
      self._VerbosePrint("Closing process pool.")
      pool.terminate()
      self._VerbosePrint("Closing database connection.")
      self._RunPerfSafe(lambda: self.perf_data_manager.close())
      if self.perf_failures:
        # Nuke perf data in case of failures. This might not work on Windows
        # as some files might still be open.
        print("Deleting perf test data due to db corruption.")
        shutil.rmtree(self.datapath)
    if queued_exception[0]:
      raise queued_exception[0]

    # Make sure that any allocations were printed in predictable mode (if we
    # ran any tests).
    assert (
        not self.total or
        not self.context.predictable or
        self.printed_allocations
    )

  def _VerbosePrint(self, text):
    if self.context.verbose:
      print(text)
      sys.stdout.flush()


class BreakNowException(Exception):
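  """Raised to abort a test run; carries a value describing the reason."""
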
  def __init__(self, value):
    super(BreakNowException, self).__init__(value)
    self.value = value

  def __str__(self):
    return repr(self.value)