1# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4"""
5Runs the piglit OpenGL suite of tests.
6"""
7
8import logging, os, re
9from autotest_lib.client.bin import test, utils
10from autotest_lib.client.common_lib import error
11from autotest_lib.client.cros.graphics import graphics_utils
12from optparse import OptionParser
13
class graphics_Piglit(test.test):
    """
    Collection of automated tests for OpenGL implementations.

    The binaries are pulled into test images via media-lib/piglit.
    http://piglit.freedesktop.org
    """
    version = 2
    preserve_srcdir = True
    # GraphicsStateChecker instance; created in initialize().
    GSC = None
    # Install location of the piglit suite on the test image.
    piglit_path = '/usr/local/piglit'

    def initialize(self):
        """Create the graphics state checker before the test runs."""
        self.GSC = graphics_utils.GraphicsStateChecker()

    def cleanup(self):
        """Report GPU memory keyvals and finalize the state checker."""
        if self.GSC:
            keyvals = self.GSC.get_memory_keyvals()
            # .items() instead of the Python-2-only .iteritems() keeps this
            # working under both Python 2 and Python 3.
            for key, val in keyvals.items():
                self.output_perf_value(description=key, value=val,
                                       units='bytes', higher_is_better=False)
            self.GSC.finalize()
            self.write_perf_keyval(keyvals)

    def run_once(self, test='cros-driver.py', args=None):
        """Run a piglit profile and report per-result subtest counts.

        @param test: name of the piglit profile/test file to run.
        @param args: optional list of extra command line arguments to parse
                     (e.g. ['-t', 'name'] to run a single piglit test);
                     defaults to no arguments.

        @raises error.TestError: if the piglit binary is missing or the run
                produced an empty log.
        """
        # Avoid the mutable-default-argument pitfall. parse_args() must still
        # receive an explicit empty list (passing None would make it fall
        # back to sys.argv).
        if args is None:
            args = []
        parser = OptionParser()
        parser.add_option('-t',
                          '--test-name',
                          dest='testName',
                          default='',
                          help='Run a specific piglit test.')
        options, args = parser.parse_args(args)
        gpu_family = utils.get_gpu_family()
        logging.info('Detected gpu family %s.', gpu_family)
        # TODO(djkurtz): Delete this once piglit runs on mali/tegra.
        if gpu_family in ['mali', 'tegra']:
            logging.info('Not running any tests, passing by default.')
            return

        # Keep a copy of stdout in piglit-run.log.
        log_path = os.path.join(self.outputdir, 'piglit-run.log')
        # Keep the html results in the cros-driver directory.
        results_path = os.path.join(self.outputdir, 'cros-driver')
        # The location of the piglit executable script.
        run_path = os.path.join(self.piglit_path, 'bin/piglit')
        if not os.path.exists(run_path):
            raise error.TestError('piglit not found at %s' % self.piglit_path)

        os.chdir(self.piglit_path)
        logging.info('cd %s', os.getcwd())
        # Piglit by default wants to run multiple tests in separate processes
        # concurrently. Strictly serialize this using --no-concurrency.
        # Now --dmesg also implies no concurrency but we want to be explicit.
        flags = 'run -v --dmesg --no-concurrency'
        if options.testName != '':
            flags = flags + ' -t ' + options.testName
        cmd = 'python %s %s %s %s' % (run_path, flags, test, self.outputdir)
        # Pipe stdout and stderr into piglit-run.log for later analysis.
        cmd = cmd + ' | tee ' + log_path
        cmd = graphics_utils.xcommand(cmd)
        logging.info(cmd)
        utils.run(cmd,
                  stderr_is_expected=False,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS)

        # Make sure logs get written before continuing.
        utils.run('sync')
        # Convert results.json file to human readable html.
        cmd = ('python %s summary html --overwrite -e all %s %s/results.json' %
               (run_path, results_path, self.outputdir))
        utils.run(cmd,
                  stderr_is_expected=False,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS)
        # Make sure logs get written before continuing.
        utils.run('sync')

        # Count number of pass, fail, warn and skip in piglit-run.log (could
        # also use results.json). Context manager guarantees the log file is
        # closed even if read() raises.
        with open(log_path, 'r') as f:
            summary = f.read()
        if not summary:
            raise error.TestError('Test summary was empty')

        # Output counts for future processing.
        keyvals = {}
        for k in ['pass', 'fail', 'crash', 'warn', 'skip']:
            # Result lines in the log look like 'pass :: <testname>'.
            num = len(re.findall(k + ' :: ', summary))
            keyvals['count_subtests_' + k] = num
            logging.info('Piglit: %d %s', num, k)
            self.output_perf_value(description=k, value=num,
                                   units='count',
                                   higher_is_better=(k == 'pass'))

        self.write_perf_keyval(keyvals)