#!/usr/bin/python3

# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""NN Model Test Compiler Test.

Runs the test subdirectories against the test generator/compiler.
"""

import filecmp
import glob
import os
import re
import shutil
import subprocess
import sys
import unittest


__author__ = 'Android'


DOTTED_LINE = '................'


class OrigFile:
  OrigDir = None


class TestGeneratorTests(unittest.TestCase):
  """Container for all the unittest test cases.

  Tests are added dynamically to this class as methods; there are no static
  tests, so the class body is initially empty.
  """
  pass


def GenerateTests(dir_name):
  """Creates a test method that can be added to TestGeneratorTests."""
  cwd = os.getcwd()
  def Test(self):
    os.chdir(cwd)
    ExecTest(dir_name, self)
  return Test


def AddUnitTests(test_dirs):
  """Adds a test method to TestGeneratorTests for each dir in test_dirs."""
  for t in test_dirs:
    # Method names must start with 'test_' for unittest to pick them up.
    test_name = 'test_%s' % t
    test = GenerateTests(t)
    # Add the test as a method of TestGeneratorTests, named test_name.
    setattr(TestGeneratorTests, test_name, test)


class Options(object):
  verbose = 0
  cleanup = 1
  update_cts = 0
  zero_return = 0


def CompareFiles(actual, expect):
  """Compares actual and expect for equality."""
  if not os.path.isfile(actual):
    if Options.verbose:
      print('Could not find %s' % actual)
    return False
  if not os.path.isfile(expect):
    if Options.verbose:
      print('Could not find %s' % expect)
    return False

  return filecmp.cmp(actual, expect, shallow=False)


def CopyIfDifferent(src, dst):
  """Updates dst if it differs from src."""
  if not CompareFiles(src, dst):
    if Options.verbose:
      print('Copying from %s to %s' % (src, dst))
    shutil.copyfile(src, dst)


def GetCommandLineArgs(filename):
  """Extracts command line arguments from the first comment line of a file."""
  with open(filename, 'r') as f:
    line = f.readline()
  if line.startswith('//'):
    return line[2:].strip()
  return ''


def ReadFileToStr(filename):
  """Returns the contents of a file as a str."""
  with open(filename, 'r') as f:
    return f.read()


def ReportIfDifferFromExpected(tests, name, file1, file2):
  """Fails tests if file1 and file2 differ."""
  if not CompareFiles(file1, file2):
    if Options.verbose:
      err_message = ('%s is different:\n'
                     'expected:\n%s\n%s%s\n\n'
                     'actual:\n%s\n%s%s\n') % (
                         name,
                         DOTTED_LINE, ReadFileToStr(file1), DOTTED_LINE,
                         DOTTED_LINE, ReadFileToStr(file2), DOTTED_LINE)
    else:
      err_message = '%s is different' % name
    tests.fail(err_message)


def GetRSFiles():
  """Returns a sorted list of the '*.mod.py' test specs in the cwd."""
  rs_files = glob.glob('*.mod.py')
  rs_files.sort()
  return rs_files
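

# A sketch of the per-test directory layout that CreateCmd() and ExecTest()
# assume; the concrete names below are illustrative placeholders, not taken
# from an actual test:
#
#   P_example/              # 'P_' => generator expected to succeed,
#                           # 'F_' => expected to fail; names of the form
#                           # 'P_vts_*' / 'F_vts_*' select the vts generator.
#     example.mod.py        # test spec; may begin with a '// <extra args>' line
#     stdout.txt.expect     # expected generator stdout
#     stderr.txt.expect     # expected generator stderr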


def GetOutDir():
  """Returns the directory holding the generator scripts (parent of this dir)."""
  return os.path.abspath(os.path.join(OrigFile.OrigDir, '../'))


# Placeholder cache attribute for GetOutDir(); the current implementation
# derives the directory directly from OrigFile.OrigDir and does not use it.
GetOutDir.cache = None


def CreateCmd(run_vts):
  """Creates the test command to run for the current test."""
  cmd_string = '%s/%s_generator.py' % (GetOutDir(), 'vts' if run_vts else 'cts')
  base_args = cmd_string.split()
  rs_files = GetRSFiles()

  # Extra command line arguments can be placed as a // comment on the first
  # line of any test spec file. All of these extra args are bundled up and
  # passed to the generator.
  extra_args_str = ''
  for rs_file in rs_files:
    # Separate per-file arguments with a space so they do not run together.
    extra_args_str += GetCommandLineArgs(rs_file) + ' '
  extra_args = extra_args_str.split()

  args = base_args + extra_args + rs_files
  return args


def Cleanup():
  """Cleans the cwd of any temporary files created by the current test."""
  try:
    os.remove('stdout.txt')
    os.remove('stderr.txt')
    shutil.rmtree('tmp/')
  except OSError:
    pass


def CheckTestResult(dir_name, subprocess_ret, tests, args):
  """Checks the result of the subprocess command to see if it passed or failed.

  If dir_name starts with 'F_', the subprocess is expected to fail; if it
  succeeded instead, the test fails. Conversely, if dir_name starts with
  'P_', the subprocess is expected to succeed.

  Args:
    dir_name: name of the current directory/test name
    subprocess_ret: return code of the subprocess
    tests: unittest instance; call tests.fail(reason) on failure
    args: the arguments of the command that was run
  """
  if dir_name[0:2] == 'F_':
    if subprocess_ret == 0:
      if Options.verbose:
        err_message = ('Command (%s) passed on invalid input\n'
                       'stdout:\n%s\n%s%s\n') % (
                           ' '.join(args),
                           DOTTED_LINE, ReadFileToStr('stdout.txt'), DOTTED_LINE)
      else:
        err_message = 'Command passed on invalid input'
      tests.fail(err_message)
  elif dir_name[0:2] == 'P_':
    if subprocess_ret != 0:
      if Options.verbose:
        err_message = ('Command (%s) failed on valid input\n'
                       'stderr:\n%s\n%s%s\n') % (
                           ' '.join(args),
                           DOTTED_LINE, ReadFileToStr('stderr.txt'), DOTTED_LINE)
      else:
        err_message = 'Command failed on valid input'
      tests.fail(err_message)
  else:
    tests.fail('Invalid test name: ' + dir_name +
               ', should start with F_ or P_')


def ExecTest(dir_name, tests):
  """Executes a test generator test from dir_name."""

  os.chdir(dir_name)
  stdout_file = open('stdout.txt', 'w+')
  stderr_file = open('stderr.txt', 'w+')
  run_vts = (dir_name[2:5] == 'vts')
  args = CreateCmd(run_vts)

  if Options.verbose > 1:
    print('Executing:', ' '.join(args))

  # Execute the command and check the resulting shell return value.
  # All tests that are expected to FAIL have directory names that
  # start with 'F_'. Other tests that are expected to PASS have
  # directory names that start with 'P_'.
  ret = 0
  try:
    ret = subprocess.call(args, stdout=stdout_file, stderr=stderr_file)
  except OSError:
    tests.fail('subprocess.call failed: ' + ' '.join(args))

  stdout_file.close()
  stderr_file.close()

  CheckTestResult(dir_name, ret, tests, args)

  ReportIfDifferFromExpected(tests, 'stdout', 'stdout.txt.expect', 'stdout.txt')
  ReportIfDifferFromExpected(tests, 'stderr', 'stderr.txt.expect', 'stderr.txt')

  if Options.cleanup:
    Cleanup()


def Usage():
  """Prints out usage information."""
  print('Usage: %s [OPTION]... [TESTNAME]...\n'
        'NN model test generator test harness\n'
        'Runs TESTNAMEs (all tests by default)\n'
        'Available Options:\n'
        '  -h, --help          Help message\n'
        '  -n, --no-cleanup    Don\'t clean up after running tests\n'
        '  -v, --verbose       Verbose output. Repeat -v for more verbosity.\n'
        '  -z, --zero-return   Return 0 as the exit code even if tests fail. '
        'Required for TreeHugger.\n'
        % sys.argv[0])


def main():
  """Runs the unittest suite.

  Parses command line arguments and adds the test directories as tests.

  Returns:
    0 if the '-z' flag is set.
    Otherwise unittest.main() exits with its own error code.
  """

  OrigFile.OrigDir = os.path.dirname(os.path.abspath(__file__))
  # Chdir to the directory this file is in, since the tests live there.
  os.chdir(OrigFile.OrigDir)
  files = []
  for arg in sys.argv[1:]:
    if arg in ('-h', '--help'):
      Usage()
      return 0
    elif arg in ('-n', '--no-cleanup'):
      Options.cleanup = 0
    elif arg in ('-u', '--update-cts'):
      Options.update_cts = 1
    elif arg in ('-v', '--verbose'):
      Options.verbose += 1
    elif arg in ('-z', '--zero-return'):
      Options.zero_return = 1
    else:
      # Anything else is a test name to run.
      if os.path.isdir(arg):
        files.append(arg)
      else:
        print('Invalid test or option: %s' % arg, file=sys.stderr)
        return 1

  if not files:
    file_names = os.listdir('.')
    # Test names must start with 'F_' or 'P_':
    # 'F_' tests are expected to fail, 'P_' tests are expected to pass.
    for f in file_names:
      if os.path.isdir(f) and (f[0:2] == 'F_' or f[0:2] == 'P_'):
        files.append(f)
    files.sort()

  AddUnitTests(files)

  # verbosity=2 is necessary for PythonUnitTestRunner to parse the results;
  # otherwise the verbosity does not matter.
  # If Options.zero_return is set, do not let unittest.main() exit. This is
  # necessary in TreeHugger to distinguish between failing tests and failing
  # to execute the python script. If Options.zero_return is not set, let
  # unittest.main() exit; in that case it returns a non-zero code if any
  # tests fail.
  unittest_exit = Options.zero_return == 0
  unittest.main(verbosity=2,
                argv=[sys.argv[0]] + ['TestGeneratorTests'],
                exit=unittest_exit)

  return 0


if __name__ == '__main__':
  sys.exit(main())
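
# Example invocations (a sketch; the script name and 'P_example' are
# placeholders, substitute this file's actual name and an existing
# P_*/F_* test directory):
#
#   ./test_generator_test.py                 # run all P_*/F_* tests
#   ./test_generator_test.py -v P_example    # run a single test, verbosely
#   ./test_generator_test.py -n P_example    # keep stdout.txt, stderr.txt, tmp/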