/test/vts/utils/python/cpu/
cpu_frequency_scaling.py
     71  results = self._shell.Execute("cat /sys/devices/system/cpu/present")
     72  asserts.assertEqual(len(results[const.STDOUT]), 1)
     73  stdout_lines = results[const.STDOUT][0].split("\n")
    105  results = self._shell.Execute(
    108  asserts.assertEqual(1, len(results[const.EXIT_CODE]))
    109  if not results[const.EXIT_CODE][0]:
    110  freq = [int(x) for x in results[const.STDOUT][0].split()]
    138  results = self._shell.Execute(target_cmd)
    140  len(results[const.STDOUT]))
    141  if any(results[const.EXIT_CODE]):
    [all …]
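The VTS utilities above all index the same result shape: shell.Execute returns a dict of parallel lists keyed by const.STDOUT, const.STDERR, and const.EXIT_CODE, one entry per command. A minimal, device-free sketch of that access pattern; STDOUT/EXIT_CODE are stand-ins for the vts.runners.host.const keys, and fake_execute stands in for self._shell.Execute on a real device:

    # Stand-in keys mirroring vts.runners.host.const (assumed values);
    # fake_execute mimics the dict-of-parallel-lists Execute() shape.
    STDOUT, EXIT_CODE = "stdouts", "exit_code"

    def fake_execute(command):
        # One list entry per command; a real DUT shell fills these in.
        return {STDOUT: ["0-7\n"], EXIT_CODE: [0]}

    results = fake_execute("cat /sys/devices/system/cpu/present")
    assert len(results[STDOUT]) == 1        # one command -> one stdout entry
    if not results[EXIT_CODE][0]:           # exit code 0 means success
        stdout_lines = results[STDOUT][0].split("\n")
        print(stdout_lines[0])              # e.g. "0-7"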
/test/vts/utils/python/file/
target_file_utils.py
     37  results = shell.Execute(cmd)
     38  return results[const.EXIT_CODE][0] == 0
     87  results = shell.Execute(cmd)
     88  logging.debug("%s: Shell command '%s' results: %s", path, cmd, results)
     90  if results[const.EXIT_CODE][0] != 0:
     91  raise IOError(results[const.STDERR][0])
     93  stdout = str(results[const.STDOUT][0])
    112  results = shell.Execute(cmd)
    113  logging.debug("%s: Shell command '%s' results: %s", filepath, cmd, results)
    116  if results[const.EXIT_CODE][0] != 0:
    [all …]
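The same result shape drives the file helpers: an existence check reduces to comparing the exit code with 0, and a failed read surfaces the command's stderr as an IOError. A rough sketch under those assumptions; the helper names and command strings here are hypothetical, not this file's exact API:

    # Hypothetical re-creations of the two patterns above; shell_execute is
    # any callable returning the Execute() result dict.
    STDOUT, STDERR, EXIT_CODE = "stdouts", "stderrs", "exit_code"

    def exists(shell_execute, path):
        results = shell_execute("ls %s" % path)    # command string assumed
        return results[EXIT_CODE][0] == 0

    def read_file(shell_execute, path):
        results = shell_execute("cat %s" % path)
        if results[EXIT_CODE][0] != 0:
            raise IOError(results[STDERR][0])      # propagate the shell error
        return str(results[STDOUT][0])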
/test/vts/utils/python/mirror/
native_entity_mirror.py
    222  results = self._client.CallApi(
    224  if (isinstance(results, tuple) and len(results) == 2
    225  and isinstance(results[1], dict)
    226  and "coverage" in results[1]):
    227  self._last_raw_code_coverage_data = results[1]["coverage"]
    228  results = results[0]
    230  if isinstance(results, list):  # Non-HIDL HAL does not return list.
    232  for i, _ in enumerate(results):
    233  result = results[i]
    241  results[i] = None
    [all …]
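The mirror distinguishes a plain payload from a (payload, info) tuple whose info dict may carry raw coverage data. A self-contained sketch of that unpacking step; unpack_call_result is a hypothetical name for illustration:

    # Sketch of the tuple handling above: CallApi may return (payload, info),
    # where info is a dict that can carry raw coverage bytes.
    def unpack_call_result(results):
        coverage = None
        if (isinstance(results, tuple) and len(results) == 2
                and isinstance(results[1], dict)
                and "coverage" in results[1]):
            coverage = results[1]["coverage"]
            results = results[0]
        return results, coverage

    payload, coverage = unpack_call_result(([0, 1], {"coverage": b"..."}))
    print(payload, coverage)                # [0, 1] b'...'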
/test/vts-testcase/hal/treble/platform_version/
VtsTreblePlatformVersionTest.py
     37  results = self.dut.shell.Execute("getprop " + prop)
     39  asserts.assertEqual(results[const.EXIT_CODE][0], 0,
     41  asserts.assertTrue(len(results[const.STDOUT][0].strip()) > 0,
     44  if (results[const.EXIT_CODE][0] != 0 or
     45  len(results[const.STDOUT][0].strip()) == 0):
     49  result = results[const.STDOUT][0].strip()
     58  results = self.dut.shell.Execute("printenv " + env)
     59  if (results[const.EXIT_CODE][0] != 0 or
     60  len(results[const.STDOUT][0].strip()) == 0):
     64  result = results[const.STDOUT][0].strip()
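Both the getprop and printenv checks reduce to one guard: the command must exit 0 and print something non-empty before the stripped value is trusted. A sketch of that guard as a helper; get_prop is a hypothetical name, and returning None is this sketch's convention rather than the test's:

    # Hypothetical helper for the getprop/printenv guard above.
    STDOUT, EXIT_CODE = "stdouts", "exit_code"

    def get_prop(shell_execute, prop):
        results = shell_execute("getprop " + prop)
        if (results[EXIT_CODE][0] != 0
                or len(results[STDOUT][0].strip()) == 0):
            return None                     # caller turns this into a failure
        return results[STDOUT][0].strip()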
/test/vti/dashboard/src/main/java/com/android/vts/entity/
TestCaseRunEntity.java
     54  private List<Integer> results;    field in TestCaseRunEntity
     92  this.results = new ArrayList<>();    in TestCaseRunEntity()
    104  this.results = new ArrayList<>();    in TestCaseRunEntity()
    139  if (testCaseNames.size() == results.size()) {    in onLoad()
    142  int result = results.get(index).intValue();    in onLoad()
    159  this.results.add(result);    in addTestCase()
    180  List<Integer> results = new ArrayList<>();    in toEntity() local
    183  results.add(testCase.result);    in toEntity()
    186  testCaseRunEntity.setUnindexedProperty(RESULTS, results);    in toEntity()
    211  List<Long> results = (List<Long>) e.getProperty(RESULTS);    in fromEntity() local
    [all …]
/test/mlts/benchmark/src/com/android/nn/benchmark/core/
BenchmarkResult.java
    193  List<Pair<String, Float>> results = new ArrayList<>();    in getEvaluatorResults() local
    195  results.add(new Pair<>(mEvaluatorKeys[i], mEvaluatorResults[i]));    in getEvaluatorResults()
    197  return results;    in getEvaluatorResults()
    265  Bundle results = new Bundle();    in toBundle() local
    267  results.putString(testName + "_error", mBenchmarkError);    in toBundle()
    268  return results;    in toBundle()
    271  mLatencyInference.putToBundle(results, testName + "_inference");    in toBundle()
    272  results.putFloat(testName + "_inference_mean_square_error",    in toBundle()
    274  results.putFloat(testName + "_inference_max_single_error", mMaxSingleError);    in toBundle()
    276  results.putFloat(testName + "_inference_" + mEvaluatorKeys[i], mEvaluatorResults[i]);    in toBundle()
    [all …]
LatencyResult.java
     43  public LatencyResult(float[] results) {    in LatencyResult() argument
     44  mIterations = results.length;    in LatencyResult()
     48  for (float result : results) {    in LatencyResult()
     57  for (float result : results) {
     68  for (float result : results) {
    112  public void putToBundle(Bundle results, String prefix) {    in putToBundle() argument
    114  results.putFloat(prefix + "_avg", getMeanTimeSec() * 1000.0f);    in putToBundle()
    115  results.putFloat(prefix + "_std_dev", mTimeStdDeviation * 1000.0f);    in putToBundle()
    116  results.putFloat(prefix + "_total_time", mTotalTimeSec * 1000.0f);    in putToBundle()
    117  results.putInt(prefix + "_iterations", mIterations);    in putToBundle()
OutputMeanStdDev.java
     43  float[] results = new float[mNumOutputs];    in denormalize() local
     45  results[i] = mMeanStdDevs[i].denormalize(values[i]);    in denormalize()
     47  return results;    in denormalize()
Processor.java
    208  Pair<List<InferenceInOutSequence>, List<InferenceResult>> results;    in runBenchmarkLoop() local
    211  results = mTest.runBenchmarkCompleteInputSet(1, maxTime);    in runBenchmarkLoop()
    213  results = mTest.runBenchmark(maxTime);    in runBenchmarkLoop()
    216  results = mTest.runInferenceOnce();    in runBenchmarkLoop()
    221  results.first,    in runBenchmarkLoop()
    222  results.second,    in runBenchmarkLoop()
/test/vts-testcase/kernel/ltp/shell_environment/
shell_environment.py
     76  results = self.shell.Execute('cat %s' %
     78  if (not results or results[const.EXIT_CODE][0] or
     79  not results[const.STDOUT][0]):
     81  "\n Command results: {}".format(results))
     84  cpu_info = results[const.STDOUT][0].strip()
/test/vts-testcase/performance/fmq_benchmark/
FmqPerformanceTest.py
     84  results = self.dut.shell.Execute([
     90  asserts.assertEqual(len(results[const.STDOUT]), 2)
     92  any(results[const.EXIT_CODE]),
     99  results = self.dut.shell.Execute([
    110  asserts.assertEqual(len(results[const.STDOUT]), 2)
    112  any(results[const.EXIT_CODE]), "FmqPerformanceTest failed.")
    117  stdout_lines = results[const.STDOUT][1].split("\n")
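Execute also accepts a list of commands, as this test shows: the result lists then hold one entry per command, so a length check and any() over the exit codes cover the whole batch. A sketch of that pattern; run_batch is a hypothetical wrapper, and taking the last stdout entry is this sketch's assumption about where the benchmark output lands:

    # Hypothetical wrapper for the batched-Execute pattern above.
    STDOUT, EXIT_CODE = "stdouts", "exit_code"

    def run_batch(shell_execute, commands):
        results = shell_execute(commands)
        assert len(results[STDOUT]) == len(commands)   # one entry per command
        if any(results[EXIT_CODE]):                    # any nonzero code fails
            raise RuntimeError("a command in the batch failed")
        return results[STDOUT][-1].split("\n")         # assumed: output is last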
/test/vti/dashboard/src/test/java/com/android/vts/entity/
TestCaseRunEntityTest.java
     35  List<Integer> results = Arrays.asList(1, 1, 1, 1, 1, 1, 1);    in saveTest() local
     47  for (int index = 0; index < results.size(); index++) {    in saveTest()
     49  int result = results.get(index);    in saveTest()
     54  assertEquals(loadedTestCaseRunEntity.getTestCases().size(), results.size());    in saveTest()
     56  (Integer) loadedTestCaseRunEntity.getTestCases().get(0).result, results.get(0));    in saveTest()
/test/vts-testcase/performance/binder_benchmark/
BinderPerformanceTest.py
    111  results = self.dut.shell.Execute([
    118  asserts.assertEqual(len(results[const.STDOUT]), 2)
    119  logging.info("stderr: %s", results[const.STDERR][1])
    120  logging.info("stdout: %s", results[const.STDOUT][1])
    122  any(results[const.EXIT_CODE]),
    125  results[const.STDOUT][1])
/test/mlts/benchmark/results/
generate_result.py
    161  results = []
    163  results.append(parser.read_benchmark_result())
    165  return (benchmark_info, results)
    168  def group_results(results):    argument
    172  for result in results:
    177  for name, results in groupings.items():
    179  results))
    180  other = sorted(filter(lambda x: x is not baseline, results),
    425  results = [results_with_bl.baseline] + results_with_bl.other
    426  tags = [result.backend_type for result in results]
    [all …]
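group_results buckets parsed results, promotes one entry per bucket to a baseline, and keeps the rest sorted. A rough reconstruction of that shape; the .name and .backend_type attributes and the pick_baseline callable are assumptions for illustration, not this file's actual API:

    # Rough reconstruction of the group_results() flow sketched above.
    import collections

    def group_results(results, pick_baseline):
        groupings = collections.defaultdict(list)
        for result in results:
            groupings[result.name].append(result)   # bucket by benchmark name
        grouped = []
        for name, bucket in groupings.items():
            baseline = pick_baseline(bucket)        # one baseline per bucket
            other = sorted((x for x in bucket if x is not baseline),
                           key=lambda x: x.backend_type)
            grouped.append((baseline, other))
        return grouped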
/test/vts-testcase/performance/hwbinder_benchmark/
HwBinderPerformanceTest.py
    115  results = self.dut.shell.Execute([
    125  asserts.assertEqual(len(results[const.STDOUT]), 2)
    126  logging.info("stderr: %s", results[const.STDERR][1])
    127  logging.info("stdout: %s", results[const.STDOUT][1])
    129  any(results[const.EXIT_CODE]),
    132  results[const.STDOUT][1])
/test/vts-testcase/performance/audio_loopback_test/
AudioLoopbackTest.py
    106  results = self.dut.shell.Execute(
    108  while results[const.EXIT_CODE][0]:
    111  results = self.dut.shell.Execute(
    114  results = self.dut.shell.Execute(
    116  asserts.assertFalse(results[const.EXIT_CODE][0],
    118  stdout_lines = results[const.STDOUT][0].split("\n")
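Here the exit code drives a retry loop rather than a one-shot assertion: the command is re-issued until it succeeds, and only then is stdout split into lines. A bounded sketch of that loop; the max_tries cap is an addition of this sketch, not something the snippet shows:

    # Sketch of the retry-until-success loop; max_tries is an added safeguard.
    STDOUT, EXIT_CODE = "stdouts", "exit_code"

    def run_until_ok(shell_execute, cmd, max_tries=5):
        for _ in range(max_tries):
            results = shell_execute(cmd)
            if not results[EXIT_CODE][0]:             # 0 means success
                return results[STDOUT][0].split("\n")
        raise RuntimeError("command kept failing: %s" % cmd)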
/test/vts/runners/host/
base_test.py
    139  self.results = records.TestResult()
    531  if self.results.class_errors:
    541  message_b = self.web.GenerateReportMessage(self.results.requested,
    542  self.results.executed)
    698  logging.error(RESULT_LINE_TEMPLATE, self.results.progressStr,
    732  logging.info(RESULT_LINE_TEMPLATE, self.results.progressStr,
    754  logging.info(RESULT_LINE_TEMPLATE, self.results.progressStr,
    839  logging.info(RESULT_LINE_TEMPLATE, self.results.progressStr,
    966  logging.info(TEST_CASE_TEMPLATE, self.results.progressStr, test_name)
   1048  self.results.removeRecord(tr_record)
    [all …]
/test/vts/utils/python/profiling/
profiling_utils.py
    158  results = dut.shell.Execute("ls " + target_trace_file)
    159  asserts.assertTrue(results, "failed to find trace file")
    160  stdout_lines = results[const.STDOUT][0].split("\n")
    176  results = cmd_utils.ExecuteShellCommand(file_cmd)
    177  if results[const.EXIT_CODE][0] != 0:
    178  logging.error(results[const.STDERR][0])
    258  results = cmd_utils.ExecuteShellCommand(trace_processor_cmd)
    259  if any(results[cmd_utils.EXIT_CODE]):
    261  logging.error("stdout: %s" % results[const.STDOUT])
    262  logging.error("stderr: %s" % results[const.STDERR])
    [all …]
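cmd_utils.ExecuteShellCommand is the host-side counterpart of the device shell call and, judging by the indexing above, returns the same dict shape. A standard-library approximation that runs anywhere; the key strings mirror the assumed const.* values and are not this module's implementation:

    # Host-side approximation of the ExecuteShellCommand pattern above,
    # built on subprocess so it is self-contained.
    import logging
    import subprocess

    def execute_shell_command(cmd):
        proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        return {"stdouts": [proc.stdout], "stderrs": [proc.stderr],
                "exit_code": [proc.returncode]}

    results = execute_shell_command("ls /no/such/path")
    if results["exit_code"][0] != 0:
        logging.error(results["stderrs"][0])    # log stderr on failure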
/test/vts-testcase/performance/hwbinder_latency_test/
HwBinderLatencyTest.py
    104  results = self.dut.shell.Execute([
    113  asserts.assertEqual(len(results[const.STDOUT]), 2)
    114  logging.info("stderr: %s", results[const.STDERR][1])
    115  logging.info("stdout: %s", results[const.STDOUT][1])
    117  any(results[const.EXIT_CODE]),
    119  json_result = json.loads(results[const.STDOUT][1]);
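This test runs two commands per batch and parses the second command's stdout as JSON. A sketch of that final step; the sample payload key is made up for the demo:

    # Sketch: parse the benchmark JSON from the second command's stdout.
    import json

    STDOUT, EXIT_CODE = "stdouts", "exit_code"   # stand-ins for const.*

    def parse_latency_json(results):
        if any(results[EXIT_CODE]):
            raise RuntimeError("benchmark run failed")
        return json.loads(results[STDOUT][1])    # index 1: second command

    # Demo with a fabricated payload; real output comes from the benchmark.
    print(parse_latency_json({STDOUT: ["", '{"p50": 12}'], EXIT_CODE: [0, 0]}))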
/test/vts/utils/python/instrumentation/
test_framework_instrumentation.py
    148  results = []
    157  results.append(ei)
    167  results.append(ei)
    169  results.sort(key=operator.attrgetter('time_cpu'))
    174  for e in results:
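The collected event objects are ordered by their time_cpu attribute via operator.attrgetter. A self-contained sketch; the Event class here is a stand-in, not the module's type:

    # Stand-in Event type; the real module collects richer event objects.
    import operator

    class Event(object):
        def __init__(self, name, time_cpu):
            self.name, self.time_cpu = name, time_cpu

    results = [Event("b", 2.0), Event("a", 0.5)]
    results.sort(key=operator.attrgetter("time_cpu"))
    print([e.name for e in results])        # ['a', 'b']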
/test/vts/testcases/template/hal_hidl_replay_test/
hal_hidl_replay_test.py
    195  results = self.shell.Execute(
    201  results[const.EXIT_CODE][0],
    203  % (results[const.EXIT_CODE][0], results[const.STDOUT][0],
    204  results[const.STDERR][0]))
    207  for line in results[const.STDOUT][0].split('\n'):
/test/vts-testcase/performance/binder_throughput_test/
BinderThroughputBenchmark.py
    152  results = self.dut.shell.Execute(
    159  asserts.assertEqual(len(results[const.STDOUT]), 2)
    160  logging.info("stderr: %s", results[const.STDERR][1])
    161  stdout_lines = results[const.STDOUT][1].split("\n")
    165  any(results[const.EXIT_CODE]),
/test/vts-testcase/performance/hwbinder_throughput_test/
HwBinderThroughputBenchmark.py
    155  results = self.dut.shell.Execute(
    163  asserts.assertEqual(len(results[const.STDOUT]), 2)
    164  logging.info("stderr: %s", results[const.STDERR][1])
    165  stdout_lines = results[const.STDOUT][1].split("\n")
    169  any(results[const.EXIT_CODE]),
/test/mlts/benchmark/jni/
run_tflite.cpp
    298  std::vector<InferenceResult>* results) {    in benchmark() argument
    371  results->push_back(result);    in benchmark()
    491  std::vector<float>* results) {    in benchmarkSingleTypeOfCompilation() argument
    492  if (results != nullptr) {    in benchmarkSingleTypeOfCompilation()
    493  results->clear();    in benchmarkSingleTypeOfCompilation()
    538  if (results != nullptr) {    in benchmarkSingleTypeOfCompilation()
    539  results->push_back(compilationTime);    in benchmarkSingleTypeOfCompilation()
    555  std::vector<float>* results) {    in benchmarkSingleTypeOfCompilationWithWarmup() argument
    564  success = benchmarkSingleTypeOfCompilation(type, maxNumIterations, runTimeout, results);    in benchmarkSingleTypeOfCompilationWithWarmup()
/test/vts-testcase/kernel/api/sysfs/src/com/android/tests/sysfs/
KernelApiSysfsTest.java
    224  String results = getDevice().pullFileContents(wakeLockPath).trim();    in testWakeLock() local
    225  HashSet<String> activeSources = new HashSet<>(Arrays.asList(results.split(" ")));    in testWakeLock()
    233  results = getDevice().pullFileContents(wakeLockPath).trim();    in testWakeLock()
    234  activeSources = new HashSet<>(Arrays.asList(results.split(" ")));    in testWakeLock()
    238  results = getDevice().pullFileContents(wakeUnLockPath).trim();    in testWakeLock()
    239  activeSources = new HashSet<>(Arrays.asList(results.split(" ")));    in testWakeLock()