1# Copyright 2017 The Chromium OS Authors. All rights reserved. 2# Use of this source code is governed by a BSD-style license that can be 3# found in the LICENSE file. 4 5import os 6import re 7import csv 8import json 9import time 10import shutil 11import urllib 12import urllib2 13import logging 14import httplib 15 16from autotest_lib.client.common_lib.cros import tpm_utils 17from autotest_lib.server import autotest 18from autotest_lib.server import test 19from autotest_lib.server.cros.multimedia import remote_facade_factory 20 21 22PERF_CAPTURE_ITERATIONS = 15 #Number of data points that will be uploaded. 23PERF_CAPTURE_DURATION = 3600 #Duration in secs of each data point capture. 24SAMPLE_INTERVAL = 60 25METRIC_INTERVAL = 600 26STABILIZATION_DURATION = 60 27_MEASUREMENT_DURATION_SECONDS = 10 28TMP_DIRECTORY = '/tmp/' 29PERF_FILE_NAME_PREFIX = 'perf' 30VERSION_PATTERN = r'^(\d+)\.(\d+)\.(\d+)$' 31DASHBOARD_UPLOAD_URL = 'https://chromeperf.appspot.com/add_point' 32 33 34class PerfUploadingError(Exception): 35 """Exception raised in perf_uploader.""" 36 pass 37 38 39class enterprise_LongevityTrackerServer(test.test): 40 """Run Longevity Test: Collect performance data over long duration. 41 42 Run enterprise_KioskEnrollment and clear the TPM as necessary. After 43 enterprise enrollment is successful, collect and log cpu, memory, and 44 temperature data from the device under test. 45 """ 46 version = 1 47 48 49 def initialize(self): 50 self.temp_dir = os.path.split(self.tmpdir)[0] 51 52 53 def _get_cpu_usage(self): 54 """Returns cpu usage in %.""" 55 cpu_usage_start = self.system_facade.get_cpu_usage() 56 time.sleep(_MEASUREMENT_DURATION_SECONDS) 57 cpu_usage_end = self.system_facade.get_cpu_usage() 58 return self.system_facade.compute_active_cpu_time(cpu_usage_start, 59 cpu_usage_end) * 100 60 61 62 def _get_memory_usage(self): 63 """Returns total used memory in %.""" 64 total_memory = self.system_facade.get_mem_total() 65 return ((total_memory - self.system_facade.get_mem_free()) 66 * 100 / total_memory) 67 68 69 def _get_temperature_data(self): 70 """Returns temperature sensor data in fahrenheit.""" 71 ectool = self.client.run('ectool version', ignore_status=True) 72 if not ectool.exit_status: 73 ec_temp = self.system_facade.get_ec_temperatures() 74 return ec_temp[1] 75 else: 76 temp_sensor_name = 'temp0' 77 if not temp_sensor_name: 78 return 0 79 MOSYS_OUTPUT_RE = re.compile('(\w+)="(.*?)"') 80 values = {} 81 cmd = 'mosys -k sensor print thermal %s' % temp_sensor_name 82 for kv in MOSYS_OUTPUT_RE.finditer(self.client.run_output(cmd)): 83 key, value = kv.groups() 84 if key == 'reading': 85 value = int(value) 86 values[key] = value 87 return values['reading'] 88 89 90 #TODO(krishnargv@): Add a method to retrieve the version of the 91 # Kiosk app from its manifest. 92 def _initialize_test_variables(self): 93 """Initialize test variables that will be uploaded to the dashboard.""" 94 self.subtest_name = self.kiosk_app_name 95 self.board_name = self.system_facade.get_current_board() 96 self.chromeos_version = self.system_facade.get_chromeos_release_version() 97 self.epoch_minutes = str(int(time.time() / 60)) 98 self.point_id = self._get_point_id(self.chromeos_version, 99 self.epoch_minutes) 100 self.test_suite_name = self.tagged_testname 101 logging.info("Board Name: %s", self.board_name) 102 logging.info("Chromeos Version: %r", self.chromeos_version) 103 logging.info("Point_id: %r", self.point_id) 104 105 106 #TODO(krishnargv): Replace _get_point_id with a call to the 107 # _get_id_from_version method of the perf_uploader.py. 108 def _get_point_id(self, cros_version, epoch_minutes): 109 """Compute point ID from ChromeOS version number and epoch minutes. 110 111 @param cros_version: String of ChromeOS version number. 112 @param epoch_minutes: String of minutes since 1970. 113 114 @return unique integer ID computed from given version and epoch. 115 """ 116 # Number of digits from each part of the Chrome OS version string. 117 cros_version_col_widths = [0, 4, 3, 2] 118 119 def get_digits(version_num, column_widths): 120 if re.match(VERSION_PATTERN, version_num): 121 computed_string = '' 122 version_parts = version_num.split('.') 123 for i, version_part in enumerate(version_parts): 124 if column_widths[i]: 125 computed_string += version_part.zfill(column_widths[i]) 126 return computed_string 127 else: 128 return None 129 130 cros_digits = get_digits(cros_version, cros_version_col_widths) 131 epoch_digits = epoch_minutes[-8:] 132 if not cros_digits: 133 return None 134 return int(epoch_digits + cros_digits) 135 136 137 def _open_perf_file(self, file_path): 138 """Open a perf file. Write header line if new. Return file object. 139 140 If the file on |file_path| already exists, then open file for 141 appending only. Otherwise open for writing only. 142 143 @param file_path: file path for perf file. 144 @returns file object for the perf file. 145 """ 146 if os.path.isfile(file_path): 147 perf_file = open(file_path, 'a+') 148 else: 149 perf_file = open(file_path, 'w') 150 perf_file.write('Time,CPU,Memory,Temperature (C)\r\n') 151 return perf_file 152 153 154 def elapsed_time(self, mark_time): 155 """Get time elapsed since |mark_time|. 156 157 @param mark_time: point in time from which elapsed time is measured. 158 @returns time elapsed since the marked time. 159 """ 160 return time.time() - mark_time 161 162 163 def modulo_time(self, timer, interval): 164 """Get time eplased on |timer| for the |interval| modulus. 165 166 Value returned is used to adjust the timer so that it is synchronized 167 with the current interval. 168 169 @param timer: time on timer, in seconds. 170 @param interval: period of time in seconds. 171 @returns time elapsed from the start of the current interval. 172 """ 173 return timer % int(interval) 174 175 176 def syncup_time(self, timer, interval): 177 """Get time remaining on |timer| for the |interval| modulus. 178 179 Value returned is used to induce sleep just long enough to put the 180 process back in sync with the timer. 181 182 @param timer: time on timer, in seconds. 183 @param interval: period of time in seconds. 184 @returns time remaining till the end of the current interval. 185 """ 186 return interval - (timer % int(interval)) 187 188 189 #TODO(krishnargv): Replace _format_data_for_upload with a call to the 190 # _format_for_upload method of the perf_uploader.py 191 def _format_data_for_upload(self, chart_data): 192 """Collect chart data into an uploadable data JSON object. 193 194 @param chart_data: performance results formatted as chart data. 195 """ 196 perf_values = { 197 'format_version': '1.0', 198 'benchmark_name': self.test_suite_name, 199 'charts': chart_data, 200 } 201 #TODO(krishnargv): Add a method to capture the chrome_version. 202 dash_entry = { 203 'master': 'ChromeOS_Enterprise', 204 'bot': 'cros-%s' % self.board_name, 205 'point_id': self.point_id, 206 'versions': { 207 'cros_version': self.chromeos_version, 208 209 }, 210 'supplemental': { 211 'default_rev': 'r_cros_version', 212 'kiosk_app_name': 'a_' + self.kiosk_app_name, 213 214 }, 215 'chart_data': perf_values 216 } 217 return {'data': json.dumps(dash_entry)} 218 219 220 #TODO(krishnargv): Replace _send_to_dashboard with a call to the 221 # _send_to_dashboard method of the perf_uploader.py 222 def _send_to_dashboard(self, data_obj): 223 """Send formatted perf data to the perf dashboard. 224 225 @param data_obj: data object as returned by _format_data_for_upload(). 226 227 @raises PerfUploadingError if an exception was raised when uploading. 228 """ 229 logging.debug('Data_obj to be uploaded: %s', data_obj) 230 encoded = urllib.urlencode(data_obj) 231 req = urllib2.Request(DASHBOARD_UPLOAD_URL, encoded) 232 try: 233 urllib2.urlopen(req) 234 except urllib2.HTTPError as e: 235 raise PerfUploadingError('HTTPError: %d %s for JSON %s\n' % 236 (e.code, e.msg, data_obj['data'])) 237 except urllib2.URLError as e: 238 raise PerfUploadingError('URLError: %s for JSON %s\n' % 239 (str(e.reason), data_obj['data'])) 240 except httplib.HTTPException: 241 raise PerfUploadingError('HTTPException for JSON %s\n' % 242 data_obj['data']) 243 244 245 def _append_to_aggregated_file(self, ts_file, ag_file): 246 """Append contents of perf timestamp file to perf aggregated file. 247 248 @param ts_file: file handle for performance timestamped file. 249 @param ag_file: file handle for performance aggregated file. 250 """ 251 next(ts_file) # Skip fist line (the header) of timestamped file. 252 for line in ts_file: 253 ag_file.write(line) 254 255 256 def _copy_aggregated_to_resultsdir(self, aggregated_fpath): 257 """Copy perf aggregated file to results dir for AutoTest results. 258 259 Note: The AutoTest results default directory is located at /usr/local/ 260 autotest/results/default/longevity_Tracker/results 261 262 @param aggregated_fpath: file path to Aggregated performance values. 263 """ 264 results_fpath = os.path.join(self.resultsdir, 'perf.csv') 265 shutil.copy(aggregated_fpath, results_fpath) 266 logging.info('Copied %s to %s)', aggregated_fpath, results_fpath) 267 268 269 def _write_perf_keyvals(self, perf_results): 270 """Write perf results to keyval file for AutoTest results. 271 272 @param perf_results: dict of attribute performance metrics. 273 """ 274 perf_keyval = {} 275 perf_keyval['cpu_usage'] = perf_results['cpu'] 276 perf_keyval['memory_usage'] = perf_results['mem'] 277 perf_keyval['temperature'] = perf_results['temp'] 278 self.write_perf_keyval(perf_keyval) 279 280 281 def _write_perf_results(self, perf_results): 282 """Write perf results to results-chart.json file for Perf Dashboard. 283 284 @param perf_results: dict of attribute performance metrics. 285 """ 286 cpu_metric = perf_results['cpu'] 287 mem_metric = perf_results['mem'] 288 ec_metric = perf_results['temp'] 289 self.output_perf_value(description='cpu_usage', value=cpu_metric, 290 units='%', higher_is_better=False) 291 self.output_perf_value(description='mem_usage', value=mem_metric, 292 units='%', higher_is_better=False) 293 self.output_perf_value(description='max_temp', value=ec_metric, 294 units='Celsius', higher_is_better=False) 295 296 297 def _read_perf_results(self): 298 """Read perf results from results-chart.json file for Perf Dashboard. 299 300 @returns dict of perf results, formatted as JSON chart data. 301 """ 302 results_file = os.path.join(self.resultsdir, 'results-chart.json') 303 with open(results_file, 'r') as fp: 304 contents = fp.read() 305 chart_data = json.loads(contents) 306 # TODO(krishnargv): refactor this with a better method to delete. 307 open(results_file, 'w').close() 308 return chart_data 309 310 311 def _record_perf_measurements(self, perf_values, perf_writer): 312 """Record attribute performance measurements, and write to file. 313 314 @param perf_values: dict of attribute performance values. 315 @param perf_writer: file to write performance measurements. 316 """ 317 # Get performance measurements. 318 cpu_usage = '%.3f' % self._get_cpu_usage() 319 mem_usage = '%.3f' % self._get_memory_usage() 320 max_temp = '%.3f' % self._get_temperature_data() 321 322 # Append measurements to attribute lists in perf values dictionary. 323 perf_values['cpu'].append(cpu_usage) 324 perf_values['mem'].append(mem_usage) 325 perf_values['temp'].append(max_temp) 326 327 # Write performance measurements to perf timestamped file. 328 time_stamp = time.strftime('%Y/%m/%d %H:%M:%S') 329 perf_writer.writerow([time_stamp, cpu_usage, mem_usage, max_temp]) 330 logging.info('Time: %s, CPU: %s, Mem: %s, Temp: %s', 331 time_stamp, cpu_usage, mem_usage, max_temp) 332 333 334 def _record_90th_metrics(self, perf_values, perf_metrics): 335 """Record 90th percentile metric of attribute performance values. 336 337 @param perf_values: dict attribute performance values. 338 @param perf_metrics: dict attribute 90%-ile performance metrics. 339 """ 340 # Calculate 90th percentile for each attribute. 341 cpu_values = perf_values['cpu'] 342 mem_values = perf_values['mem'] 343 temp_values = perf_values['temp'] 344 cpu_metric = sorted(cpu_values)[(len(cpu_values) * 9) // 10] 345 mem_metric = sorted(mem_values)[(len(mem_values) * 9) // 10] 346 temp_metric = sorted(temp_values)[(len(temp_values) * 9) // 10] 347 logging.info('Performance values: %s', perf_values) 348 logging.info('90th percentile: cpu: %s, mem: %s, temp: %s', 349 cpu_metric, mem_metric, temp_metric) 350 351 # Append 90th percentile to each attribute performance metric. 352 perf_metrics['cpu'].append(cpu_metric) 353 perf_metrics['mem'].append(mem_metric) 354 perf_metrics['temp'].append(temp_metric) 355 356 357 def _get_median_metrics(self, metrics): 358 """Returns median of each attribute performance metric. 359 360 If no metric values were recorded, return 0 for each metric. 361 362 @param metrics: dict of attribute performance metric lists. 363 @returns dict of attribute performance metric medians. 364 """ 365 if len(metrics['cpu']): 366 cpu_metric = sorted(metrics['cpu'])[len(metrics['cpu']) // 2] 367 mem_metric = sorted(metrics['mem'])[len(metrics['mem']) // 2] 368 temp_metric = sorted(metrics['temp'])[len(metrics['temp']) // 2] 369 else: 370 cpu_metric = 0 371 mem_metric = 0 372 temp_metric = 0 373 logging.info('Median of 90th percentile: cpu: %s, mem: %s, temp: %s', 374 cpu_metric, mem_metric, temp_metric) 375 return {'cpu': cpu_metric, 'mem': mem_metric, 'temp': temp_metric} 376 377 378 def _setup_kiosk_app_on_dut(self, kiosk_app_attributes=None): 379 """Enroll the DUT and setup a Kiosk app.""" 380 info = self.client.host_info_store.get() 381 app_config_id = info.get_label_value('app_config_id') 382 if app_config_id and app_config_id.startswith(':'): 383 app_config_id = app_config_id[1:] 384 if kiosk_app_attributes: 385 kiosk_app_attributes = kiosk_app_attributes.rstrip() 386 self.kiosk_app_name, ext_id = kiosk_app_attributes.split(':')[:2] 387 388 tpm_utils.ClearTPMOwnerRequest(self.client) 389 logging.info("Enrolling the DUT to Kiosk mode") 390 autotest.Autotest(self.client).run_test( 391 'enterprise_KioskEnrollment', 392 kiosk_app_attributes=kiosk_app_attributes, 393 check_client_result=True) 394 395 if self.kiosk_app_name == 'riseplayer': 396 self.kiosk_facade.config_rise_player(ext_id, app_config_id) 397 398 399 def _run_perf_capture_cycle(self): 400 """Track performance of Chrome OS over a long period of time. 401 402 This method collects performance measurements, and calculates metrics 403 to upload to the performance dashboard. It creates two files to 404 collect and store performance values and results: perf_<timestamp>.csv 405 and perf_aggregated.csv. 406 407 At the start, it creates a unique perf timestamped file in the test's 408 temp_dir. As the cycle runs, it saves a time-stamped performance 409 value after each sample interval. Periodically, it calculates 410 the 90th percentile performance metrics from these values. 411 412 The perf_<timestamp> files on the device will survive multiple runs 413 of the longevity_Tracker by the server-side test, and will also 414 survive multiple runs of the server-side test. 415 416 At the end, it opens the perf aggregated file in the test's temp_dir, 417 and appends the contents of the perf timestamped file. It then 418 copies the perf aggregated file to the results directory as perf.csv. 419 This perf.csv file will be consumed by the AutoTest backend when the 420 server-side test ends. 421 422 Note that the perf_aggregated.csv file will grow larger with each run 423 of longevity_Tracker on the device by the server-side test. However, 424 the server-side test will delete file in the end. 425 426 This method will capture perf metrics every SAMPLE_INTERVAL secs, at 427 each METRIC_INTERVAL the 90 percentile of the collected metrics is 428 calculated and saved. The perf capture runs for PERF_CAPTURE_DURATION 429 secs. At the end of the PERF_CAPTURE_DURATION time interval the median 430 value of all 90th percentile metrics is returned. 431 432 @returns list of median performance metrics. 433 """ 434 test_start_time = time.time() 435 436 perf_values = {'cpu': [], 'mem': [], 'temp': []} 437 perf_metrics = {'cpu': [], 'mem': [], 'temp': []} 438 439 # Create perf_<timestamp> file and writer. 440 timestamp_fname = (PERF_FILE_NAME_PREFIX + 441 time.strftime('_%Y-%m-%d_%H-%M') + '.csv') 442 timestamp_fpath = os.path.join(self.temp_dir, timestamp_fname) 443 timestamp_file = self._open_perf_file(timestamp_fpath) 444 timestamp_writer = csv.writer(timestamp_file) 445 446 # Align time of loop start with the sample interval. 447 test_elapsed_time = self.elapsed_time(test_start_time) 448 time.sleep(self.syncup_time(test_elapsed_time, SAMPLE_INTERVAL)) 449 test_elapsed_time = self.elapsed_time(test_start_time) 450 451 metric_start_time = time.time() 452 metric_prev_time = metric_start_time 453 454 metric_elapsed_prev_time = self.elapsed_time(metric_prev_time) 455 offset = self.modulo_time(metric_elapsed_prev_time, METRIC_INTERVAL) 456 metric_timer = metric_elapsed_prev_time + offset 457 458 while self.elapsed_time(test_start_time) <= PERF_CAPTURE_DURATION: 459 self._record_perf_measurements(perf_values, timestamp_writer) 460 461 # Periodically calculate and record 90th percentile metrics. 462 metric_elapsed_prev_time = self.elapsed_time(metric_prev_time) 463 metric_timer = metric_elapsed_prev_time + offset 464 if metric_timer >= METRIC_INTERVAL: 465 self._record_90th_metrics(perf_values, perf_metrics) 466 perf_values = {'cpu': [], 'mem': [], 'temp': []} 467 468 # Set previous time to current time. 469 metric_prev_time = time.time() 470 metric_elapsed_prev_time = self.elapsed_time(metric_prev_time) 471 472 metric_elapsed_time = self.elapsed_time(metric_start_time) 473 offset = self.modulo_time(metric_elapsed_time, METRIC_INTERVAL) 474 475 # Set the timer to time elapsed plus offset to next interval. 476 metric_timer = metric_elapsed_prev_time + offset 477 478 # Sync the loop time to the sample interval. 479 test_elapsed_time = self.elapsed_time(test_start_time) 480 time.sleep(self.syncup_time(test_elapsed_time, SAMPLE_INTERVAL)) 481 482 # Close perf timestamp file. 483 timestamp_file.close() 484 485 # Open perf timestamp file to read, and aggregated file to append. 486 timestamp_file = open(timestamp_fpath, 'r') 487 aggregated_fname = (PERF_FILE_NAME_PREFIX + '_aggregated.csv') 488 aggregated_fpath = os.path.join(self.temp_dir, aggregated_fname) 489 aggregated_file = self._open_perf_file(aggregated_fpath) 490 491 # Append contents of perf timestamp file to perf aggregated file. 492 self._append_to_aggregated_file(timestamp_file, aggregated_file) 493 timestamp_file.close() 494 aggregated_file.close() 495 496 # Copy perf aggregated file to test results directory. 497 self._copy_aggregated_to_resultsdir(aggregated_fpath) 498 499 # Return median of each attribute performance metric. 500 logging.info("Perf_metrics: %r ", perf_metrics) 501 return self._get_median_metrics(perf_metrics) 502 503 504 def run_once(self, host=None, kiosk_app_attributes=None): 505 self.client = host 506 self.kiosk_app_name = None 507 508 factory = remote_facade_factory.RemoteFacadeFactory( 509 host, no_chrome=True) 510 self.system_facade = factory.create_system_facade() 511 self.kiosk_facade = factory.create_kiosk_facade() 512 513 self._setup_kiosk_app_on_dut(kiosk_app_attributes) 514 time.sleep(STABILIZATION_DURATION) 515 self._initialize_test_variables() 516 517 self.perf_results = {'cpu': '0', 'mem': '0', 'temp': '0'} 518 for iteration in range(PERF_CAPTURE_ITERATIONS): 519 #TODO(krishnargv@): Add a method to verify that the Kiosk app is 520 # active and is running on the DUT. 521 logging.info("Running perf_capture Iteration: %d", iteration+1) 522 self.perf_results = self._run_perf_capture_cycle() 523 self._write_perf_keyvals(self.perf_results) 524 self._write_perf_results(self.perf_results) 525 526 # Post perf results directly to performance dashboard. You may view 527 # uploaded data at https://chromeperf.appspot.com/new_points, 528 # with test path pattern=ChromeOS_Enterprise/cros-*/longevity*/* 529 chart_data = self._read_perf_results() 530 data_obj = self._format_data_for_upload(chart_data) 531 self._send_to_dashboard(data_obj) 532 tpm_utils.ClearTPMOwnerRequest(self.client) 533