# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Library to run fio scripts.

fio_runner launches fio and collects the results.
The output dictionary can be added to an autotest keyval:
        results = {}
        results.update(fio_util.fio_runner(self, job_file, env_vars))
        self.write_perf_keyval(results)

The decoding class can be invoked independently.
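
For example, fio json output can also be decoded directly (illustrative
sketch; 'job_file' is a placeholder and utils is autotest's utils module):
        json_text = utils.run('fio --output-format=json job_file').stdout
        results.update(fio_util.fio_parser(json_text))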

"""

import json
import logging
import re

import common
from autotest_lib.client.bin import utils

class fio_graph_generator():
    """
    Generate graphs from the fio logs that are created when these options are
    specified:
    - write_bw_log
    - write_iops_log
    - write_lat_log

    The following limitations apply:
    - Log file names must be in the format jobname_testpass
    - Graphs are generated using the Google chart API -> Internet access is
      required to view them.
    """

    html_head = """
<html>
  <head>
    <script type="text/javascript" src="https://www.google.com/jsapi"></script>
    <script type="text/javascript">
      google.load("visualization", "1", {packages:["corechart"]});
      google.setOnLoadCallback(drawChart);
      function drawChart() {
"""

    html_tail = """
        var chart_div = document.getElementById('chart_div');
        var chart = new google.visualization.ScatterChart(chart_div);
        chart.draw(data, options);
      }
    </script>
  </head>
  <body>
    <div id="chart_div" style="width: 100%; height: 100%;"></div>
  </body>
</html>
"""

    h_title = { True: 'Percentile', False: 'Time (s)' }
    v_title = { 'bw'  : 'Bandwidth (KB/s)',
                'iops': 'IOPs',
                'lat' : 'Total latency (us)',
                'clat': 'Completion latency (us)',
                'slat': 'Submission latency (us)' }
    graph_title = { 'bw'  : 'bandwidth',
                    'iops': 'IOPs',
                    'lat' : 'total latency',
                    'clat': 'completion latency',
                    'slat': 'submission latency' }

    test_name = ''
    test_type = ''
    pass_list = ''

    @classmethod
    def _parse_log_file(cls, file_name, pass_index, pass_count, percentile):
        """
        Generate rows for google.visualization.DataTable from one log file.
        The log file is the one generated by the write_{bw,lat,iops}_log
        option in the fio job file.

        The fio log file format is: timestamp, value, direction, blocksize
        The output format for each row is { c: list of { v: value} }

        @param file_name:  log file name to read data from
        @param pass_index: index of current run pass
        @param pass_count: number of all test run passes
        @param percentile: flag to use percentile as key instead of timestamp

        @return: list of data rows in google.visualization.DataTable format
        """
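        # Illustrative example (not from the source): a log line such as
        # "1000, 2048, 0, 4096" is read as timestamp 1000 (ms) and value 2048,
        # and is stored below as the data point [1.0, 2048] (seconds, value).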
        # Read data from log
        with open(file_name, 'r') as f:
            data = []

            for line in f.readlines():
                if not line:
                    break
                t, v, _, _ = [int(x) for x in line.split(', ')]
                data.append([t / 1000.0, v])

        # Sort & calculate percentile
        if percentile:
            data.sort(key=lambda x: x[1])
            l = len(data)
            for i in range(l):
                data[i][0] = 100 * (i + 0.5) / l

        # Generate the data row
        all_row = []
        row = [None] * (pass_count + 1)
        for d in data:
            row[0] = {'v' : '%.3f' % d[0]}
            row[pass_index + 1] = {'v': d[1]}
            all_row.append({'c': row[:]})

        return all_row

    @classmethod
    def _gen_data_col(cls, pass_list, percentile):
        """
        Generate columns for google.visualization.DataTable.

        The output format is a list of dicts of label and type. In this case,
        the type is always number.

        @param pass_list:  list of test run passes
        @param percentile: flag to use percentile as key instead of timestamp

        @return: list of columns in google.visualization.DataTable format
        """
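        # Illustrative output (not from the source) for a two-pass run
        # [('p1', ...), ('p2', ...)] without percentile:
        #     [{'label': 'time', 'type': 'number'},
        #      {'label': 'p1', 'type': 'number'},
        #      {'label': 'p2', 'type': 'number'}]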
        if percentile:
            col_name_list = ['percentile'] + [p[0] for p in pass_list]
        else:
            col_name_list = ['time'] + [p[0] for p in pass_list]

        return [{'label': name, 'type': 'number'} for name in col_name_list]

    @classmethod
    def _gen_data_row(cls, test_type, pass_list, percentile):
        """
        Generate rows for google.visualization.DataTable by generating all log
        file names and calling _parse_log_file for each file.

        @param test_type: type of value collected for current test. i.e. IOPs
        @param pass_list: list of run passes for current test
        @param percentile: flag to use percentile as key instead of timestamp

        @return: list of data rows in google.visualization.DataTable format
        """
        all_row = []
        pass_count = len(pass_list)
        for pass_index, log_file_name in enumerate([p[1] for p in pass_list]):
            all_row.extend(cls._parse_log_file(log_file_name, pass_index,
                                               pass_count, percentile))
        return all_row

    @classmethod
    def _write_data(cls, f, test_type, pass_list, percentile):
        """
        Write google.visualization.DataTable object to output file.
        https://developers.google.com/chart/interactive/docs/reference

        @param f: html file to update
        @param test_type: type of value collected for current test. i.e. IOPs
        @param pass_list: list of run passes for current test
        @param percentile: flag to use percentile as key instead of timestamp
        """
        col = cls._gen_data_col(pass_list, percentile)
        row = cls._gen_data_row(test_type, pass_list, percentile)
        data_dict = {'cols' : col, 'rows' : row}

        f.write('var data = new google.visualization.DataTable(')
        json.dump(data_dict, f)
        f.write(');\n')

    @classmethod
    def _write_option(cls, f, test_name, test_type, percentile):
        """
        Write the options used to render the scatter graph to the output file.
        https://google-developers.appspot.com/chart/interactive/docs/gallery/scatterchart

        @param f: html file to update
        @param test_name: name of current workload. i.e. randwrite
        @param test_type: type of value collected for current test. i.e. IOPs
        @param percentile: flag to use percentile as key instead of timestamp
        """
        option = {'pointSize': 1}
        if percentile:
            option['title'] = ('Percentile graph of %s for %s workload' %
                               (cls.graph_title[test_type], test_name))
        else:
            option['title'] = ('Graph of %s for %s workload over time' %
                               (cls.graph_title[test_type], test_name))

        option['hAxis'] = {'title': cls.h_title[percentile]}
        option['vAxis'] = {'title': cls.v_title[test_type]}
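        # Illustrative resulting options for test_name 'randwrite' and
        # test_type 'bw' without percentile (derived from the title tables
        # above; not from the source):
        #     {'pointSize': 1,
        #      'title': 'Graph of bandwidth for randwrite workload over time',
        #      'hAxis': {'title': 'Time (s)'},
        #      'vAxis': {'title': 'Bandwidth (KB/s)'}}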

        f.write('var options = ')
        json.dump(option, f)
        f.write(';\n')

    @classmethod
    def _write_graph(cls, test_name, test_type, pass_list, percentile=False):
        """
        Generate graph for test name / test type

        @param test_name: name of current workload. i.e. randwrite
        @param test_type: type of value collected for current test. i.e. IOPs
        @param pass_list: list of run passes for current test
        @param percentile: flag to use percentile as key instead of timestamp
        """
        logging.info('fio_graph_generator._write_graph %s %s %s',
                     test_name, test_type, str(pass_list))

        if percentile:
            out_file_name = '%s_%s_percentile.html' % (test_name, test_type)
        else:
            out_file_name = '%s_%s.html' % (test_name, test_type)

        with open(out_file_name, 'w') as f:
            f.write(cls.html_head)
            cls._write_data(f, test_type, pass_list, percentile)
            cls._write_option(f, test_name, test_type, percentile)
            f.write(cls.html_tail)

    def __init__(self, test_name, test_type, pass_list):
        """
        @param test_name: name of current workload. i.e. randwrite
        @param test_type: type of value collected for current test. i.e. IOPs
        @param pass_list: list of run passes for current test
        """
        self.test_name = test_name
        self.test_type = test_type
        self.pass_list = pass_list

    def run(self):
        """
        Run the graph generator.
        """
        self._write_graph(self.test_name, self.test_type, self.pass_list,
                          False)
        self._write_graph(self.test_name, self.test_type, self.pass_list,
                          True)


def fio_parse_dict(d, prefix):
    """
    Parse a fio json dict.

    Recursively flatten the json dict to generate an autotest perf dict.

    @param d: input dict
    @param prefix: name prefix of the key
    """
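    # Illustrative example (not from the source): with prefix '_job0_read',
    # the dict {'bw': 123, 'clat': {'mean': 45}} flattens to
    # {'_job0_read_bw': 123, '_job0_read_clat_mean': 45}.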

    # No need to parse something that didn't run, such as read stats in a
    # write job.
    if 'io_bytes' in d and d['io_bytes'] == 0:
        return {}

    results = {}
    for k, v in d.items():

        # remove >, >=, <, <=
        for c in '>=<':
            k = k.replace(c, '')

        key = prefix + '_' + k

        if type(v) is dict:
            results.update(fio_parse_dict(v, key))
        else:
            results[key] = v
    return results


def fio_parser(lines, prefix=None):
    """
    Parse the fio json output.

    This collects all metrics given by fio and labels them according to the
    unit of measurement and the test case name.

    @param lines: text of the fio json output.
    @param prefix: prefix for result keys.
    """
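    # Illustrative result keys (not from the source): a job named 'randwrite'
    # with no prefix produces keys such as '_randwrite_read_bw' and
    # '_randwrite_write_clat_mean', depending on which stats fio reports.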
    results = {}
    fio_dict = json.loads(lines)

    if prefix:
        prefix = prefix + '_'
    else:
        prefix = ''

    results[prefix + 'fio_version'] = fio_dict['fio version']

    if 'disk_util' in fio_dict:
        results.update(fio_parse_dict(fio_dict['disk_util'][0],
                                      prefix + 'disk'))

    for job in fio_dict['jobs']:
        job_prefix = '_' + prefix + job['jobname']
        job.pop('jobname')

        for k, v in job.iteritems():
            # Ignore "job options"; its alphanumeric keys confuse tko.
            # Besides, these keys are redundant.
            if k == 'job options':
                continue
            results.update(fio_parse_dict({k:v}, job_prefix))

    return results

def fio_generate_graph():
    """
    Scan for fio log files in the output directory and send the data for each
    graph to the fio_graph_generator class.
    """
    log_types = ['bw', 'iops', 'lat', 'clat', 'slat']

    # Move the fio logs to the result dir.
    for log_type in log_types:
        logging.info('log_type %s', log_type)
        logs = utils.system_output('ls *_%s.*log' % log_type,
                                   ignore_status=True)
        if not logs:
            continue

        pattern = r"""(?P<jobname>.*)_                    # jobname
                      ((?P<runpass>p\d+)_|)               # pass
                      (?P<type>bw|iops|lat|clat|slat)     # type
                      (.(?P<thread>\d+)|)                 # thread id for newer fio.
                      .log
                   """
        matcher = re.compile(pattern, re.X)
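        # Illustrative log file names that the pattern accepts (examples, not
        # from the source):
        #     randwrite_bw.log   -> jobname 'randwrite', type 'bw'
        #     seqread_lat.3.log  -> jobname 'seqread', type 'lat', thread '3'
        #                           (per-thread logs from newer fio)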

        pass_list = []
        current_job = ''

        for log in logs.split():
            match = matcher.match(log)
            if not match:
                logging.warn('Unknown log file %s', log)
                continue

            jobname = match.group('jobname')
            runpass = match.group('runpass') or '1'
            if match.group('thread'):
                runpass += '_' + match.group('thread')

            # All files for a particular job name are grouped together to
            # create a graph that compares performance between results from
            # each pass.
            if jobname != current_job:
                if pass_list:
                    fio_graph_generator(current_job, log_type, pass_list).run()
                current_job = jobname
                pass_list = []
            pass_list.append((runpass, log))

        if pass_list:
            fio_graph_generator(current_job, log_type, pass_list).run()

        cmd = 'mv *_%s.*log results' % log_type
        utils.run(cmd, ignore_status=True)
        utils.run('mv *.html results', ignore_status=True)



def fio_runner(test, job, env_vars,
               name_prefix=None,
               graph_prefix=None):
    """
    Runs fio.

    Build a result keyval and performance json.
    The JSON would look like:
    {"description": "<name_prefix>_<model>_<size>G",
     "graph": "<graph_prefix>_1m_write_wr_lat_99.00_percent_usec",
     "higher_is_better": false, "units": "us", "value": "xxxx"}
    {...


    @param test: test to upload perf value to
    @param job: fio config file to use
    @param env_vars: environment variables fio will substitute in the fio
        config file.
    @param name_prefix: prefix of the descriptions to use in chrome perf
        dashboard.
    @param graph_prefix: prefix of the graph name in chrome perf dashboard
        and result keyvals.
    @return fio results.

    """

    # Run fio with ionice -c 3 so it doesn't lock out other
    # processes from the disk while it is running.
    # If you want to run the fio test for performance purposes,
    # take out the ionice and disable hung process detection:
    # "echo 0 > /proc/sys/kernel/hung_task_timeout_secs"
    # -c 3 = Idle
    # Tried lowest priority for "best effort" but still failed
    ionice = 'ionice -c 3'
    options = ['--output-format=json']
    fio_cmd_line = ' '.join([env_vars, ionice, 'fio',
                             ' '.join(options),
                             '"' + job + '"'])
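    # Illustrative resulting command line (values are examples only):
    #     FILENAME=/dev/sdb ionice -c 3 fio --output-format=json "disk.fio"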
    fio = utils.run(fio_cmd_line)

    logging.debug(fio.stdout)

    fio_generate_graph()

    filename = re.match('.*FILENAME=(?P<f>[^ ]*)', env_vars).group('f')
    diskname = utils.get_disk_from_filename(filename)

    if diskname:
        model = utils.get_disk_model(diskname)
        size = utils.get_disk_size_gb(diskname)
        perfdb_name = '%s_%dG' % (model, size)
    else:
        perfdb_name = filename.replace('/', '_')

    if name_prefix:
        perfdb_name = name_prefix + '_' + perfdb_name

    result = fio_parser(fio.stdout, prefix=name_prefix)
    if not graph_prefix:
        graph_prefix = ''

    for k, v in result.iteritems():
        # Remove the name prefix from the key and replace it with the graph
        # prefix.
        if name_prefix:
            k = k.replace('_' + name_prefix, graph_prefix)

        # Keep the graph names the same as in the old code.
        if k.endswith('bw'):
            test.output_perf_value(description=perfdb_name, graph=k, value=v,
                                   units='KB_per_sec', higher_is_better=True)
        elif k.rstrip('0').endswith('clat_percentile_99.'):
            test.output_perf_value(description=perfdb_name, graph=k, value=v,
                                   units='us', higher_is_better=False)
        elif k.rstrip('0').endswith('clat_ns_percentile_99.'):
            test.output_perf_value(description=perfdb_name, graph=k, value=v,
                                   units='ns', higher_is_better=False)
    return result