1#!/usr/bin/python
2# @lint-avoid-python-3-compatibility-imports
3#
4# zfsslower  Trace slow ZFS operations.
5#            For Linux, uses BCC, eBPF.
6#
7# USAGE: zfsslower [-h] [-j] [-p PID] [min_ms]
8#
9# This script traces common ZFS file operations: reads, writes, opens, and
10# syncs. It measures the time spent in these operations, and prints details
11# for each that exceeded a threshold.
12#
13# WARNING: This adds low-overhead instrumentation to these ZFS operations,
14# including reads and writes from the file system cache. Such reads and writes
15# can be very frequent (depending on the workload; eg, 1M/sec), at which
16# point the overhead of this tool (even if it prints no "slower" events) can
17# begin to become significant.
18#
19# This works by using kernel dynamic tracing of the ZPL interface, and will
20# need updates to match any changes to this interface.
21#
22# By default, a minimum millisecond threshold of 10 is used.
23#
24# Copyright 2016 Netflix, Inc.
25# Licensed under the Apache License, Version 2.0 (the "License")
26#
27# 14-Feb-2016   Brendan Gregg   Created this.
28# 16-Oct-2016   Dina Goldshtein -p to filter by process ID.
29
30from __future__ import print_function
31from bcc import BPF
32import argparse
33from time import strftime
34import ctypes as ct
35
# arguments
examples = """examples:
    ./zfsslower             # trace operations slower than 10 ms (default)
    ./zfsslower 1           # trace operations slower than 1 ms
    ./zfsslower -j 1        # ... 1 ms, parsable output (csv)
    ./zfsslower 0           # trace all operations (warning: verbose)
    ./zfsslower -p 185      # trace PID 185 only
"""
parser = argparse.ArgumentParser(
    description="Trace common ZFS file operations slower than a threshold",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
    help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
# validate in argparse (type=int) so a non-numeric threshold produces a
# clean usage error rather than a ValueError traceback
parser.add_argument("min_ms", nargs="?", default=10, type=int,
    help="minimum I/O duration to trace, in ms (default 10)")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_ms = args.min_ms    # latency threshold in milliseconds (0 = trace all)
pid = args.pid          # PID filter (string or None); used verbatim in BPF text
csv = args.csv          # parsable output mode
debug = 0               # set to 1 to dump the generated BPF program
61
# define BPF program: C source compiled by bcc at runtime. It traces the
# ZPL (ZFS POSIX Layer) entry points, stashing a timestamp/file pointer
# per thread on entry and emitting a perf event on return when the
# latency exceeds the threshold. FILTER_PID and FILTER_US are textual
# placeholders rewritten below before compilation.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>

// XXX: switch these to char's when supported
#define TRACE_READ      0
#define TRACE_WRITE     1
#define TRACE_OPEN      2
#define TRACE_FSYNC     3

struct val_t {
    u64 ts;
    u64 offset;
    struct file *fp;
};

struct data_t {
    // XXX: switch some to u32's when supported
    u64 ts_us;
    u64 type;
    u64 size;
    u64 offset;
    u64 delta_us;
    u64 pid;
    char task[TASK_COMM_LEN];
    char file[DNAME_INLINE_LEN];
};

BPF_HASH(entryinfo, u64, struct val_t);
BPF_PERF_OUTPUT(events);

//
// Store timestamp and size on entry
//

// zpl_read(), zpl_write():
int trace_rw_entry(struct pt_regs *ctx, struct file *filp, char __user *buf,
    size_t len, loff_t *ppos)
{
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    if (FILTER_PID)
        return 0;

    // store filep and timestamp by id
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = filp;
    val.offset = *ppos;
    if (val.fp)
        entryinfo.update(&id, &val);

    return 0;
}

// zpl_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
    struct file *filp)
{
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    if (FILTER_PID)
        return 0;

    // store filep and timestamp by id
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = filp;
    val.offset = 0;
    if (val.fp)
        entryinfo.update(&id, &val);

    return 0;
}

// zpl_fsync():
int trace_fsync_entry(struct pt_regs *ctx, struct file *filp)
{
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    if (FILTER_PID)
        return 0;

    // store filp and timestamp by id
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = filp;
    val.offset = 0;
    if (val.fp)
        entryinfo.update(&id, &val);

    return 0;
}

//
// Output
//

static int trace_return(struct pt_regs *ctx, int type)
{
    struct val_t *valp;
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    valp = entryinfo.lookup(&id);
    if (valp == 0) {
        // missed tracing issue or filtered
        return 0;
    }

    // calculate delta
    u64 ts = bpf_ktime_get_ns();
    u64 delta_us = (ts - valp->ts) / 1000;
    entryinfo.delete(&id);
    if (FILTER_US)
        return 0;

    // populate output struct
    u32 size = PT_REGS_RC(ctx);
    struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
        .pid = pid};
    data.ts_us = ts / 1000;
    data.offset = valp->offset;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    struct qstr qs = valp->fp->f_path.dentry->d_name;
    if (qs.len == 0)
        return 0;
    bpf_probe_read(&data.file, sizeof(data.file), (void *)qs.name);

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    return 0;
}

int trace_read_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_READ);
}

int trace_write_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_WRITE);
}

int trace_open_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_OPEN);
}

int trace_fsync_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_FSYNC);
}

"""
# Substitute the FILTER_* placeholders in the BPF program text.
# A threshold of 0 disables latency filtering; no -p disables PID filtering
# (the C condition "0" is always false, so nothing is skipped).
us_filter = '0' if min_ms == 0 else 'delta_us <= %s' % str(min_ms * 1000)
pid_filter = 'pid != %s' % pid if args.pid else '0'
bpf_text = bpf_text.replace('FILTER_US', us_filter)
bpf_text = bpf_text.replace('FILTER_PID', pid_filter)

# dump the generated program for debugging / --ebpf consumers
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
238
# kernel->user event data: struct data_t
DNAME_INLINE_LEN = 32   # linux/dcache.h
TASK_COMM_LEN = 16      # linux/sched.h

class Data(ct.Structure):
    # Python mirror of the BPF program's struct data_t: six u64 scalars
    # followed by two fixed-size char arrays. Field order and widths must
    # match the C declaration exactly for ct.cast() to decode events.
    _fields_ = [(scalar, ct.c_ulonglong) for scalar in
                ("ts_us", "type", "size", "offset", "delta_us", "pid")] + [
        ("task", ct.c_char * TASK_COMM_LEN),
        ("file", ct.c_char * DNAME_INLINE_LEN),
    ]
253
# process event
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one struct data_t and print one row.

    Prints comma-separated fields when -j/--csv was given, otherwise a
    fixed-width human-readable line.
    """
    event = ct.cast(data, ct.POINTER(Data)).contents

    # map the BPF program's TRACE_* numeric codes to one-letter op types;
    # named `op` to avoid shadowing the builtin `type`
    op = {1: 'W', 2: 'O', 3: 'S'}.get(event.type, 'R')

    # decode the byte strings once for both output formats
    task = event.task.decode('utf-8', 'replace')
    filename = event.file.decode('utf-8', 'replace')

    if csv:
        print("%d,%s,%d,%s,%d,%d,%d,%s" % (
            event.ts_us, task, event.pid, op, event.size, event.offset,
            event.delta_us, filename))
        return
    print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"),
        task, event.pid, op, event.size, event.offset / 1024,
        float(event.delta_us) / 1000, filename))
276
# initialize BPF
b = BPF(text=bpf_text)

# Common file functions. The ZPL read/write entry point names vary by
# ZFS-on-Linux version; probe the kernel symbol table once per candidate
# prefix and reuse the result for both entry and return probes, so the
# kprobe and kretprobe attachments cannot diverge (the original scanned
# the symbol table twice per prefix).
if BPF.get_kprobe_functions(b'zpl_iter'):
    read_fn, write_fn = "zpl_iter_read", "zpl_iter_write"
elif BPF.get_kprobe_functions(b'zpl_aio'):
    read_fn, write_fn = "zpl_aio_read", "zpl_aio_write"
else:
    read_fn, write_fn = "zpl_read", "zpl_write"
b.attach_kprobe(event=read_fn, fn_name="trace_rw_entry")
b.attach_kprobe(event=write_fn, fn_name="trace_rw_entry")
b.attach_kprobe(event="zpl_open", fn_name="trace_open_entry")
b.attach_kprobe(event="zpl_fsync", fn_name="trace_fsync_entry")
b.attach_kretprobe(event=read_fn, fn_name="trace_read_return")
b.attach_kretprobe(event=write_fn, fn_name="trace_write_return")
b.attach_kretprobe(event="zpl_open", fn_name="trace_open_return")
b.attach_kretprobe(event="zpl_fsync", fn_name="trace_fsync_return")
303
# print the output header: CSV column names with -j, otherwise a banner
# plus fixed-width column headings matching print_event()'s format
if csv:
    print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
    banner = ("Tracing ZFS operations" if min_ms == 0 else
        "Tracing ZFS operations slower than %d ms" % min_ms)
    print(banner)
    print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
        "BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))
314
# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        # exit cleanly on Ctrl-C instead of dumping a traceback
        exit()