/*
 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>

#include "blktrace.h"
#include "globals.h"

#define DEF_LEN	(16 * 1024 * 1024)

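/*
 * Input-file state: [cur_min, cur_max) is the byte range of the file
 * currently mapped at cur_map; cur is the absolute offset of the next
 * trace record to parse, and total_size is the size of the whole file.
 */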
static int fd;
static void *cur_map = MAP_FAILED;
static off_t cur_min, cur, cur_max, total_size;
static size_t len;
static struct blk_io_trace *next_t;
static long pgsz;

int data_is_native = -1;

static inline size_t min_len(size_t a, size_t b)
{
	return a < b ? a : b;
}

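/*
 * Copy the raw trace at @t into @tp, byte-swapping each field when the
 * data was not recorded in native byte order, and duplicate any trailing
 * PDU payload into a freshly malloc()ed buffer at *@pdu (NULL if there is
 * none). Returns the number of bytes consumed from the mapped file.
 */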
static inline size_t convert_to_cpu(struct blk_io_trace *t,
				    struct blk_io_trace *tp,
				    void **pdu)
{
	if (data_is_native == -1)
		check_data_endianness(t->magic);

	if (data_is_native)
		memcpy(tp, t, sizeof(*tp));
	else {
		tp->magic	= be32_to_cpu(t->magic);
		tp->sequence	= be32_to_cpu(t->sequence);
		tp->time	= be64_to_cpu(t->time);
		tp->sector	= be64_to_cpu(t->sector);
		tp->bytes	= be32_to_cpu(t->bytes);
		tp->action	= be32_to_cpu(t->action);
		tp->pid		= be32_to_cpu(t->pid);
		tp->device	= be32_to_cpu(t->device);
		tp->cpu		= be16_to_cpu(t->cpu);
		tp->error	= be16_to_cpu(t->error);
		tp->pdu_len	= be16_to_cpu(t->pdu_len);
	}

	if (tp->pdu_len) {
		*pdu = malloc(tp->pdu_len);
		memcpy(*pdu, t + 1, tp->pdu_len);
	} else
		*pdu = NULL;

	return sizeof(*tp) + tp->pdu_len;
}

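/*
 * Slide the read-only mmap() window so that the current offset (cur)
 * falls inside it: drop the old mapping, page-align the new start, and
 * map up to DEF_LEN bytes. Returns non-zero while more data remains to
 * parse, zero at end of input.
 */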
static int move_map(void)
{
	if (cur_map != MAP_FAILED)
		munmap(cur_map, len);

	cur_min = (cur & ~(pgsz - 1));
	len = min_len(DEF_LEN, total_size - cur_min);
	if (len < sizeof(*next_t))
		return 0;

	cur_map = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, cur_min);
	if (cur_map == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	cur_max = cur_min + len;
	return (cur < cur_max);
}

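/*
 * Open the binary trace file @fname, record its size, and establish the
 * initial mapping. Exits with an error on open/stat/mmap failure, and
 * exits cleanly if the file is too small to hold a single trace.
 */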
void setup_ifile(char *fname)
{
	struct stat buf;

	pgsz = sysconf(_SC_PAGESIZE);

	fd = my_open(fname, O_RDONLY);
	if (fd < 0) {
		perror(fname);
		exit(1);
	}
	if (fstat(fd, &buf) < 0) {
		perror(fname);
		exit(1);
	}
	total_size = buf.st_size;

	if (!move_map())
		exit(0);
}

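/*
 * Tear down the current mapping and close the input file.
 */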
void cleanup_ifile(void)
{
	if (cur_map != MAP_FAILED)
		munmap(cur_map, len);
	close(fd);
}

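/*
 * Fetch the next trace record into @t and its PDU into *@pdu, remapping
 * the window when fewer than 512 bytes remain in it. Returns 1 on
 * success; returns 0 and cleans up the input file when the trace data
 * is exhausted.
 */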
int next_trace(struct blk_io_trace *t, void **pdu)
{
	size_t this_len;

	if ((cur + 512) > cur_max)
		if (!move_map()) {
			cleanup_ifile();
			return 0;
		}

	next_t = cur_map + (cur - cur_min);
	this_len = convert_to_cpu(next_t, t, pdu);
	cur += this_len;

	return 1;
}

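/*
 * Percentage of the input file consumed so far.
 */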
double pct_done(void)
{
	return 100.0 * ((double)cur / (double)total_size);
}
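
/*
 * Example driver (a minimal sketch, not part of the original source):
 * walks every record in a trace file and frees each PDU. The wrapper
 * function and the TRACE_USAGE_EXAMPLE guard are illustrative only.
 */
#ifdef TRACE_USAGE_EXAMPLE
static void example_walk(char *fname)
{
	struct blk_io_trace t;
	void *pdu;

	setup_ifile(fname);
	while (next_trace(&t, &pdu)) {
		/* ... examine t.action, t.sector, t.bytes, pdu here ... */
		free(pdu);		/* free(NULL) is a no-op */
	}
	/* next_trace() already called cleanup_ifile() on exhaustion */
}
#endif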