/*
 * sync/psync engine
 *
 * IO engines that transfer data with regular read(2)/write(2) preceded by
 * lseek(2) (sync), with pread(2)/pwrite(2) (psync), and with the vectored
 * readv(2)/writev(2) (vsync) or preadv(2)/pwritev(2) (pvsync) variants.
 */
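/*
 * A minimal example job selecting one of these engines (an illustrative
 * sketch only; the section name and sizes are arbitrary):
 *
 *	[seq-read]
 *	ioengine=psync
 *	rw=read
 *	bs=4k
 *	size=1g
 */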
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"

/*
 * Sync engine uses engine_data to store last offset
 */
#define LAST_POS(f)	((f)->engine_data)

/*
 * Per-thread state for the vectored (vsync/pvsync) engines: the current
 * batch of queued io_us, the iovecs they map to, and the file, offset and
 * direction of the last queued IO so contiguous requests can be appended.
 */
struct syncio_data {
	struct iovec *iovecs;		/* one iovec per batched io_u */
	struct io_u **io_us;		/* io_us backing each iovec */
	unsigned int queued;		/* IOs in the current batch */
	unsigned int events;		/* completed IOs awaiting reaping */
	unsigned long queued_bytes;	/* total bytes in the current batch */

	unsigned long long last_offset;	/* end offset of the last queued IO */
	struct fio_file *last_file;	/* file of the last queued IO */
	enum fio_ddir last_ddir;	/* direction of the last queued IO */
};

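/*
 * Prep for the sync engine: seek to this IO's offset, unless the file is
 * already positioned there by the previous transfer (tracked in LAST_POS).
 */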
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

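/*
 * Common completion handling: advance the cached file position, turn a
 * short transfer into residual bytes, and map a negative return value to
 * errno before reporting the io_u as completed.
 */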
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}

#ifdef CONFIG_PWRITEV
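/*
 * pvsync queue: issue the IO immediately as a single-element
 * preadv(2)/pwritev(2) at io_u->offset; trims and syncs are handed to
 * their generic helpers.
 */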
static int fio_pvsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif

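/*
 * psync queue: positioned pread(2)/pwrite(2), so no separate prep/seek
 * step is needed.
 */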
static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

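/*
 * sync queue: plain read(2)/write(2) at the current file position, which
 * fio_syncio_prep() has already established with lseek(2) where needed.
 */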
static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

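/*
 * The vectored engines complete their batch synchronously in ->commit(),
 * so getevents only reports (and clears) the size of the last batch.
 */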
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 const struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops->data;
	int ret;

	if (min) {
		ret = sd->events;
		sd->events = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops->data;

	return sd->io_us[event];
}

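/*
 * An io_u may join the current batch only if it directly continues the
 * previous IO: same file, same direction, and an offset that starts
 * exactly where the last queued IO ended.
 */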
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	if (ddir_sync(io_u->ddir))
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}

/*
 * Store io_u in slot idx of the batch and update the running
 * file/offset/direction state that fio_vsyncio_append() checks.
 */
static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int idx)
{
	sd->io_us[idx] = io_u;
	sd->iovecs[idx].iov_base = io_u->xfer_buf;
	sd->iovecs[idx].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}

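/*
 * vsync queue: batch contiguous IOs into the iovec array. If this IO does
 * not continue the batch, ask fio to commit what is queued first
 * (FIO_Q_BUSY); fsync-type IOs are executed inline since they cannot be
 * vectored.
 */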
static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}

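/*
 * Submit the batch: seek to the offset of the first queued io_u and issue
 * a single readv(2)/writev(2) covering every iovec in the batch.
 */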
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	sd->events = sd->queued;
	sd->queued = 0;
	return fio_vsyncio_end(td, ret);
}

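/*
 * Allocate per-thread batching state; the iovec and io_u arrays are sized
 * to the job's iodepth, the maximum batch length.
 */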
static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

	td->io_ops->data = sd;
	return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;

	free(sd->iovecs);
	free(sd->io_us);
	free(sd);
}

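/*
 * Engine descriptors. All four set FIO_SYNCIO, marking them as blocking,
 * synchronous engines to fio's submission path.
 */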
static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

#ifdef CONFIG_PWRITEV
static struct ioengine_ops ioengine_pvrw = {
	.name		= "pvsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
#endif

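/*
 * fio_init/fio_exit expand to constructor/destructor attributes, so the
 * engines register themselves automatically at load time and unregister
 * at exit.
 */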
static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	register_ioengine(&ioengine_pvrw);
#endif
}

static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	unregister_ioengine(&ioengine_pvrw);
#endif
}