/*
 * The io parts of the fio tool, includes workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio, if the get_io_u/put_io_u helpers
 * and structures were pulled into this as well it would be a perfectly
 * generic io engine that could be used for other projects.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <assert.h>

#include "fio.h"
#include "diskutil.h"

static FLIST_HEAD(engine_list);

static int check_engine_ops(struct ioengine_ops *ops)
{
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version,
							FIO_IOOPS_VERSION);
		return 1;
	}

	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return 1;
	}

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)
		return 0;

	if (!ops->event) {
		log_err("%s: no event handler\n", ops->name);
		return 1;
	}
	if (!ops->getevents) {
		log_err("%s: no getevents handler\n", ops->name);
		return 1;
	}

	return 0;
}

void unregister_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del(&ops->list);
	INIT_FLIST_HEAD(&ops->list);
}

void register_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	INIT_FLIST_HEAD(&ops->list);
	flist_add_tail(&ops->list, &engine_list);
}
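
/*
 * Bundled engines typically register themselves from a constructor at
 * load time. A minimal sketch of that pattern (illustrative only: the
 * "example" name and handler are hypothetical, while register_ioengine(),
 * FIO_IOOPS_VERSION, FIO_SYNCIO and the fio_init constructor helper are
 * the interfaces the bundled engines use):
 *
 *	static struct ioengine_ops ioengine = {
 *		.name		= "example",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= example_queue,
 *	};
 *
 *	static void fio_init example_register(void)
 *	{
 *		register_ioengine(&ioengine);
 *	}
 */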

static struct ioengine_ops *find_ioengine(const char *name)
{
	struct ioengine_ops *ops;
	struct flist_head *entry;

	flist_for_each(entry, &engine_list) {
		ops = flist_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
			return ops;
	}

	return NULL;
}

static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	dlerror();
	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		td_vmsg(td, -1, dlerror(), "dlopen");
		return NULL;
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");

	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure.
	 */
	if (!ops) {
		get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		if (get_ioengine)
			get_ioengine(&ops);
	}
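
	/*
	 * For reference, an external engine loaded this way is expected to
	 * export one of the symbols looked up above. A sketch (anything not
	 * named "ioengine" or "get_ioengine" below is hypothetical):
	 *
	 *	struct ioengine_ops ioengine = {
	 *		.name		= "ext_example",
	 *		.version	= FIO_IOOPS_VERSION,
	 *		...
	 *	};
	 *
	 * or, when a non-static structure is impractical (e.g. C++ engines),
	 * an exported get_ioengine() function, matching the get_ioengine_t
	 * typedef, that stores a pointer to its ioengine_ops in *ops.
	 */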

	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		dlclose(dlhandle);
		return NULL;
	}

	ops->dlhandle = dlhandle;
	return ops;
}

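/*
 * Look up (or dlopen()) the named engine, verify that it provides the
 * mandatory handlers, and return a private copy of its ioengine_ops for
 * this thread.
 */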
struct ioengine_ops *load_ioengine(struct thread_data *td, const char *name)
{
	struct ioengine_ops *ops, *ret;
	char engine[16];

	dprint(FD_IO, "load ioengine %s\n", name);

	strncpy(engine, name, sizeof(engine) - 1);
	engine[sizeof(engine) - 1] = '\0';

	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
		strcpy(engine, "libaio");

	ops = find_ioengine(engine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);
		return NULL;
	}

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(ops))
		return NULL;

	ret = malloc(sizeof(*ret));
	memcpy(ret, ops, sizeof(*ret));
	ret->data = NULL;

	return ret;
}

/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

	if (td->eo && td->io_ops->options) {
		options_free(td->io_ops->options, td->eo);
		free(td->eo);
		td->eo = NULL;
	}

	if (td->io_ops->dlhandle)
		dlclose(td->io_ops->dlhandle);

	free(td->io_ops);
	td->io_ops = NULL;
}

void close_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		td->io_ops->data = NULL;
	}

	free_ioengine(td);
}

int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	dprint_io_u(io_u, "prep");
	fio_ro_check(td, io_u);

	lock_file(td, io_u->file, io_u->ddir);

	if (td->io_ops->prep) {
		int ret = td->io_ops->prep(td, io_u);

		dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
		if (ret)
			unlock_file(td, io_u->file);
		return ret;
	}

	return 0;
}

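/*
 * Reap between 'min' and 'max' completion events from the engine. If a
 * minimum is requested, queued io is committed first so there is
 * something to wait for. Returns the number of events reaped, or a
 * negative error.
 */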
int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
		    const struct timespec *t)
{
	int r = 0;

	/*
	 * For the rdma ioengine's one-sided operations (RDMA_WRITE or
	 * RDMA_READ), the server side gets a message from the client side
	 * that the task is finished, and td->done is set to 1 after
	 * td_io_commit(). In that case there is no need to reap completion
	 * events on the server side.
	 */
	if (td->done)
		return 0;

	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);
		if (r < 0)
			goto out;
	}
	if (max > td->cur_depth)
		max = td->cur_depth;
	if (min > max)
		max = min;

	r = 0;
	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);
out:
	if (r >= 0) {
		/*
		 * Reflect that our submitted requests were retrieved with
		 * whatever OS async calls are in the underlying engine.
		 */
		td->io_u_in_flight -= r;
		io_u_mark_complete(td, r);
	} else
		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
	return r;
}

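/*
 * Hand one io_u to the engine's ->queue() handler and account for it.
 * Returns FIO_Q_COMPLETED, FIO_Q_QUEUED or FIO_Q_BUSY as reported by
 * the engine.
 */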
int td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u->flags |= IO_U_F_FLIGHT;

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */
	log_io_u(td, io_u);

	io_u->error = 0;
	io_u->resid = 0;

	if (td->io_ops->flags & FIO_SYNCIO) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(struct timeval));
	}

	if (ddir_rw(acct_ddir(io_u))) {
		td->io_issues[acct_ddir(io_u)]++;
		td->io_issue_bytes[acct_ddir(io_u)] += io_u->xfer_buflen;
	}

	ret = td->io_ops->queue(td, io_u);

	unlock_file(td, io_u->file);

	if (ret == FIO_Q_BUSY && ddir_rw(acct_ddir(io_u))) {
		td->io_issues[acct_ddir(io_u)]--;
		td->io_issue_bytes[acct_ddir(io_u)] -= io_u->xfer_buflen;
	}

	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");

	/*
	 * Add a warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the first
	 * IO, then it's likely an alignment problem or because the host fs
	 * does not support O_DIRECT.
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {
		log_info("fio: first direct IO errored. File system may not "
			 "support direct IO, or iomem_align= is bad.\n");
	}

	if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
	}

	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}
	} else if (ret == FIO_Q_QUEUED) {
		int r;

		if (ddir_rw(io_u->ddir)) {
			td->io_u_queued++;
			td->ts.total_io_u[io_u->ddir]++;
		}

		if (td->io_u_queued >= td->o.iodepth_batch) {
			r = td_io_commit(td);
			if (r < 0)
				return r;
		}
	}

	if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(struct timeval));
	}

	return ret;
}

int td_io_init(struct thread_data *td)
{
	int ret = 0;

	if (td->io_ops->init) {
		ret = td->io_ops->init(td);
		if (ret && td->o.iodepth > 1) {
			log_err("fio: io engine init failed. Perhaps try"
				" reducing io depth?\n");
		}
		if (!td->error)
			td->error = ret;
	}

	if (!ret && (td->io_ops->flags & FIO_NOIO))
		td->flags |= TD_F_NOIO;

	return ret;
}

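/*
 * Push any queued but not yet submitted io to the engine via its
 * ->commit() handler, and move the queued count over to the in-flight
 * count.
 */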
int td_io_commit(struct thread_data *td)
{
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return 0;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		if (ret)
			td_verror(td, -ret, "io commit");
	}

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
	td->io_u_queued = 0;

	return 0;
}

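/*
 * Open a file through the engine and apply the job's cache invalidation,
 * fadvise and direct IO settings to it. Returns 0 on success, 1 on error.
 */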
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	assert(!fio_file_open(f));
	assert(f->fd == -1);

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
							td->o.nr_files);
		}

		assert(f->fd == -1);
		assert(!fio_file_open(f));
		return 1;
	}

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);

	td->nr_open_files++;
	get_file(f);

	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");
			goto err;
		}
	}

	if (td->io_ops->flags & FIO_DISKLESSIO)
		goto done;

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
		goto err;

	if (td->o.fadvise_hint &&
	    (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td_random(td))
			flags = POSIX_FADV_RANDOM;
		else
			flags = POSIX_FADV_SEQUENTIAL;

		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			td_verror(td, errno, "fadvise");
			goto err;
		}
	}

#ifdef FIO_OS_DIRECTIO
	/*
	 * Some OSes have a distinct call to mark the file non-buffered,
	 * instead of using O_DIRECT (Solaris).
	 */
	if (td->o.odirect) {
		int ret = fio_set_odirect(f->fd);

		if (ret) {
			td_verror(td, ret, "fio_set_odirect");
			log_err("fio: the file system does not seem to support direct IO\n");
			goto err;
		}
	}
#endif

done:
	log_file(td, f, FIO_LOG_OPEN_FILE);
	return 0;
err:
	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
	return 1;
}

int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * mark as closing, do real close when last io on it has completed
	 */
	fio_file_set_closing(f);

	disk_util_dec(f->du);

	if (td->o.file_lock_mode != FILE_LOCK_NONE)
		unlock_file_all(td, f);

	return put_file(td, f);
}

int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->unlink_file)
		return td->io_ops->unlink_file(td, f);
	else
		return unlink(f->file_name);
}

int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
{
	if (!td->io_ops->get_file_size)
		return 0;

	return td->io_ops->get_file_size(td, f);
}

static int do_sync_file_range(const struct thread_data *td,
			      struct fio_file *f)
{
	off64_t offset, nbytes;

	offset = f->first_write;
	nbytes = f->last_write - f->first_write;

	if (!nbytes)
		return 0;

	return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
}

int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
{
	int ret;

	if (io_u->ddir == DDIR_SYNC) {
		ret = fsync(io_u->file->fd);
	} else if (io_u->ddir == DDIR_DATASYNC) {
#ifdef CONFIG_FDATASYNC
		ret = fdatasync(io_u->file->fd);
#else
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
#endif
	} else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
		ret = do_sync_file_range(td, io_u->file);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}

	if (ret < 0)
		io_u->error = errno;

	return ret;
}

int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
{
#ifndef FIO_HAVE_TRIM
	io_u->error = EINVAL;
	return 0;
#else
	struct fio_file *f = io_u->file;
	int ret;

	ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
	if (!ret)
		return io_u->xfer_buflen;

	io_u->error = ret;
	return 0;
#endif
}

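/*
 * Backs the engine help output (the --enghelp command line option, where
 * available): with no argument, list the registered engines; with
 * "engine[,command]", load that engine and show its private options.
 */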
int fio_show_ioengine_help(const char *engine)
{
	struct flist_head *entry;
	struct thread_data td;
	char *sep;
	int ret = 1;

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		flist_for_each(entry, &engine_list) {
			td.io_ops = flist_entry(entry, struct ioengine_ops,
						list);
			log_info("\t%s\n", td.io_ops->name);
		}
		return 0;
	}
	sep = strchr(engine, ',');
	if (sep) {
		*sep = 0;
		sep++;
	}

	memset(&td, 0, sizeof(td));

	td.io_ops = load_ioengine(&td, engine);
	if (!td.io_ops) {
		log_info("IO engine %s not found\n", engine);
		return 1;
	}

	if (td.io_ops->options)
		ret = show_cmd_help(td.io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", td.io_ops->name);

	free_ioengine(&td);

	return ret;
}