#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <dirent.h>
#include <libgen.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "fio.h"
#include "smalloc.h"
#include "filehash.h"
#include "options.h"
#include "os/os.h"
#include "hash.h"
#include "lib/axmap.h"

#ifdef CONFIG_LINUX_FALLOCATE
#include <linux/falloc.h>
#endif

static int root_warn;

static FLIST_HEAD(filename_list);

static inline void clear_error(struct thread_data *td)
{
	td->error = 0;
	td->verror[0] = '\0';
}

/*
 * Leaves f->fd open on success, caller must close
 */
static int extend_file(struct thread_data *td, struct fio_file *f)
{
	int r, new_layout = 0, unlink_file = 0, flags;
	unsigned long long left;
	unsigned int bs;
	char *b = NULL;

	if (read_only) {
		log_err("fio: refusing extend of file due to read-only\n");
		return 0;
	}

	/*
	 * check if we need to lay the file out complete again. fio
	 * does that for operations involving reads, or for writes
	 * where overwrite is set
	 */
	if (td_read(td) ||
	   (td_write(td) && td->o.overwrite && !td->o.file_append) ||
	    (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
		new_layout = 1;
	if (td_write(td) && !td->o.overwrite && !td->o.file_append)
		unlink_file = 1;

	if (unlink_file || new_layout) {
		dprint(FD_FILE, "layout unlink %s\n", f->file_name);
		if ((td_io_unlink_file(td, f) < 0) && (errno != ENOENT)) {
			td_verror(td, errno, "unlink");
			return 1;
		}
	}

	flags = O_WRONLY | O_CREAT;
	if (new_layout)
		flags |= O_TRUNC;

#ifdef WIN32
	flags |= _O_BINARY;
#endif

	dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
	f->fd = open(f->file_name, flags, 0644);
	if (f->fd < 0) {
		td_verror(td, errno, "open");
		return 1;
	}

#ifdef CONFIG_POSIX_FALLOCATE
	if (!td->o.fill_device) {
		switch (td->o.fallocate_mode) {
		case FIO_FALLOCATE_NONE:
			break;
		case FIO_FALLOCATE_POSIX:
			dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
				 f->file_name,
				 (unsigned long long) f->real_file_size);

			r = posix_fallocate(f->fd, 0, f->real_file_size);
			if (r > 0) {
				log_err("fio: posix_fallocate fails: %s\n",
						strerror(r));
			}
			break;
#ifdef CONFIG_LINUX_FALLOCATE
		case FIO_FALLOCATE_KEEP_SIZE:
			dprint(FD_FILE,
				"fallocate(FALLOC_FL_KEEP_SIZE) "
				"file %s size %llu\n", f->file_name,
				(unsigned long long) f->real_file_size);

			r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
					f->real_file_size);
			if (r != 0)
				td_verror(td, errno, "fallocate");

			break;
#endif /* CONFIG_LINUX_FALLOCATE */
		default:
			log_err("fio: unknown fallocate mode: %d\n",
				td->o.fallocate_mode);
			assert(0);
		}
	}
#endif /* CONFIG_POSIX_FALLOCATE */

	if (!new_layout)
		goto done;

	/*
	 * The size will be -1ULL when fill_device is used, so don't truncate
	 * or fallocate this file, just write it
	 */
	if (!td->o.fill_device) {
		dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
					(unsigned long long) f->real_file_size);
		if (ftruncate(f->fd, f->real_file_size) == -1) {
			if (errno != EFBIG) {
				td_verror(td, errno, "ftruncate");
				goto err;
			}
		}
	}

	b = malloc(td->o.max_bs[DDIR_WRITE]);

	left = f->real_file_size;
	while (left && !td->terminate) {
		bs = td->o.max_bs[DDIR_WRITE];
		if (bs > left)
			bs = left;

		fill_io_buffer(td, b, bs, bs);

		r = write(f->fd, b, bs);

		if (r > 0) {
			left -= r;
			continue;
		} else {
			if (r < 0) {
				int __e = errno;

				if (__e == ENOSPC) {
					if (td->o.fill_device)
						break;
					log_info("fio: ENOSPC on laying out "
						 "file, stopping\n");
					break;
				}
				td_verror(td, __e, "write");
			} else
				td_verror(td, EIO, "write");

			break;
		}
	}

	if (td->terminate) {
		dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
		td_io_unlink_file(td, f);
	} else if (td->o.create_fsync) {
		if (fsync(f->fd) < 0) {
			td_verror(td, errno, "fsync");
			goto err;
		}
	}
	if (td->o.fill_device && !td_write(td)) {
		fio_file_clear_size_known(f);
		if (td_io_get_file_size(td, f))
			goto err;
		if (f->io_size > f->real_file_size)
			f->io_size = f->real_file_size;
	}

	free(b);
done:
	return 0;
err:
	close(f->fd);
	f->fd = -1;
	if (b)
		free(b);
	return 1;
}

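/*
 * Sequentially read the file contents once, typically to prime the page
 * cache before the measured run starts.
 */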
static int pre_read_file(struct thread_data *td, struct fio_file *f)
{
	int ret = 0, r, did_open = 0, old_runstate;
	unsigned long long left;
	unsigned int bs;
	char *b;

	if (td->io_ops->flags & FIO_PIPEIO)
		return 0;

	if (!fio_file_open(f)) {
		if (td->io_ops->open_file(td, f)) {
			log_err("fio: cannot pre-read, failed to open file\n");
			return 1;
		}
		did_open = 1;
	}

	old_runstate = td_bump_runstate(td, TD_PRE_READING);

	bs = td->o.max_bs[DDIR_READ];
	b = malloc(bs);
	memset(b, 0, bs);

	if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
		td_verror(td, errno, "lseek");
		log_err("fio: failed to lseek pre-read file\n");
		ret = 1;
		goto error;
	}

	left = f->io_size;

	while (left && !td->terminate) {
		if (bs > left)
			bs = left;

		r = read(f->fd, b, bs);

		if (r == (int) bs) {
			left -= bs;
			continue;
		} else {
			td_verror(td, EIO, "pre_read");
			break;
		}
	}

error:
	td_restore_runstate(td, old_runstate);

	if (did_open)
		td->io_ops->close_file(td, f);

	free(b);
	return ret;
}

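/*
 * Pick a file size at random within [file_size_low, file_size_high],
 * rounded down to a multiple of the minimum block size.
 */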
static unsigned long long get_rand_file_size(struct thread_data *td)
{
	unsigned long long ret, sized;
	unsigned long r;

	r = __rand(&td->file_size_state);
	sized = td->o.file_size_high - td->o.file_size_low;
	ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
	ret += td->o.file_size_low;
	ret -= (ret % td->o.rw_min_bs);
	return ret;
}

static int file_size(struct thread_data *td, struct fio_file *f)
{
	struct stat st;

	if (stat(f->file_name, &st) == -1) {
		td_verror(td, errno, "stat");
		return 1;
	}

	f->real_file_size = st.st_size;
	return 0;
}

static int bdev_size(struct thread_data *td, struct fio_file *f)
{
	unsigned long long bytes = 0;
	int r;

	if (td->io_ops->open_file(td, f)) {
		log_err("fio: failed opening blockdev %s for size check\n",
			f->file_name);
		return 1;
	}

	r = blockdev_size(f, &bytes);
	if (r) {
		td_verror(td, r, "blockdev_size");
		goto err;
	}

	if (!bytes) {
		log_err("%s: zero sized block device?\n", f->file_name);
		goto err;
	}

	f->real_file_size = bytes;
	td->io_ops->close_file(td, f);
	return 0;
err:
	td->io_ops->close_file(td, f);
	return 1;
}

static int char_size(struct thread_data *td, struct fio_file *f)
{
#ifdef FIO_HAVE_CHARDEV_SIZE
	unsigned long long bytes = 0;
	int r;

	if (td->io_ops->open_file(td, f)) {
		log_err("fio: failed opening chardev %s for size check\n",
			f->file_name);
		return 1;
	}

	r = chardev_size(f, &bytes);
	if (r) {
		td_verror(td, r, "chardev_size");
		goto err;
	}

	if (!bytes) {
		log_err("%s: zero sized char device?\n", f->file_name);
		goto err;
	}

	f->real_file_size = bytes;
	td->io_ops->close_file(td, f);
	return 0;
err:
	td->io_ops->close_file(td, f);
	return 1;
#else
	f->real_file_size = -1ULL;
	return 0;
#endif
}

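/*
 * Determine ->real_file_size for a regular file, block device or character
 * device, and verify that the configured offset lies within it.
 */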
static int get_file_size(struct thread_data *td, struct fio_file *f)
{
	int ret = 0;

	if (fio_file_size_known(f))
		return 0;

	if (f->filetype == FIO_TYPE_FILE)
		ret = file_size(td, f);
	else if (f->filetype == FIO_TYPE_BD)
		ret = bdev_size(td, f);
	else if (f->filetype == FIO_TYPE_CHAR)
		ret = char_size(td, f);
	else
		f->real_file_size = -1;

	if (ret)
		return ret;

	if (f->file_offset > f->real_file_size) {
		log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
					(unsigned long long) f->file_offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	fio_file_set_size_known(f);
	return 0;
}

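/*
 * Drop any cached data for the given range of the file. Uses the io engine's
 * ->invalidate hook if present, otherwise falls back to a filetype-specific
 * method. Failures are logged but not treated as fatal.
 */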
static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
				   unsigned long long off,
				   unsigned long long len)
{
	int ret = 0;

#ifdef CONFIG_ESX
	return 0;
#endif

	if (len == -1ULL)
		len = f->io_size;
	if (off == -1ULL)
		off = f->file_offset;

	if (len == -1ULL || off == -1ULL)
		return 0;

	dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
								len);

	if (td->io_ops->invalidate)
		ret = td->io_ops->invalidate(td, f);
	else if (f->filetype == FIO_TYPE_FILE)
		ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
	else if (f->filetype == FIO_TYPE_BD) {
		ret = blockdev_invalidate_cache(f);
		if (ret < 0 && errno == EACCES && geteuid()) {
			if (!root_warn) {
				log_err("fio: only root may flush block "
					"devices. Cache flush bypassed!\n");
				root_warn = 1;
			}
			ret = 0;
		}
	} else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
		ret = 0;

	/*
	 * Cache flushing isn't a fatal condition, and we know it will
	 * happen on some platforms where we don't have the proper
	 * function to flush eg block device caches. So just warn and
	 * continue on our way.
	 */
	if (ret) {
		log_info("fio: cache invalidation of %s failed: %s\n",
				f->file_name, strerror(errno));
		ret = 0;
	}

	return 0;

}

int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_open(f))
		return 0;

	return __file_invalidate_cache(td, f, -1ULL, -1ULL);
}

int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
{
	int ret = 0;

	dprint(FD_FILE, "fd close %s\n", f->file_name);

	remove_file_hash(f);

	if (close(f->fd) < 0)
		ret = errno;

	f->fd = -1;

	if (f->shadow_fd != -1) {
		close(f->shadow_fd);
		f->shadow_fd = -1;
	}

	f->engine_data = 0;
	return ret;
}

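/*
 * Open a file, reusing the lock of an already hashed entry with the same
 * name if one exists. Returns whether the file was found in the hash.
 */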
int file_lookup_open(struct fio_file *f, int flags)
{
	struct fio_file *__f;
	int from_hash;

	__f = lookup_file_hash(f->file_name);
	if (__f) {
		dprint(FD_FILE, "found file in hash %s\n", f->file_name);
		/*
		 * racy, need the __f->lock locked
		 */
		f->lock = __f->lock;
		from_hash = 1;
	} else {
		dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
		from_hash = 0;
	}

#ifdef WIN32
	flags |= _O_BINARY;
#endif

	f->fd = open(f->file_name, flags, 0600);
	return from_hash;
}

static int file_close_shadow_fds(struct thread_data *td)
{
	struct fio_file *f;
	int num_closed = 0;
	unsigned int i;

	for_each_file(td, f, i) {
		if (f->shadow_fd == -1)
			continue;

		close(f->shadow_fd);
		f->shadow_fd = -1;
		num_closed++;
	}

	return num_closed;
}

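/*
 * Default open handler: map the job's read/write/trim mode and options
 * (direct, atomic, sync, create) to open(2) flags, handle stdin/stdout,
 * and retry with relaxed flags on EPERM/EMFILE.
 */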
int generic_open_file(struct thread_data *td, struct fio_file *f)
{
	int is_std = 0;
	int flags = 0;
	int from_hash = 0;

	dprint(FD_FILE, "fd open %s\n", f->file_name);

	if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
		log_err("fio: trim only applies to block device\n");
		return 1;
	}

	if (!strcmp(f->file_name, "-")) {
		if (td_rw(td)) {
			log_err("fio: can't read/write to stdin/out\n");
			return 1;
		}
		is_std = 1;

		/*
		 * move output logging to stderr, if we are writing to stdout
		 */
		if (td_write(td))
			f_out = stderr;
	}

	if (td_trim(td))
		goto skip_flags;
	if (td->o.odirect)
		flags |= OS_O_DIRECT;
	if (td->o.oatomic) {
		if (!FIO_O_ATOMIC) {
			td_verror(td, EINVAL, "OS does not support atomic IO");
			return 1;
		}
		flags |= OS_O_DIRECT | FIO_O_ATOMIC;
	}
	if (td->o.sync_io)
		flags |= O_SYNC;
	if (td->o.create_on_open)
		flags |= O_CREAT;
skip_flags:
	if (f->filetype != FIO_TYPE_FILE)
		flags |= FIO_O_NOATIME;

open_again:
	if (td_write(td)) {
		if (!read_only)
			flags |= O_RDWR;

		if (f->filetype == FIO_TYPE_FILE)
			flags |= O_CREAT;

		if (is_std)
			f->fd = dup(STDOUT_FILENO);
		else
			from_hash = file_lookup_open(f, flags);
	} else if (td_read(td)) {
		if (f->filetype == FIO_TYPE_CHAR && !read_only)
			flags |= O_RDWR;
		else
			flags |= O_RDONLY;

		if (is_std)
			f->fd = dup(STDIN_FILENO);
		else
			from_hash = file_lookup_open(f, flags);
	} else { /* td_trim */
		flags |= O_RDWR;
		from_hash = file_lookup_open(f, flags);
	}

	if (f->fd == -1) {
		char buf[FIO_VERROR_SIZE];
		int __e = errno;

		if (__e == EPERM && (flags & FIO_O_NOATIME)) {
			flags &= ~FIO_O_NOATIME;
			goto open_again;
		}
		if (__e == EMFILE && file_close_shadow_fds(td))
			goto open_again;

		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);

		if (__e == EINVAL && (flags & OS_O_DIRECT)) {
			log_err("fio: looks like your file system does not " \
				"support direct=1/buffered=0\n");
		}

		td_verror(td, __e, buf);
		return 1;
	}

	if (!from_hash && f->fd != -1) {
		if (add_file_hash(f)) {
			int fio_unused ret;

			/*
			 * Stash away descriptor for later close. This is to
			 * work-around a "feature" on Linux, where a close of
			 * an fd that has been opened for write will trigger
			 * udev to call blkid to check partitions, fs id, etc.
			 * That pollutes the device cache, which can slow down
			 * unbuffered accesses.
			 */
			if (f->shadow_fd == -1)
				f->shadow_fd = f->fd;
			else {
				/*
				 * OK to ignore, we haven't done anything
				 * with it
				 */
				ret = generic_close_file(td, f);
			}
			goto open_again;
		}
	}

	return 0;
}

int generic_get_file_size(struct thread_data *td, struct fio_file *f)
{
	return get_file_size(td, f);
}

/*
 * open/close all files, so that ->real_file_size gets set
 */
static int get_file_sizes(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;
	int err = 0;

	for_each_file(td, f, i) {
		dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
								f->file_name);

		if (td_io_get_file_size(td, f)) {
			if (td->error != ENOENT) {
				log_err("%s\n", td->verror);
				err = 1;
				break;
			}
			clear_error(td);
		}

		if (f->real_file_size == -1ULL && td->o.size)
			f->real_file_size = td->o.size / td->o.nr_files;
	}

	return err;
}

struct fio_mount {
	struct flist_head list;
	const char *base;
	char __base[256];
	unsigned int key;
};

/*
 * Get free number of bytes for each file on each unique mount.
 */
static unsigned long long get_fs_free_counts(struct thread_data *td)
{
	struct flist_head *n, *tmp;
	unsigned long long ret = 0;
	struct fio_mount *fm;
	FLIST_HEAD(list);
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		struct stat sb;
		char buf[256];

		if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
			if (f->real_file_size != -1ULL)
				ret += f->real_file_size;
			continue;
		} else if (f->filetype != FIO_TYPE_FILE)
			continue;

		buf[255] = '\0';
		strncpy(buf, f->file_name, 255);

		if (stat(buf, &sb) < 0) {
			if (errno != ENOENT)
				break;
			strcpy(buf, ".");
			if (stat(buf, &sb) < 0)
				break;
		}

		fm = NULL;
		flist_for_each(n, &list) {
			fm = flist_entry(n, struct fio_mount, list);
			if (fm->key == sb.st_dev)
				break;

			fm = NULL;
		}

		if (fm)
			continue;

		fm = calloc(1, sizeof(*fm));
		strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
		fm->base = basename(fm->__base);
		fm->key = sb.st_dev;
		flist_add(&fm->list, &list);
	}

	flist_for_each_safe(n, tmp, &list) {
		unsigned long long sz;

		fm = flist_entry(n, struct fio_mount, list);
		flist_del(&fm->list);

		sz = get_fs_size(fm->base);
		if (sz && sz != -1ULL)
			ret += sz;

		free(fm);
	}

	return ret;
}

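/*
 * Compute the starting offset for this job's I/O within the file, taking
 * file_append and the per-subjob offset_increment into account.
 */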
uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
{
	struct thread_options *o = &td->o;

	if (o->file_append && f->filetype == FIO_TYPE_FILE)
		return f->real_file_size;

	return td->o.start_offset +
		td->subjob_number * td->o.offset_increment;
}

/*
 * Open the files and setup files sizes, creating files if necessary.
 */
int setup_files(struct thread_data *td)
{
	unsigned long long total_size, extend_size;
	struct thread_options *o = &td->o;
	struct fio_file *f;
	unsigned int i, nr_fs_extra = 0;
	int err = 0, need_extend;
	int old_state;
	const unsigned int bs = td_min_bs(td);
	uint64_t fs = 0;

	dprint(FD_FILE, "setup files\n");

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (o->read_iolog_file)
		goto done;

	/*
	 * if ioengine defines a setup() method, it's responsible for
	 * opening the files and setting f->real_file_size to indicate
	 * the valid range for that file.
	 */
	if (td->io_ops->setup)
		err = td->io_ops->setup(td);
	else
		err = get_file_sizes(td);

	if (err)
		goto err_out;

	/*
	 * check sizes. if the files/devices do not exist and the size
	 * isn't passed to fio, abort.
	 */
	total_size = 0;
	for_each_file(td, f, i) {
		if (f->real_file_size == -1ULL)
			total_size = -1ULL;
		else
			total_size += f->real_file_size;
	}

	if (o->fill_device)
		td->fill_device_size = get_fs_free_counts(td);

	/*
	 * device/file sizes are zero and no size given, punt
	 */
	if ((!total_size || total_size == -1ULL) && !o->size &&
	    !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
	    !(o->nr_files && (o->file_size_low || o->file_size_high))) {
		log_err("%s: you need to specify size=\n", o->name);
		td_verror(td, EINVAL, "total_file_size");
		goto err_out;
	}

	/*
	 * Calculate per-file size and potential extra size for the
	 * first files, if needed.
	 */
	if (!o->file_size_low && o->nr_files) {
		uint64_t all_fs;

		fs = o->size / o->nr_files;
		all_fs = fs * o->nr_files;

		if (all_fs < o->size)
			nr_fs_extra = (o->size - all_fs) / bs;
	}

	/*
	 * now file sizes are known, so we can set ->io_size. if size= is
	 * not given, ->io_size is just equal to ->real_file_size. if size
	 * is given, ->io_size is size / nr_files.
	 */
	extend_size = total_size = 0;
	need_extend = 0;
	for_each_file(td, f, i) {
		f->file_offset = get_start_offset(td, f);

		if (!o->file_size_low) {
			/*
			 * no file size range given, file size is equal to
			 * total size divided by number of files. If that is
			 * zero, set it to the real file size. If the size
			 * doesn't divide nicely with the min blocksize,
			 * make the first files bigger.
			 */
			f->io_size = fs;
			if (nr_fs_extra) {
				nr_fs_extra--;
				f->io_size += bs;
			}

			if (!f->io_size)
				f->io_size = f->real_file_size - f->file_offset;
		} else if (f->real_file_size < o->file_size_low ||
			   f->real_file_size > o->file_size_high) {
			if (f->file_offset > o->file_size_low)
				goto err_offset;
			/*
			 * file size given. if it's fixed, use that. if it's a
			 * range, generate a random size in-between.
			 */
			if (o->file_size_low == o->file_size_high)
				f->io_size = o->file_size_low - f->file_offset;
			else {
				f->io_size = get_rand_file_size(td)
						- f->file_offset;
			}
		} else
			f->io_size = f->real_file_size - f->file_offset;

		if (f->io_size == -1ULL)
			total_size = -1ULL;
		else {
			if (o->size_percent)
				f->io_size = (f->io_size * o->size_percent) / 100;
			total_size += f->io_size;
		}

		if (f->filetype == FIO_TYPE_FILE &&
		    (f->io_size + f->file_offset) > f->real_file_size &&
		    !(td->io_ops->flags & FIO_DISKLESSIO)) {
			if (!o->create_on_open) {
				need_extend++;
				extend_size += (f->io_size + f->file_offset);
			} else
				f->real_file_size = f->io_size + f->file_offset;
			fio_file_set_extend(f);
		}
	}

	if (!o->size || (total_size && o->size > total_size))
		o->size = total_size;

	if (o->size < td_min_bs(td)) {
		log_err("fio: blocksize too large for data set\n");
		goto err_out;
	}

	/*
	 * See if we need to extend some files
	 */
	if (need_extend) {
		temp_stall_ts = 1;
		if (output_format == FIO_OUTPUT_NORMAL)
			log_info("%s: Laying out IO file(s) (%u file(s) /"
				 " %lluMB)\n", o->name, need_extend,
					extend_size >> 20);

		for_each_file(td, f, i) {
			unsigned long long old_len = -1ULL, extend_len = -1ULL;

			if (!fio_file_extend(f))
				continue;

			assert(f->filetype == FIO_TYPE_FILE);
			fio_file_clear_extend(f);
			if (!o->fill_device) {
				old_len = f->real_file_size;
				extend_len = f->io_size + f->file_offset -
						old_len;
			}
			f->real_file_size = (f->io_size + f->file_offset);
			err = extend_file(td, f);
			if (err)
				break;

			err = __file_invalidate_cache(td, f, old_len,
								extend_len);

			/*
			 * Shut up static checker
			 */
			if (f->fd != -1)
				close(f->fd);

			f->fd = -1;
			if (err)
				break;
		}
		temp_stall_ts = 0;
	}

	if (err)
		goto err_out;

	if (!o->zone_size)
		o->zone_size = o->size;

	/*
	 * iolog already set the total io size, if we read back
	 * stored entries.
	 */
	if (!o->read_iolog_file) {
		if (o->io_limit)
			td->total_io_size = o->io_limit * o->loops;
		else
			td->total_io_size = o->size * o->loops;
	}

done:
	if (o->create_only)
		td->done = 1;

	td_restore_runstate(td, old_state);
	return 0;
err_offset:
	log_err("%s: you need to specify valid offset=\n", o->name);
err_out:
	td_restore_runstate(td, old_state);
	return 1;
}

int pre_read_files(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_FILE, "pre_read files\n");

	for_each_file(td, f, i) {
		pre_read_file(td, f);
	}

	return 1;
}

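/*
 * Set up the zipf/pareto state for one file, hashing the file name into the
 * seed when repeatable randomness is requested so each file gets its own
 * sequence.
 */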
static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
{
	unsigned int range_size, seed;
	unsigned long nranges;
	uint64_t fsize;

	range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
	fsize = min(f->real_file_size, f->io_size);

	nranges = (fsize + range_size - 1) / range_size;

	seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
	if (!td->o.rand_repeatable)
		seed = td->rand_seeds[4];

	if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
	else
		pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);

	return 1;
}

static int init_rand_distribution(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;
	int state;

	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return 0;

	state = td_bump_runstate(td, TD_SETTING_UP);

	for_each_file(td, f, i)
		__init_rand_distribution(td, f);

	td_restore_runstate(td, state);

	return 1;
}

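/*
 * For random workloads, set up block-coverage tracking per file: an LFSR if
 * requested, otherwise an axmap, unless norandommap disables tracking.
 */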
int init_random_map(struct thread_data *td)
{
	unsigned long long blocks;
	struct fio_file *f;
	unsigned int i;

	if (init_rand_distribution(td))
		return 0;
	if (!td_random(td))
		return 0;

	for_each_file(td, f, i) {
		uint64_t fsize = min(f->real_file_size, f->io_size);

		blocks = fsize / (unsigned long long) td->o.rw_min_bs;

		if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
			unsigned long seed;

			seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];

			if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
				fio_file_set_lfsr(f);
				continue;
			}
		} else if (!td->o.norandommap) {
			f->io_axmap = axmap_new(blocks);
			if (f->io_axmap) {
				fio_file_set_axmap(f);
				continue;
			}
		} else if (td->o.norandommap)
			continue;

		if (!td->o.softrandommap) {
			log_err("fio: failed allocating random map. If running"
				" a large number of jobs, try the 'norandommap'"
				" option or set 'softrandommap'. Or give"
				" a larger --alloc-size to fio.\n");
			return 1;
		}

		log_info("fio: file %s failed allocating random map. Running "
			 "job without.\n", f->file_name);
	}

	return 0;
}

void close_files(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		if (fio_file_open(f))
			td_io_close_file(td, f);
	}
}

void close_and_free_files(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_FILE, "close files\n");

	for_each_file(td, f, i) {
		if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
			dprint(FD_FILE, "free unlink %s\n", f->file_name);
			td_io_unlink_file(td, f);
		}

		if (fio_file_open(f))
			td_io_close_file(td, f);

		remove_file_hash(f);

		if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
			dprint(FD_FILE, "free unlink %s\n", f->file_name);
			td_io_unlink_file(td, f);
		}

		sfree(f->file_name);
		f->file_name = NULL;
		if (fio_file_axmap(f)) {
			axmap_free(f->io_axmap);
			f->io_axmap = NULL;
		}
		sfree(f);
	}

	td->o.filename = NULL;
	free(td->files);
	free(td->file_locks);
	td->files_index = 0;
	td->files = NULL;
	td->file_locks = NULL;
	td->o.file_lock_mode = FILE_LOCK_NONE;
	td->o.nr_files = 0;
}

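/*
 * Classify a file name as pipe, block device, character device or regular
 * file, based on its name and stat() information.
 */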
static void get_file_type(struct fio_file *f)
{
	struct stat sb;

	if (!strcmp(f->file_name, "-"))
		f->filetype = FIO_TYPE_PIPE;
	else
		f->filetype = FIO_TYPE_FILE;

	/* \\.\ is the device namespace in Windows, where every file is
	 * a block device */
	if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
		f->filetype = FIO_TYPE_BD;

	if (!stat(f->file_name, &sb)) {
		if (S_ISBLK(sb.st_mode))
			f->filetype = FIO_TYPE_BD;
		else if (S_ISCHR(sb.st_mode))
			f->filetype = FIO_TYPE_CHAR;
		else if (S_ISFIFO(sb.st_mode))
			f->filetype = FIO_TYPE_PIPE;
	}
}

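/*
 * Helpers for tracking file names that have already been allocated, so that
 * cloned jobs reusing the same files are not added twice. The unlocked
 * __is_already_allocated() is called with the file hash lock held.
 */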
static int __is_already_allocated(const char *fname)
{
	struct flist_head *entry;
	char *filename;

	if (flist_empty(&filename_list))
		return 0;

	flist_for_each(entry, &filename_list) {
		filename = flist_entry(entry, struct file_name, list)->filename;

		if (strcmp(filename, fname) == 0)
			return 1;
	}

	return 0;
}

static int is_already_allocated(const char *fname)
{
	int ret;

	fio_file_hash_lock();
	ret = __is_already_allocated(fname);
	fio_file_hash_unlock();
	return ret;
}

static void set_already_allocated(const char *fname)
{
	struct file_name *fn;

	fn = malloc(sizeof(struct file_name));
	fn->filename = strdup(fname);

	fio_file_hash_lock();
	if (!__is_already_allocated(fname)) {
		flist_add_tail(&fn->list, &filename_list);
		fn = NULL;
	}
	fio_file_hash_unlock();

	if (fn) {
		free(fn->filename);
		free(fn);
	}
}

static void free_already_allocated(void)
{
	struct flist_head *entry, *tmp;
	struct file_name *fn;

	if (flist_empty(&filename_list))
		return;

	fio_file_hash_lock();
	flist_for_each_safe(entry, tmp, &filename_list) {
		fn = flist_entry(entry, struct file_name, list);
		free(fn->filename);
		flist_del(&fn->list);
		free(fn);
	}

	fio_file_hash_unlock();
}

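/*
 * Allocate and reset a new fio_file from shared memory; aborts on OOM, since
 * callers do not handle a failed allocation here.
 */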
static struct fio_file *alloc_new_file(struct thread_data *td)
{
	struct fio_file *f;

	f = smalloc(sizeof(*f));
	if (!f) {
		log_err("fio: smalloc OOM\n");
		assert(0);
		return NULL;
	}

	f->fd = -1;
	f->shadow_fd = -1;
	fio_file_reset(td, f);
	return f;
}

int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
{
	int cur_files = td->files_index;
	char file_name[PATH_MAX];
	struct fio_file *f;
	int len = 0;

	dprint(FD_FILE, "add file %s\n", fname);

	if (td->o.directory)
		len = set_name_idx(file_name, td->o.directory, numjob);

	sprintf(file_name + len, "%s", fname);

	/* clean cloned siblings using existing files */
	if (numjob && is_already_allocated(file_name))
		return 0;

	f = alloc_new_file(td);

	if (td->files_size <= td->files_index) {
		unsigned int new_size = td->o.nr_files + 1;

		dprint(FD_FILE, "resize file array to %d files\n", new_size);

		td->files = realloc(td->files, new_size * sizeof(f));
		if (td->files == NULL) {
			log_err("fio: realloc OOM\n");
			assert(0);
		}
		if (td->o.file_lock_mode != FILE_LOCK_NONE) {
			td->file_locks = realloc(td->file_locks, new_size);
			if (!td->file_locks) {
				log_err("fio: realloc OOM\n");
				assert(0);
			}
			td->file_locks[cur_files] = FILE_LOCK_NONE;
		}
		td->files_size = new_size;
	}
	td->files[cur_files] = f;
	f->fileno = cur_files;

	/*
	 * init function, io engine may not be loaded yet
	 */
	if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
		f->real_file_size = -1ULL;

	f->file_name = smalloc_strdup(file_name);
	if (!f->file_name) {
		log_err("fio: smalloc OOM\n");
		assert(0);
	}

	get_file_type(f);

	switch (td->o.file_lock_mode) {
	case FILE_LOCK_NONE:
		break;
	case FILE_LOCK_READWRITE:
		f->rwlock = fio_rwlock_init();
		break;
	case FILE_LOCK_EXCLUSIVE:
		f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
		break;
	default:
		log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
		assert(0);
	}

	td->files_index++;
	if (f->filetype == FIO_TYPE_FILE)
		td->nr_normal_files++;

	set_already_allocated(file_name);

	if (inc)
		td->o.nr_files++;

	dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
							cur_files);

	return cur_files;
}

int add_file_exclusive(struct thread_data *td, const char *fname)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		if (!strcmp(f->file_name, fname))
			return i;
	}

	return add_file(td, fname, 0, 1);
}

void get_file(struct fio_file *f)
{
	dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
	assert(fio_file_open(f));
	f->references++;
}

int put_file(struct thread_data *td, struct fio_file *f)
{
	int f_ret = 0, ret = 0;

	dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);

	if (!fio_file_open(f)) {
		assert(f->fd == -1);
		return 0;
	}

	assert(f->references);
	if (--f->references)
		return 0;

	if (should_fsync(td) && td->o.fsync_on_close) {
		f_ret = fsync(f->fd);
		if (f_ret < 0)
			f_ret = errno;
	}

	if (td->io_ops->close_file)
		ret = td->io_ops->close_file(td, f);

	if (!ret)
		ret = f_ret;

	td->nr_open_files--;
	fio_file_clear_open(f);
	assert(f->fd == -1);
	return ret;
}

void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
{
	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
		return;

	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
		if (ddir == DDIR_READ)
			fio_rwlock_read(f->rwlock);
		else
			fio_rwlock_write(f->rwlock);
	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
		fio_mutex_down(f->lock);

	td->file_locks[f->fileno] = td->o.file_lock_mode;
}

void unlock_file(struct thread_data *td, struct fio_file *f)
{
	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
		return;

	if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
		fio_rwlock_unlock(f->rwlock);
	else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
		fio_mutex_up(f->lock);

	td->file_locks[f->fileno] = FILE_LOCK_NONE;
}

void unlock_file_all(struct thread_data *td, struct fio_file *f)
{
	if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
		return;
	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
		unlock_file(td, f);
}

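/*
 * Walk a directory tree and add every regular file found as a job file,
 * descending into subdirectories recursively.
 */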
static int recurse_dir(struct thread_data *td, const char *dirname)
{
	struct dirent *dir;
	int ret = 0;
	DIR *D;

	D = opendir(dirname);
	if (!D) {
		char buf[FIO_VERROR_SIZE];

		snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
		td_verror(td, errno, buf);
		return 1;
	}

	while ((dir = readdir(D)) != NULL) {
		char full_path[PATH_MAX];
		struct stat sb;

		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
			continue;

		sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);

		if (lstat(full_path, &sb) == -1) {
			if (errno != ENOENT) {
				td_verror(td, errno, "stat");
				ret = 1;
				break;
			}
		}

		if (S_ISREG(sb.st_mode)) {
			add_file(td, full_path, 0, 1);
			continue;
		}
		if (!S_ISDIR(sb.st_mode))
			continue;

		ret = recurse_dir(td, full_path);
		if (ret)
			break;
	}

	closedir(D);
	return ret;
}

int add_dir_files(struct thread_data *td, const char *path)
{
	int ret = recurse_dir(td, path);

	if (!ret)
		log_info("fio: opendir added %d files\n", td->o.nr_files);

	return ret;
}

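/*
 * Duplicate the file list of an existing thread into a cloned thread,
 * sharing locks but giving each clone its own fio_file structures.
 */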
void dup_files(struct thread_data *td, struct thread_data *org)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_FILE, "dup files: %d\n", org->files_index);

	if (!org->files)
		return;

	td->files = malloc(org->files_index * sizeof(f));

	if (td->o.file_lock_mode != FILE_LOCK_NONE)
		td->file_locks = malloc(org->files_index);

	for_each_file(org, f, i) {
		struct fio_file *__f;

		__f = alloc_new_file(td);

		if (f->file_name) {
			__f->file_name = smalloc_strdup(f->file_name);
			if (!__f->file_name) {
				log_err("fio: smalloc OOM\n");
				assert(0);
			}

			__f->filetype = f->filetype;
		}

		if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
			__f->lock = f->lock;
		else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
			__f->rwlock = f->rwlock;

		td->files[i] = __f;
	}
}

/*
 * Returns the index that matches the filename, or -1 if not there
 */
int get_fileno(struct thread_data *td, const char *fname)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i)
		if (!strcmp(f->file_name, fname))
			return i;

	return -1;
}

/*
 * For log usage, where we add/open/close files automatically
 */
void free_release_files(struct thread_data *td)
{
	close_files(td);
	td->o.nr_files = 0;
	td->o.open_files = 0;
	td->files_index = 0;
	td->nr_normal_files = 0;
}

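/*
 * Reset per-file position and randomness state at the start of a new loop.
 */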
void fio_file_reset(struct thread_data *td, struct fio_file *f)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		f->last_pos[i] = f->file_offset;
		f->last_start[i] = -1ULL;
	}

	if (fio_file_axmap(f))
		axmap_reset(f->io_axmap);
	else if (fio_file_lfsr(f))
		lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
}

int fio_files_done(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i)
		if (!fio_file_done(f))
			return 0;

	return 1;
}

/* free memory used in initialization phase only */
void filesetup_mem_free(void)
{
	free_already_allocated();
}