Lines Matching refs:td — references to the struct thread_data *td argument/local, one entry per source line, grouped by the function they appear in

144 static int __check_min_rate(struct thread_data *td, struct timeval *now,  in __check_min_rate()  argument
157 if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir]) in __check_min_rate()
163 if (mtime_since(&td->start, now) < 2000) in __check_min_rate()
166 iops += td->this_io_blocks[ddir]; in __check_min_rate()
167 bytes += td->this_io_bytes[ddir]; in __check_min_rate()
168 ratemin += td->o.ratemin[ddir]; in __check_min_rate()
169 rate_iops += td->o.rate_iops[ddir]; in __check_min_rate()
170 rate_iops_min += td->o.rate_iops_min[ddir]; in __check_min_rate()
175 if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) { in __check_min_rate()
176 spent = mtime_since(&td->lastrate[ddir], now); in __check_min_rate()
177 if (spent < td->o.ratecycle) in __check_min_rate()
180 if (td->o.rate[ddir]) { in __check_min_rate()
184 if (bytes < td->rate_bytes[ddir]) { in __check_min_rate()
185 log_err("%s: min rate %u not met\n", td->o.name, in __check_min_rate()
190 rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent; in __check_min_rate()
195 bytes < td->rate_bytes[ddir]) { in __check_min_rate()
197 " %luKB/sec\n", td->o.name, in __check_min_rate()
208 td->o.name, rate_iops); in __check_min_rate()
212 rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent; in __check_min_rate()
217 iops < td->rate_blocks[ddir]) { in __check_min_rate()
219 " got %lu\n", td->o.name, in __check_min_rate()
226 td->rate_bytes[ddir] = bytes; in __check_min_rate()
227 td->rate_blocks[ddir] = iops; in __check_min_rate()
228 memcpy(&td->lastrate[ddir], now, sizeof(*now)); in __check_min_rate()
232 static int check_min_rate(struct thread_data *td, struct timeval *now, in check_min_rate() argument
238 ret |= __check_min_rate(td, now, DDIR_READ); in check_min_rate()
240 ret |= __check_min_rate(td, now, DDIR_WRITE); in check_min_rate()
242 ret |= __check_min_rate(td, now, DDIR_TRIM); in check_min_rate()
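
check_min_rate() above simply ORs the per-direction __check_min_rate() results for READ, WRITE and TRIM. The heart of the per-direction check is the observed rate computed from the byte delta over the elapsed milliseconds (the "rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent" line). The stand-alone sketch below mirrors only that arithmetic; the helper name, units and sample values are illustrative, not fio's code.

    /*
     * Minimal sketch of the rate arithmetic visible above:
     *   rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
     * where "spent" is elapsed milliseconds since the last check.
     */
    #include <stdio.h>

    static unsigned long observed_rate(unsigned long bytes_now,
                                       unsigned long bytes_at_last_check,
                                       unsigned long elapsed_ms)
    {
        if (!elapsed_ms)
            return 0;
        /* bytes moved since the last check, scaled to a per-second figure */
        return ((bytes_now - bytes_at_last_check) * 1000UL) / elapsed_ms;
    }

    int main(void)
    {
        /* e.g. 4 MiB moved in 500 ms comes out to roughly 8 MiB/s */
        printf("%lu bytes/sec\n", observed_rate(4UL << 20, 0, 500));
        return 0;
    }

The listed lines also show the guards around this computation: the check is skipped for the first 2000 ms of a run and is only re-evaluated once per ratecycle interval.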
251 static void cleanup_pending_aio(struct thread_data *td) in cleanup_pending_aio() argument
258 r = io_u_queued_complete(td, 0, NULL); in cleanup_pending_aio()
265 if (td->io_ops->cancel) { in cleanup_pending_aio()
269 io_u_qiter(&td->io_u_all, io_u, i) { in cleanup_pending_aio()
271 r = td->io_ops->cancel(td, io_u); in cleanup_pending_aio()
273 put_io_u(td, io_u); in cleanup_pending_aio()
278 if (td->cur_depth) in cleanup_pending_aio()
279 r = io_u_queued_complete(td, td->cur_depth, NULL); in cleanup_pending_aio()
286 static int fio_io_sync(struct thread_data *td, struct fio_file *f) in fio_io_sync() argument
288 struct io_u *io_u = __get_io_u(td); in fio_io_sync()
297 if (td_io_prep(td, io_u)) { in fio_io_sync()
298 put_io_u(td, io_u); in fio_io_sync()
303 ret = td_io_queue(td, io_u); in fio_io_sync()
305 td_verror(td, io_u->error, "td_io_queue"); in fio_io_sync()
306 put_io_u(td, io_u); in fio_io_sync()
309 if (io_u_queued_complete(td, 1, NULL) < 0) in fio_io_sync()
313 td_verror(td, io_u->error, "td_io_queue"); in fio_io_sync()
317 if (io_u_sync_complete(td, io_u, NULL) < 0) in fio_io_sync()
320 if (td_io_commit(td)) in fio_io_sync()
328 static int fio_file_fsync(struct thread_data *td, struct fio_file *f) in fio_file_fsync() argument
333 return fio_io_sync(td, f); in fio_file_fsync()
335 if (td_io_open_file(td, f)) in fio_file_fsync()
338 ret = fio_io_sync(td, f); in fio_file_fsync()
339 td_io_close_file(td, f); in fio_file_fsync()
343 static inline void __update_tv_cache(struct thread_data *td) in __update_tv_cache() argument
345 fio_gettime(&td->tv_cache, NULL); in __update_tv_cache()
348 static inline void update_tv_cache(struct thread_data *td) in update_tv_cache() argument
350 if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask) in update_tv_cache()
351 __update_tv_cache(td); in update_tv_cache()
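
update_tv_cache() amortizes clock reads: the cached timeval is only refreshed when the low bits of ++td->tv_cache_nr wrap around td->tv_cache_mask, so the underlying fio_gettime() runs roughly once every mask+1 calls. The self-contained sketch below shows just that masking pattern; the struct and field names are stand-ins rather than fio's.

    /*
     * Sketch of the call-count masking in update_tv_cache(): with
     * mask = 2^n - 1, the expensive clock read happens once every 2^n calls.
     */
    #include <stdio.h>

    struct cache {
        unsigned int nr;
        unsigned int mask;   /* must be 2^n - 1 */
        unsigned int reads;  /* how often the clock was actually read */
    };

    static void update(struct cache *c)
    {
        if ((++c->nr & c->mask) == c->mask)
            c->reads++;      /* stands in for __update_tv_cache()/fio_gettime() */
    }

    int main(void)
    {
        struct cache c = { .nr = 0, .mask = 15, .reads = 0 };

        for (int i = 0; i < 1000; i++)
            update(&c);
        printf("clock read %u times for 1000 calls\n", c.reads); /* prints 62 */
        return 0;
    }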
354 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t) in runtime_exceeded() argument
356 if (in_ramp_time(td)) in runtime_exceeded()
358 if (!td->o.timeout) in runtime_exceeded()
360 if (utime_since(&td->epoch, t) >= td->o.timeout) in runtime_exceeded()
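
Pieced together from the fragments above, runtime_exceeded() looks like a simple guard: never exceeded during ramp time or when no timeout is configured, otherwise the microseconds elapsed since td->epoch are compared against td->o.timeout. The stand-alone approximation below keeps that shape; the types, helpers and exact return values are assumptions for illustration.

    /*
     * Self-contained approximation of the runtime_exceeded() decision.
     * Types and helpers are stand-ins, not fio's.
     */
    #include <stdio.h>
    #include <stdint.h>

    struct job {
        int in_ramp;            /* stands in for in_ramp_time(td) */
        uint64_t timeout_us;    /* stands in for td->o.timeout */
        uint64_t elapsed_us;    /* stands in for utime_since(&td->epoch, t) */
    };

    static int runtime_exceeded_sketch(const struct job *j)
    {
        if (j->in_ramp)
            return 0;
        if (!j->timeout_us)
            return 0;
        return j->elapsed_us >= j->timeout_us;
    }

    int main(void)
    {
        struct job j = { .in_ramp = 0,
                         .timeout_us = 60ULL * 1000000,
                         .elapsed_us = 61ULL * 1000000 };

        printf("exceeded=%d\n", runtime_exceeded_sketch(&j));  /* prints 1 */
        return 0;
    }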
366 static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir, in break_on_this_error() argument
371 if (ret < 0 || td->error) { in break_on_this_error()
372 int err = td->error; in break_on_this_error()
379 if (!(td->o.continue_on_error & (1 << eb))) in break_on_this_error()
382 if (td_non_fatal_error(td, eb, err)) { in break_on_this_error()
387 update_error_count(td, err); in break_on_this_error()
388 td_clear_error(td); in break_on_this_error()
391 } else if (td->o.fill_device && err == ENOSPC) { in break_on_this_error()
396 td_clear_error(td); in break_on_this_error()
397 fio_mark_td_terminate(td); in break_on_this_error()
404 update_error_count(td, err); in break_on_this_error()
412 static void check_update_rusage(struct thread_data *td) in check_update_rusage() argument
414 if (td->update_rusage) { in check_update_rusage()
415 td->update_rusage = 0; in check_update_rusage()
416 update_rusage_stat(td); in check_update_rusage()
417 fio_mutex_up(td->rusage_sem); in check_update_rusage()
421 static int wait_for_completions(struct thread_data *td, struct timeval *time, in wait_for_completions() argument
424 const int full = queue_full(td); in wait_for_completions()
431 min_evts = min(td->o.iodepth_batch_complete, td->cur_depth); in wait_for_completions()
435 if (time && (__should_check_rate(td, DDIR_READ) || in wait_for_completions()
436 __should_check_rate(td, DDIR_WRITE) || in wait_for_completions()
437 __should_check_rate(td, DDIR_TRIM))) in wait_for_completions()
441 ret = io_u_queued_complete(td, min_evts, bytes_done); in wait_for_completions()
444 } while (full && (td->cur_depth > td->o.iodepth_low)); in wait_for_completions()
453 static void do_verify(struct thread_data *td, uint64_t verify_bytes) in do_verify() argument
467 for_each_file(td, f, i) { in do_verify()
470 if (fio_io_sync(td, f)) in do_verify()
472 if (file_invalidate_cache(td, f)) in do_verify()
476 check_update_rusage(td); in do_verify()
478 if (td->error) in do_verify()
481 td_set_runstate(td, TD_VERIFYING); in do_verify()
484 while (!td->terminate) { in do_verify()
488 update_tv_cache(td); in do_verify()
489 check_update_rusage(td); in do_verify()
491 if (runtime_exceeded(td, &td->tv_cache)) { in do_verify()
492 __update_tv_cache(td); in do_verify()
493 if (runtime_exceeded(td, &td->tv_cache)) { in do_verify()
494 fio_mark_td_terminate(td); in do_verify()
499 if (flow_threshold_exceeded(td)) in do_verify()
502 if (!td->o.experimental_verify) { in do_verify()
503 io_u = __get_io_u(td); in do_verify()
507 if (get_next_verify(td, io_u)) { in do_verify()
508 put_io_u(td, io_u); in do_verify()
512 if (td_io_prep(td, io_u)) { in do_verify()
513 put_io_u(td, io_u); in do_verify()
517 if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes) in do_verify()
520 while ((io_u = get_io_u(td)) != NULL) { in do_verify()
537 td->io_issues[DDIR_READ]++; in do_verify()
538 put_io_u(td, io_u); in do_verify()
548 put_io_u(td, io_u); in do_verify()
557 if (verify_state_should_stop(td, io_u)) { in do_verify()
558 put_io_u(td, io_u); in do_verify()
562 if (td->o.verify_async) in do_verify()
568 if (!td->o.disable_slat) in do_verify()
571 ret = td_io_queue(td, io_u); in do_verify()
576 clear_io_u(td, io_u); in do_verify()
584 td_verror(td, EIO, "full resid"); in do_verify()
585 put_io_u(td, io_u); in do_verify()
594 td->ts.short_io_u[io_u->ddir]++; in do_verify()
600 requeue_io_u(td, &io_u); in do_verify()
603 ret = io_u_sync_complete(td, io_u, bytes_done); in do_verify()
611 requeue_io_u(td, &io_u); in do_verify()
612 ret2 = td_io_commit(td); in do_verify()
618 td_verror(td, -ret, "td_io_queue"); in do_verify()
622 if (break_on_this_error(td, ddir, &ret)) in do_verify()
631 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); in do_verify()
632 if (full || !td->o.iodepth_batch_complete) in do_verify()
633 ret = wait_for_completions(td, NULL, bytes_done); in do_verify()
639 check_update_rusage(td); in do_verify()
641 if (!td->error) { in do_verify()
642 min_events = td->cur_depth; in do_verify()
645 ret = io_u_queued_complete(td, min_events, NULL); in do_verify()
647 cleanup_pending_aio(td); in do_verify()
649 td_set_runstate(td, TD_RUNNING); in do_verify()
654 static unsigned int exceeds_number_ios(struct thread_data *td) in exceeds_number_ios() argument
658 if (!td->o.number_ios) in exceeds_number_ios()
661 number_ios = ddir_rw_sum(td->io_blocks); in exceeds_number_ios()
662 number_ios += td->io_u_queued + td->io_u_in_flight; in exceeds_number_ios()
664 return number_ios >= (td->o.number_ios * td->loops); in exceeds_number_ios()
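
exceeds_number_ios() counts completed blocks plus anything still queued or in flight against the number_ios option, scaled by the number of loops the job has run. A minimal sketch of that comparison (parameter names are illustrative, not fio's):

    /*
     * Sketch of the exceeds_number_ios() logic: completed blocks plus queued
     * and in-flight units are compared against number_ios * loops.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int exceeds_number_ios_sketch(uint64_t completed_blocks,
                                         uint64_t queued, uint64_t in_flight,
                                         uint64_t number_ios, uint64_t loops)
    {
        if (!number_ios)                     /* option not set: never a limit */
            return 0;

        uint64_t issued = completed_blocks + queued + in_flight;

        return issued >= number_ios * loops;
    }

    int main(void)
    {
        /* 990 done, 8 queued, 2 in flight against number_ios=1000, loop 1 */
        printf("%d\n", exceeds_number_ios_sketch(990, 8, 2, 1000, 1)); /* 1 */
        return 0;
    }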
667 static int io_issue_bytes_exceeded(struct thread_data *td) in io_issue_bytes_exceeded() argument
671 if (td_rw(td)) in io_issue_bytes_exceeded()
672 bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE]; in io_issue_bytes_exceeded()
673 else if (td_write(td)) in io_issue_bytes_exceeded()
674 bytes = td->io_issue_bytes[DDIR_WRITE]; in io_issue_bytes_exceeded()
675 else if (td_read(td)) in io_issue_bytes_exceeded()
676 bytes = td->io_issue_bytes[DDIR_READ]; in io_issue_bytes_exceeded()
678 bytes = td->io_issue_bytes[DDIR_TRIM]; in io_issue_bytes_exceeded()
680 if (td->o.io_limit) in io_issue_bytes_exceeded()
681 limit = td->o.io_limit; in io_issue_bytes_exceeded()
683 limit = td->o.size; in io_issue_bytes_exceeded()
685 limit *= td->loops; in io_issue_bytes_exceeded()
686 return bytes >= limit || exceeds_number_ios(td); in io_issue_bytes_exceeded()
689 static int io_complete_bytes_exceeded(struct thread_data *td) in io_complete_bytes_exceeded() argument
693 if (td_rw(td)) in io_complete_bytes_exceeded()
694 bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE]; in io_complete_bytes_exceeded()
695 else if (td_write(td)) in io_complete_bytes_exceeded()
696 bytes = td->this_io_bytes[DDIR_WRITE]; in io_complete_bytes_exceeded()
697 else if (td_read(td)) in io_complete_bytes_exceeded()
698 bytes = td->this_io_bytes[DDIR_READ]; in io_complete_bytes_exceeded()
700 bytes = td->this_io_bytes[DDIR_TRIM]; in io_complete_bytes_exceeded()
702 if (td->o.io_limit) in io_complete_bytes_exceeded()
703 limit = td->o.io_limit; in io_complete_bytes_exceeded()
705 limit = td->o.size; in io_complete_bytes_exceeded()
707 limit *= td->loops; in io_complete_bytes_exceeded()
708 return bytes >= limit || exceeds_number_ios(td); in io_complete_bytes_exceeded()
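
io_issue_bytes_exceeded() and io_complete_bytes_exceeded() share the same limit logic and differ only in whether issued or completed bytes are compared: the per-direction byte counters are summed, io_limit takes precedence over size when set, and the limit is scaled by the loop count. A sketch of that shared part (names are illustrative):

    /*
     * Sketch of the shared limit selection: io_limit, when set, overrides
     * size, and the result is scaled by the number of loops run.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int bytes_exceeded_sketch(uint64_t bytes, uint64_t io_limit,
                                     uint64_t size, uint64_t loops)
    {
        uint64_t limit = io_limit ? io_limit : size;

        limit *= loops;
        return bytes >= limit;
    }

    int main(void)
    {
        /* size=1 GiB, no io_limit, second loop: 1.5 GiB done is not yet over 2 GiB */
        printf("%d\n", bytes_exceeded_sketch(3ULL << 29, 0, 1ULL << 30, 2)); /* 0 */
        return 0;
    }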
717 static uint64_t do_io(struct thread_data *td) in do_io() argument
724 if (in_ramp_time(td)) in do_io()
725 td_set_runstate(td, TD_RAMP); in do_io()
727 td_set_runstate(td, TD_RUNNING); in do_io()
729 lat_target_init(td); in do_io()
731 total_bytes = td->o.size; in do_io()
736 if (td_write(td) && td_random(td) && td->o.norandommap) in do_io()
737 total_bytes = max(total_bytes, (uint64_t) td->o.io_limit); in do_io()
743 if (td->o.verify != VERIFY_NONE && in do_io()
744 (td_write(td) && td->o.verify_backlog)) in do_io()
745 total_bytes += td->o.size; in do_io()
747 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || in do_io()
748 (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) || in do_io()
749 td->o.time_based) { in do_io()
755 check_update_rusage(td); in do_io()
757 if (td->terminate || td->done) in do_io()
760 update_tv_cache(td); in do_io()
762 if (runtime_exceeded(td, &td->tv_cache)) { in do_io()
763 __update_tv_cache(td); in do_io()
764 if (runtime_exceeded(td, &td->tv_cache)) { in do_io()
765 fio_mark_td_terminate(td); in do_io()
770 if (flow_threshold_exceeded(td)) in do_io()
776 io_u = get_io_u(td); in do_io()
785 if (td->o.latency_target) in do_io()
797 if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ && in do_io()
798 ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) { in do_io()
800 if (!td->o.verify_pattern_bytes) { in do_io()
801 io_u->rand_seed = __rand(&td->verify_state); in do_io()
803 io_u->rand_seed *= __rand(&td->verify_state); in do_io()
806 if (verify_state_should_stop(td, io_u)) { in do_io()
807 put_io_u(td, io_u); in do_io()
811 if (td->o.verify_async) in do_io()
815 td_set_runstate(td, TD_VERIFYING); in do_io()
816 } else if (in_ramp_time(td)) in do_io()
817 td_set_runstate(td, TD_RAMP); in do_io()
819 td_set_runstate(td, TD_RUNNING); in do_io()
826 if (td_write(td) && io_u->ddir == DDIR_WRITE && in do_io()
827 td->o.do_verify && in do_io()
828 td->o.verify != VERIFY_NONE && in do_io()
829 !td->o.experimental_verify) in do_io()
830 log_io_piece(td, io_u); in do_io()
832 ret = td_io_queue(td, io_u); in do_io()
837 unlog_io_piece(td, io_u); in do_io()
838 clear_io_u(td, io_u); in do_io()
845 trim_io_piece(td, io_u); in do_io()
851 unlog_io_piece(td, io_u); in do_io()
852 td_verror(td, EIO, "full resid"); in do_io()
853 put_io_u(td, io_u); in do_io()
862 td->ts.short_io_u[io_u->ddir]++; in do_io()
867 requeue_io_u(td, &io_u); in do_io()
870 if (__should_check_rate(td, DDIR_READ) || in do_io()
871 __should_check_rate(td, DDIR_WRITE) || in do_io()
872 __should_check_rate(td, DDIR_TRIM)) in do_io()
875 ret = io_u_sync_complete(td, io_u, bytes_done); in do_io()
887 if (td->io_ops->commit == NULL) in do_io()
888 io_u_queued(td, io_u); in do_io()
892 unlog_io_piece(td, io_u); in do_io()
893 requeue_io_u(td, &io_u); in do_io()
894 ret2 = td_io_commit(td); in do_io()
900 put_io_u(td, io_u); in do_io()
904 if (break_on_this_error(td, ddir, &ret)) in do_io()
913 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); in do_io()
914 if (full || !td->o.iodepth_batch_complete) in do_io()
915 ret = wait_for_completions(td, &comp_time, bytes_done); in do_io()
918 if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO)) in do_io()
921 if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) { in do_io()
922 if (check_min_rate(td, &comp_time, bytes_done)) { in do_io()
924 fio_terminate_threads(td->groupid); in do_io()
925 td_verror(td, EIO, "check_min_rate"); in do_io()
929 if (!in_ramp_time(td) && td->o.latency_target) in do_io()
930 lat_target_check(td); in do_io()
932 if (td->o.thinktime) { in do_io()
935 b = ddir_rw_sum(td->io_blocks); in do_io()
936 if (!(b % td->o.thinktime_blocks)) { in do_io()
939 io_u_quiesce(td); in do_io()
941 if (td->o.thinktime_spin) in do_io()
942 usec_spin(td->o.thinktime_spin); in do_io()
944 left = td->o.thinktime - td->o.thinktime_spin; in do_io()
946 usec_sleep(td, left); in do_io()
951 check_update_rusage(td); in do_io()
953 if (td->trim_entries) in do_io()
954 log_err("fio: %lu trim entries leaked?\n", td->trim_entries); in do_io()
956 if (td->o.fill_device && td->error == ENOSPC) { in do_io()
957 td->error = 0; in do_io()
958 fio_mark_td_terminate(td); in do_io()
960 if (!td->error) { in do_io()
963 i = td->cur_depth; in do_io()
965 ret = io_u_queued_complete(td, i, bytes_done); in do_io()
966 if (td->o.fill_device && td->error == ENOSPC) in do_io()
967 td->error = 0; in do_io()
970 if (should_fsync(td) && td->o.end_fsync) { in do_io()
971 td_set_runstate(td, TD_FSYNCING); in do_io()
973 for_each_file(td, f, i) { in do_io()
974 if (!fio_file_fsync(td, f)) in do_io()
982 cleanup_pending_aio(td); in do_io()
987 if (!ddir_rw_sum(td->this_io_bytes)) in do_io()
988 td->done = 1; in do_io()
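
One detail of the do_io() loop worth isolating is the thinktime throttle around listed lines 932-946: every thinktime_blocks completed blocks the queue is quiesced, the first thinktime_spin microseconds of the pause are busy-spun, and the remainder of thinktime is slept. The stand-alone sketch below renders that split; the spin/sleep helpers and the clamping are placeholders, not fio's code.

    /*
     * Sketch of the thinktime handling in do_io(): on every
     * thinktime_blocks boundary, spin for thinktime_spin usecs and
     * sleep off the rest of thinktime.
     */
    #include <stdio.h>
    #include <stdint.h>

    static void do_spin(uint64_t usec)  { (void)usec; /* busy-wait placeholder */ }
    static void do_sleep(uint64_t usec) { (void)usec; /* blocking-sleep placeholder */ }

    static void maybe_think(uint64_t total_blocks, uint64_t thinktime,
                            uint64_t thinktime_spin, uint64_t thinktime_blocks)
    {
        if (!thinktime)
            return;
        if (!thinktime_blocks)
            thinktime_blocks = 1;          /* treat "unset" as every block */
        if (total_blocks % thinktime_blocks)
            return;                        /* only pause on block-count boundaries */

        if (thinktime_spin)
            do_spin(thinktime_spin);       /* burn the first part of the pause */

        uint64_t left = thinktime > thinktime_spin ? thinktime - thinktime_spin : 0;
        if (left)
            do_sleep(left);                /* sleep off whatever remains */
    }

    int main(void)
    {
        maybe_think(1000, 500, 100, 100);  /* boundary hit: spin 100us, sleep 400us */
        puts("paused");
        return 0;
    }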
993 static void cleanup_io_u(struct thread_data *td) in cleanup_io_u() argument
997 while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) { in cleanup_io_u()
999 if (td->io_ops->io_u_free) in cleanup_io_u()
1000 td->io_ops->io_u_free(td, io_u); in cleanup_io_u()
1005 free_io_mem(td); in cleanup_io_u()
1007 io_u_rexit(&td->io_u_requeues); in cleanup_io_u()
1008 io_u_qexit(&td->io_u_freelist); in cleanup_io_u()
1009 io_u_qexit(&td->io_u_all); in cleanup_io_u()
1011 if (td->last_write_comp) in cleanup_io_u()
1012 sfree(td->last_write_comp); in cleanup_io_u()
1015 static int init_io_u(struct thread_data *td) in init_io_u() argument
1023 max_units = td->o.iodepth; in init_io_u()
1024 max_bs = td_max_bs(td); in init_io_u()
1025 min_write = td->o.min_bs[DDIR_WRITE]; in init_io_u()
1026 td->orig_buffer_size = (unsigned long long) max_bs in init_io_u()
1029 if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td))) in init_io_u()
1033 err += io_u_rinit(&td->io_u_requeues, td->o.iodepth); in init_io_u()
1034 err += io_u_qinit(&td->io_u_freelist, td->o.iodepth); in init_io_u()
1035 err += io_u_qinit(&td->io_u_all, td->o.iodepth); in init_io_u()
1048 if (td->o.odirect || td->o.mem_align || td->o.oatomic || in init_io_u()
1049 (td->io_ops->flags & FIO_RAWIO)) in init_io_u()
1050 td->orig_buffer_size += page_mask + td->o.mem_align; in init_io_u()
1052 if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) { in init_io_u()
1055 bs = td->orig_buffer_size + td->o.hugepage_size - 1; in init_io_u()
1056 td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1); in init_io_u()
1059 if (td->orig_buffer_size != (size_t) td->orig_buffer_size) { in init_io_u()
1064 if (data_xfer && allocate_io_mem(td)) in init_io_u()
1067 if (td->o.odirect || td->o.mem_align || td->o.oatomic || in init_io_u()
1068 (td->io_ops->flags & FIO_RAWIO)) in init_io_u()
1069 p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align; in init_io_u()
1071 p = td->orig_buffer; in init_io_u()
1078 if (td->terminate) in init_io_u()
1096 if (td_write(td)) in init_io_u()
1097 io_u_fill_buffer(td, io_u, min_write, max_bs); in init_io_u()
1098 if (td_write(td) && td->o.verify_pattern_bytes) { in init_io_u()
1103 fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0); in init_io_u()
1109 io_u_qpush(&td->io_u_freelist, io_u); in init_io_u()
1115 io_u_qpush(&td->io_u_all, io_u); in init_io_u()
1117 if (td->io_ops->io_u_init) { in init_io_u()
1118 int ret = td->io_ops->io_u_init(td, io_u); in init_io_u()
1129 if (td->o.verify != VERIFY_NONE) { in init_io_u()
1130 td->last_write_comp = scalloc(max_units, sizeof(uint64_t)); in init_io_u()
1131 if (!td->last_write_comp) { in init_io_u()
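
When shm or mmap hugepages are selected, init_io_u() rounds the total buffer allocation up to a hugepage boundary (the "bs = orig_buffer_size + hugepage_size - 1; ... & ~(hugepage_size - 1)" lines). That is the standard power-of-two round-up, shown in isolation below; it assumes the alignment is a power of two, which the bitmask requires.

    /*
     * The hugepage rounding from init_io_u(), in isolation:
     *   bs = size + align - 1; size = bs & ~(align - 1);
     * Only valid when align is a power of two.
     */
    #include <stdio.h>

    static unsigned long long round_up_pow2(unsigned long long size,
                                            unsigned long long align)
    {
        return (size + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        /* a 5 MiB buffer rounds up to 6 MiB with 2 MiB hugepages */
        printf("%llu\n", round_up_pow2(5ULL << 20, 2ULL << 20));
        return 0;
    }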
1140 static int switch_ioscheduler(struct thread_data *td) in switch_ioscheduler() argument
1146 if (td->io_ops->flags & FIO_DISKLESSIO) in switch_ioscheduler()
1149 sprintf(tmp, "%s/queue/scheduler", td->sysfs_root); in switch_ioscheduler()
1158 td_verror(td, errno, "fopen iosched"); in switch_ioscheduler()
1165 ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f); in switch_ioscheduler()
1167 td_verror(td, errno, "fwrite"); in switch_ioscheduler()
1179 td_verror(td, errno, "fread"); in switch_ioscheduler()
1186 sprintf(tmp2, "[%s]", td->o.ioscheduler); in switch_ioscheduler()
1188 log_err("fio: io scheduler %s not found\n", td->o.ioscheduler); in switch_ioscheduler()
1189 td_verror(td, EINVAL, "iosched_switch"); in switch_ioscheduler()
1198 static int keep_running(struct thread_data *td) in keep_running() argument
1202 if (td->done) in keep_running()
1204 if (td->o.time_based) in keep_running()
1206 if (td->o.loops) { in keep_running()
1207 td->o.loops--; in keep_running()
1210 if (exceeds_number_ios(td)) in keep_running()
1213 if (td->o.io_limit) in keep_running()
1214 limit = td->o.io_limit; in keep_running()
1216 limit = td->o.size; in keep_running()
1218 if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) { in keep_running()
1225 diff = limit - ddir_rw_sum(td->io_bytes); in keep_running()
1226 if (diff < td_max_bs(td)) in keep_running()
1229 if (fio_files_done(td)) in keep_running()
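
keep_running() decides whether the job starts another pass. One subtlety visible in the fragments: even when the byte limit has not been reached, the job stops if what remains (limit minus bytes done) is smaller than the largest block size it could issue, since no further I/O would fit. The simplified sketch below covers only that branch and invents its own return convention (1 = keep going), ignoring time_based, loops and number_ios.

    /*
     * Simplified sketch of the "remaining bytes too small for another
     * block" check in keep_running(). Stand-in names, reduced logic.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int keep_running_sketch(uint64_t bytes_done, uint64_t io_limit,
                                   uint64_t size, uint64_t max_bs)
    {
        uint64_t limit = io_limit ? io_limit : size;

        if (limit != (uint64_t)-1 && bytes_done < limit) {
            uint64_t diff = limit - bytes_done;

            if (diff < max_bs)     /* too little left for even one more block */
                return 0;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* 1 MiB left of a 1 GiB job, but max block size is 4 MiB: stop */
        printf("%d\n", keep_running_sketch((1ULL << 30) - (1ULL << 20),
                                           0, 1ULL << 30, 4ULL << 20)); /* 0 */
        return 0;
    }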
1258 static uint64_t do_dry_run(struct thread_data *td) in do_dry_run() argument
1262 td_set_runstate(td, TD_RUNNING); in do_dry_run()
1264 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || in do_dry_run()
1265 (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) { in do_dry_run()
1269 if (td->terminate || td->done) in do_dry_run()
1272 io_u = get_io_u(td); in do_dry_run()
1280 td->io_issues[acct_ddir(io_u)]++; in do_dry_run()
1282 io_u_mark_depth(td, 1); in do_dry_run()
1283 td->ts.total_io_u[io_u->ddir]++; in do_dry_run()
1286 if (td_write(td) && io_u->ddir == DDIR_WRITE && in do_dry_run()
1287 td->o.do_verify && in do_dry_run()
1288 td->o.verify != VERIFY_NONE && in do_dry_run()
1289 !td->o.experimental_verify) in do_dry_run()
1290 log_io_piece(td, io_u); in do_dry_run()
1292 ret = io_u_sync_complete(td, io_u, bytes_done); in do_dry_run()
1306 struct thread_data *td = data; in thread_main() local
1307 struct thread_options *o = &td->o; in thread_main()
1314 td->pid = getpid(); in thread_main()
1316 td->pid = gettid(); in thread_main()
1320 dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid); in thread_main()
1323 fio_server_send_start(td); in thread_main()
1325 INIT_FLIST_HEAD(&td->io_log_list); in thread_main()
1326 INIT_FLIST_HEAD(&td->io_hist_list); in thread_main()
1327 INIT_FLIST_HEAD(&td->verify_list); in thread_main()
1328 INIT_FLIST_HEAD(&td->trim_list); in thread_main()
1329 INIT_FLIST_HEAD(&td->next_rand_list); in thread_main()
1330 pthread_mutex_init(&td->io_u_lock, NULL); in thread_main()
1331 td->io_hist_tree = RB_ROOT; in thread_main()
1334 pthread_cond_init(&td->verify_cond, &attr); in thread_main()
1335 pthread_cond_init(&td->free_cond, &attr); in thread_main()
1337 td_set_runstate(td, TD_INITIALIZED); in thread_main()
1341 fio_mutex_down(td->mutex); in thread_main()
1349 td_verror(td, errno, "setgid"); in thread_main()
1353 td_verror(td, errno, "setuid"); in thread_main()
1370 ret = fio_cpus_split(&o->cpumask, td->thread_number - 1); in thread_main()
1374 td_verror(td, EINVAL, "cpus_split"); in thread_main()
1378 ret = fio_setaffinity(td->pid, o->cpumask); in thread_main()
1380 td_verror(td, errno, "cpu_set_affinity"); in thread_main()
1392 td_verror(td, errno, "Does not support NUMA API\n"); in thread_main()
1401 td_verror(td, errno, \ in thread_main()
1437 if (fio_pin_memory(td)) in thread_main()
1444 if (init_iolog(td)) in thread_main()
1447 if (init_io_u(td)) in thread_main()
1450 if (o->verify_async && verify_async_init(td)) in thread_main()
1457 td_verror(td, errno, "ioprio_set"); in thread_main()
1462 if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt)) in thread_main()
1467 td_verror(td, errno, "nice"); in thread_main()
1471 if (o->ioscheduler && switch_ioscheduler(td)) in thread_main()
1474 if (!o->create_serialize && setup_files(td)) in thread_main()
1477 if (td_io_init(td)) in thread_main()
1480 if (init_random_map(td)) in thread_main()
1487 if (pre_read_files(td) < 0) in thread_main()
1491 if (td->flags & TD_F_COMPRESS_LOG) in thread_main()
1492 tp_init(&td->tp_data); in thread_main()
1494 fio_verify_init(td); in thread_main()
1496 fio_gettime(&td->epoch, NULL); in thread_main()
1497 fio_getrusage(&td->ru_start); in thread_main()
1499 while (keep_running(td)) { in thread_main()
1502 fio_gettime(&td->start, NULL); in thread_main()
1503 memcpy(&td->bw_sample_time, &td->start, sizeof(td->start)); in thread_main()
1504 memcpy(&td->iops_sample_time, &td->start, sizeof(td->start)); in thread_main()
1505 memcpy(&td->tv_cache, &td->start, sizeof(td->start)); in thread_main()
1509 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time, in thread_main()
1510 sizeof(td->bw_sample_time)); in thread_main()
1511 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time, in thread_main()
1512 sizeof(td->bw_sample_time)); in thread_main()
1513 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time, in thread_main()
1514 sizeof(td->bw_sample_time)); in thread_main()
1518 clear_io_state(td); in thread_main()
1520 prune_io_piece_log(td); in thread_main()
1522 if (td->o.verify_only && (td_write(td) || td_rw(td))) in thread_main()
1523 verify_bytes = do_dry_run(td); in thread_main()
1525 verify_bytes = do_io(td); in thread_main()
1530 if (td_read(td) && td->io_bytes[DDIR_READ]) { in thread_main()
1531 elapsed = mtime_since_now(&td->start); in thread_main()
1532 td->ts.runtime[DDIR_READ] += elapsed; in thread_main()
1534 if (td_write(td) && td->io_bytes[DDIR_WRITE]) { in thread_main()
1535 elapsed = mtime_since_now(&td->start); in thread_main()
1536 td->ts.runtime[DDIR_WRITE] += elapsed; in thread_main()
1538 if (td_trim(td) && td->io_bytes[DDIR_TRIM]) { in thread_main()
1539 elapsed = mtime_since_now(&td->start); in thread_main()
1540 td->ts.runtime[DDIR_TRIM] += elapsed; in thread_main()
1542 fio_gettime(&td->start, NULL); in thread_main()
1545 if (td->error || td->terminate) in thread_main()
1550 (td->io_ops->flags & FIO_UNIDIR)) in thread_main()
1553 clear_io_state(td); in thread_main()
1555 fio_gettime(&td->start, NULL); in thread_main()
1557 do_verify(td, verify_bytes); in thread_main()
1560 td->ts.runtime[DDIR_READ] += mtime_since_now(&td->start); in thread_main()
1561 fio_gettime(&td->start, NULL); in thread_main()
1564 if (td->error || td->terminate) in thread_main()
1568 update_rusage_stat(td); in thread_main()
1569 td->ts.total_run_time = mtime_since_now(&td->epoch); in thread_main()
1570 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ]; in thread_main()
1571 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE]; in thread_main()
1572 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM]; in thread_main()
1574 if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) && in thread_main()
1575 (td->o.verify != VERIFY_NONE && td_write(td))) { in thread_main()
1579 state = get_all_io_list(td->thread_number, &sz); in thread_main()
1586 fio_unpin_memory(td); in thread_main()
1588 fio_writeout_logs(td); in thread_main()
1590 if (td->flags & TD_F_COMPRESS_LOG) in thread_main()
1591 tp_exit(&td->tp_data); in thread_main()
1597 fio_terminate_threads(td->groupid); in thread_main()
1600 if (td->error) in thread_main()
1601 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error, in thread_main()
1602 td->verror); in thread_main()
1605 verify_async_exit(td); in thread_main()
1607 close_and_free_files(td); in thread_main()
1608 cleanup_io_u(td); in thread_main()
1609 close_ioengine(td); in thread_main()
1610 cgroup_shutdown(td, &cgroup_mnt); in thread_main()
1611 verify_free_state(td); in thread_main()
1616 td_verror(td, ret, "fio_cpuset_exit"); in thread_main()
1623 write_iolog_close(td); in thread_main()
1625 fio_mutex_remove(td->mutex); in thread_main()
1626 td->mutex = NULL; in thread_main()
1628 td_set_runstate(td, TD_EXITED); in thread_main()
1634 check_update_rusage(td); in thread_main()
1636 return (void *) (uintptr_t) td->error; in thread_main()
1646 struct thread_data *td; in fork_main() local
1664 td = data + offset * sizeof(struct thread_data); in fork_main()
1665 ret = thread_main(td); in fork_main()
1670 static void dump_td_info(struct thread_data *td) in dump_td_info() argument
1673 "be stuck. Doing forceful exit of this job.\n", td->o.name, in dump_td_info()
1674 (unsigned long) time_since_now(&td->terminate_time)); in dump_td_info()
1683 struct thread_data *td; in reap_threads() local
1691 for_each_td(td, i) { in reap_threads()
1698 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio")) in reap_threads()
1703 if (!td->pid) { in reap_threads()
1707 if (td->runstate == TD_REAPED) in reap_threads()
1709 if (td->o.use_thread) { in reap_threads()
1710 if (td->runstate == TD_EXITED) { in reap_threads()
1711 td_set_runstate(td, TD_REAPED); in reap_threads()
1718 if (td->runstate == TD_EXITED) in reap_threads()
1724 ret = waitpid(td->pid, &status, flags); in reap_threads()
1728 (int) td->pid, td->runstate); in reap_threads()
1729 td->sig = ECHILD; in reap_threads()
1730 td_set_runstate(td, TD_REAPED); in reap_threads()
1734 } else if (ret == td->pid) { in reap_threads()
1740 (int) td->pid, sig); in reap_threads()
1741 td->sig = sig; in reap_threads()
1742 td_set_runstate(td, TD_REAPED); in reap_threads()
1746 if (WEXITSTATUS(status) && !td->error) in reap_threads()
1747 td->error = WEXITSTATUS(status); in reap_threads()
1749 td_set_runstate(td, TD_REAPED); in reap_threads()
1758 if (td->terminate && in reap_threads()
1759 time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) { in reap_threads()
1760 dump_td_info(td); in reap_threads()
1761 td_set_runstate(td, TD_REAPED); in reap_threads()
1772 (*m_rate) -= ddir_rw_sum(td->o.ratemin); in reap_threads()
1773 (*t_rate) -= ddir_rw_sum(td->o.rate); in reap_threads()
1774 if (!td->pid) in reap_threads()
1777 if (td->error) in reap_threads()
1780 done_secs += mtime_since_now(&td->epoch) / 1000; in reap_threads()
1781 profile_td_exit(td); in reap_threads()
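
reap_threads() polls each forked job with waitpid() and decodes the status: a signal death records the signal in td->sig, a non-zero exit code becomes td->error, and a job that was asked to terminate but has not exited within FIO_REAP_TIMEOUT is dumped via dump_td_info() and force-reaped. The status decoding itself is plain POSIX; a minimal, self-contained example follows (the child and its exit code are invented for the demo).

    /*
     * Minimal example of the waitpid() status decoding used in reap_threads():
     * WIFSIGNALED/WTERMSIG for signal deaths, WEXITSTATUS for normal exits.
     */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid < 0)
            return 1;
        if (pid == 0)
            _exit(7);                       /* child: pretend the job failed with 7 */

        int status;
        if (waitpid(pid, &status, 0) == pid) {
            if (WIFSIGNALED(status))
                printf("killed by signal %d\n", WTERMSIG(status));
            else if (WIFEXITED(status))
                printf("exited with %d\n", WEXITSTATUS(status));  /* prints 7 */
        }
        return 0;
    }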
1838 static int fio_verify_load_state(struct thread_data *td) in fio_verify_load_state() argument
1842 if (!td->o.verify_state) in fio_verify_load_state()
1848 ret = fio_server_get_verify_state(td->o.name, in fio_verify_load_state()
1849 td->thread_number - 1, &data); in fio_verify_load_state()
1851 verify_convert_assign_state(td, data); in fio_verify_load_state()
1853 ret = verify_load_state(td, "local"); in fio_verify_load_state()
1870 struct thread_data *td; in run_threads() local
1882 for_each_td(td, i) { in run_threads()
1883 if (td->o.use_thread) in run_threads()
1909 for_each_td(td, i) { in run_threads()
1910 print_status_init(td->thread_number - 1); in run_threads()
1912 if (!td->o.create_serialize) in run_threads()
1915 if (fio_verify_load_state(td)) in run_threads()
1923 if (setup_files(td)) { in run_threads()
1926 if (td->error) in run_threads()
1928 (int) td->pid, td->error, td->verror); in run_threads()
1929 td_set_runstate(td, TD_REAPED); in run_threads()
1940 for_each_file(td, f, j) { in run_threads()
1942 td_io_close_file(td, f); in run_threads()
1960 for_each_td(td, i) { in run_threads()
1961 if (td->runstate != TD_NOT_CREATED) in run_threads()
1968 if (td->terminate) { in run_threads()
1973 if (td->o.start_delay) { in run_threads()
1976 if (td->o.start_delay > spent) in run_threads()
1980 if (td->o.stonewall && (nr_started || nr_running)) { in run_threads()
1982 td->o.name); in run_threads()
1986 init_disk_util(td); in run_threads()
1988 td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED); in run_threads()
1989 td->update_rusage = 0; in run_threads()
1995 td_set_runstate(td, TD_CREATED); in run_threads()
1996 map[this_jobs++] = td; in run_threads()
1999 if (td->o.use_thread) { in run_threads()
2003 ret = pthread_create(&td->thread, NULL, in run_threads()
2004 thread_main, td); in run_threads()
2011 ret = pthread_detach(td->thread); in run_threads()
2050 td = map[i]; in run_threads()
2051 if (!td) in run_threads()
2053 if (td->runstate == TD_INITIALIZED) { in run_threads()
2056 } else if (td->runstate >= TD_EXITED) { in run_threads()
2069 td = map[i]; in run_threads()
2070 if (!td) in run_threads()
2072 kill(td->pid, SIGTERM); in run_threads()
2080 for_each_td(td, i) { in run_threads()
2081 if (td->runstate != TD_INITIALIZED) in run_threads()
2084 if (in_ramp_time(td)) in run_threads()
2085 td_set_runstate(td, TD_RAMP); in run_threads()
2087 td_set_runstate(td, TD_RUNNING); in run_threads()
2090 m_rate += ddir_rw_sum(td->o.ratemin); in run_threads()
2091 t_rate += ddir_rw_sum(td->o.rate); in run_threads()
2093 fio_mutex_up(td->mutex); in run_threads()
2188 struct thread_data *td; in fio_backend() local
2237 for_each_td(td, i) { in fio_backend()
2238 fio_options_free(td); in fio_backend()
2239 if (td->rusage_sem) { in fio_backend()
2240 fio_mutex_remove(td->rusage_sem); in fio_backend()
2241 td->rusage_sem = NULL; in fio_backend()