/*
 * Custom fio(1) engine that submits synchronous atomic writes to a file.
 *
 * Copyright (C) 2013 Fusion-io, Inc.
 * Author: Santhosh Kumar Koundinya (skoundinya@fusionio.com).
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License version
 * 2 for more details.
 *
 * You should have received a copy of the GNU General Public License Version 2
 * along with this program; if not see <http://www.gnu.org/licenses/>
 */
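
/*
 * Usage sketch: select the engine by its registered name. The device path
 * below is only an illustrative placeholder.
 *
 *   fio --name=atomic-write --ioengine=fusion-aw-sync --rw=write --bs=4k \
 *       --filename=/dev/fioa
 *
 * Only write requests are accepted; see queue() below.
 */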

#include <stdlib.h>
#include <stdint.h>

#include "../fio.h"

#include <nvm/nvm_primitives.h>

#define NUM_ATOMIC_CAPABILITIES (5)

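/*
 * Per-file engine data, attached with FILE_SET_ENG_DATA(): the NVM handle
 * plus the device-reported alignment and size limits that queue() checks
 * each request against.
 */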
struct fas_data {
	nvm_handle_t nvm_handle;
	size_t xfer_buf_align;
	size_t xfer_buflen_align;
	size_t xfer_buflen_max;
	size_t sector_size;
};

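/*
 * Validate the buffer address and length against the limits cached at open
 * time, then submit a single synchronous atomic write. The request completes
 * inline, so FIO_Q_COMPLETED is returned on success.
 */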
static int queue(struct thread_data *td, struct io_u *io_u)
{
	struct fas_data *d = FILE_ENG_DATA(io_u->file);
	int rc;

	if (io_u->ddir != DDIR_WRITE) {
		td_vmsg(td, EINVAL, "only writes supported", "io_u->ddir");
		rc = -EINVAL;
		goto out;
	}

	if ((size_t) io_u->xfer_buf % d->xfer_buf_align) {
		td_vmsg(td, EINVAL, "unaligned data buffer", "io_u->xfer_buf");
		rc = -EINVAL;
		goto out;
	}

	if (io_u->xfer_buflen % d->xfer_buflen_align) {
		td_vmsg(td, EINVAL, "unaligned data size", "io_u->xfer_buflen");
		rc = -EINVAL;
		goto out;
	}

	if (io_u->xfer_buflen > d->xfer_buflen_max) {
		td_vmsg(td, EINVAL, "data too big", "io_u->xfer_buflen");
		rc = -EINVAL;
		goto out;
	}

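	/*
	 * nvm_atomic_write() addresses the target in sectors, hence the
	 * byte-offset conversion; it returns -1 with the cause in errno.
	 */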
	rc = nvm_atomic_write(d->nvm_handle, (uint64_t) io_u->xfer_buf,
		io_u->xfer_buflen, io_u->offset / d->sector_size);
	if (rc == -1) {
		td_verror(td, errno, "nvm_atomic_write");
		rc = -errno;
		goto out;
	}
	rc = FIO_Q_COMPLETED;
out:
	if (rc < 0)
		io_u->error = -rc;

	return rc;
}

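/*
 * Open the file with the generic helper, obtain an NVM handle against the
 * API version the engine was compiled with, query the atomic-write
 * capabilities and cache them in the per-file engine data. Any failure
 * unwinds through the labels at the bottom.
 */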
static int open_file(struct thread_data *td, struct fio_file *f)
{
	int rc;
	int fio_unused close_file_rc;
	struct fas_data *d;
	nvm_version_t nvm_version;
	nvm_capability_t nvm_capability[NUM_ATOMIC_CAPABILITIES];

	d = malloc(sizeof(*d));
	if (!d) {
		td_verror(td, ENOMEM, "malloc");
		rc = ENOMEM;
		goto error;
	}
	d->nvm_handle = -1;
	FILE_SET_ENG_DATA(f, d);

	rc = generic_open_file(td, f);

	if (rc)
		goto free_engine_data;

	/* Set the version of the library as seen when the engine is compiled */
	nvm_version.major = NVM_PRIMITIVES_API_MAJOR;
	nvm_version.minor = NVM_PRIMITIVES_API_MINOR;
	nvm_version.micro = NVM_PRIMITIVES_API_MICRO;

	d->nvm_handle = nvm_get_handle(f->fd, &nvm_version);
	if (d->nvm_handle == -1) {
		td_vmsg(td, errno, "nvm_get_handle failed", "nvm_get_handle");
		rc = errno;
		goto close_file;
	}

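	/*
	 * Ask the device for its atomic-write limits (start alignment, length
	 * multiplicity, maximum vector size, sector size and maximum IOV
	 * count); queue() enforces these on every request.
	 */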
	nvm_capability[0].cap_id = NVM_CAP_ATOMIC_WRITE_START_ALIGN_ID;
	nvm_capability[1].cap_id = NVM_CAP_ATOMIC_WRITE_MULTIPLICITY_ID;
	nvm_capability[2].cap_id = NVM_CAP_ATOMIC_WRITE_MAX_VECTOR_SIZE_ID;
	nvm_capability[3].cap_id = NVM_CAP_SECTOR_SIZE_ID;
	nvm_capability[4].cap_id = NVM_CAP_ATOMIC_MAX_IOV_ID;
	rc = nvm_get_capabilities(d->nvm_handle, nvm_capability,
				  NUM_ATOMIC_CAPABILITIES, false);
	if (rc == -1) {
		td_vmsg(td, errno, "error in getting atomic write capabilities",
			"nvm_get_capabilities");
		rc = errno;
		goto close_file;
	} else if (rc < NUM_ATOMIC_CAPABILITIES) {
		td_vmsg(td, EINVAL, "couldn't get all the atomic write capabilities",
			"nvm_get_capabilities");
		rc = ECANCELED;
		goto close_file;
	}
	/* Reset rc to 0 because we got all capabilities we needed */
	rc = 0;
	d->xfer_buf_align = nvm_capability[0].cap_value;
	d->xfer_buflen_align = nvm_capability[1].cap_value;
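	/*
	 * Largest single atomic write this engine will submit: the length
	 * granularity times the maximum vector size times the maximum number
	 * of IOVs per call.
	 */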
	d->xfer_buflen_max = d->xfer_buflen_align * nvm_capability[2].cap_value *
		nvm_capability[4].cap_value;
	d->sector_size = nvm_capability[3].cap_value;

out:
	return rc;
close_file:
	close_file_rc = generic_close_file(td, f);
free_engine_data:
	free(d);
error:
	f->fd = -1;
	FILE_SET_ENG_DATA(f, NULL);
	goto out;
}

static int close_file(struct thread_data *td, struct fio_file *f)
{
	struct fas_data *d = FILE_ENG_DATA(f);

	if (d) {
		if (d->nvm_handle != -1)
			nvm_release_handle(d->nvm_handle);
		free(d);
		FILE_SET_ENG_DATA(f, NULL);
	}

	return generic_close_file(td, f);
}

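/*
 * Registered as "fusion-aw-sync". FIO_SYNCIO marks queue() as completing
 * requests inline; FIO_RAWIO and FIO_MEMALIGN tell fio this is raw I/O that
 * needs aligned buffers.
 */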
static struct ioengine_ops ioengine = {
	.name = "fusion-aw-sync",
	.version = FIO_IOOPS_VERSION,
	.queue = queue,
	.open_file = open_file,
	.close_file = close_file,
	.get_file_size = generic_get_file_size,
	.flags = FIO_SYNCIO | FIO_RAWIO | FIO_MEMALIGN,
};

static void fio_init fio_fusion_aw_init(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_fusion_aw_exit(void)
{
	unregister_ioengine(&ioengine);
}