/*
 * Memory helpers
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif

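/*
 * Undo fio_pin_memory(): unlock the pinned pages and drop the mapping
 * backing them.
 */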
void fio_unpin_memory(struct thread_data *td)
{
	if (td->pinned_mem) {
		dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem);
		if (munlock(td->pinned_mem, td->o.lockmem) < 0)
			perror("munlock");
		munmap(td->pinned_mem, td->o.lockmem);
		td->pinned_mem = NULL;
	}
}

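/*
 * Pin o.lockmem bytes of memory for the duration of the run, e.g. to
 * simulate a machine with less free memory.
 */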
int fio_pin_memory(struct thread_data *td)
{
	unsigned long long phys_mem;

	if (!td->o.lockmem)
		return 0;

	dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem);

	/*
	 * Don't allow mlock of more than real_mem - 128MB
	 */
	phys_mem = os_phys_mem();
	if (phys_mem) {
		if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) {
			td->o.lockmem = phys_mem - 128 * 1024 * 1024;
			log_info("fio: limiting mlocked memory to %lluMB\n",
				 td->o.lockmem >> 20);
		}
	}

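	/*
	 * Back the pinned region with a private anonymous mapping and
	 * lock it in place.
	 */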
	td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | OS_MAP_ANON, -1, 0);
	if (td->pinned_mem == MAP_FAILED) {
		perror("malloc locked mem");
		td->pinned_mem = NULL;
		return 1;
	}
	if (mlock(td->pinned_mem, td->o.lockmem) < 0) {
		perror("mlock");
		munmap(td->pinned_mem, td->o.lockmem);
		td->pinned_mem = NULL;
		return 1;
	}

	return 0;
}

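/*
 * Allocate the io buffer as a SysV shm segment, optionally backed by
 * huge pages (MEM_SHMHUGE).
 */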
static int alloc_mem_shm(struct thread_data *td, unsigned int total_mem)
{
#ifndef CONFIG_NO_SHM
	int flags = IPC_CREAT | S_IRUSR | S_IWUSR;

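	/*
	 * For huge page backed shm, ask for SHM_HUGETLB and round the
	 * request up to a multiple of the huge page size. The mask
	 * trick assumes hugepage_size is a power of two.
	 */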
	if (td->o.mem_type == MEM_SHMHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		flags |= SHM_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	td->shm_id = shmget(IPC_PRIVATE, total_mem, flags);
	dprint(FD_MEM, "shmget %u, %d\n", total_mem, td->shm_id);
	if (td->shm_id < 0) {
		td_verror(td, errno, "shmget");
		if (geteuid() != 0 && (errno == ENOMEM || errno == EPERM))
			log_err("fio: you may need to run this job as root\n");
		if (td->o.mem_type == MEM_SHMHUGE) {
			if (errno == EINVAL) {
				log_err("fio: check that you have free huge"
					" pages and that hugepage-size is"
					" correct.\n");
			} else if (errno == ENOSYS) {
				log_err("fio: your system does not appear to"
					" support huge pages.\n");
			} else if (errno == ENOMEM) {
				log_err("fio: no huge pages available, do you"
					" need to allocate some? See HOWTO.\n");
			}
		}

		return 1;
	}

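	/* Attach the segment; shmat() returns (void *) -1 on failure */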
	td->orig_buffer = shmat(td->shm_id, NULL, 0);
	dprint(FD_MEM, "shmat %d, %p\n", td->shm_id, td->orig_buffer);
	if (td->orig_buffer == (void *) -1) {
		td_verror(td, errno, "shmat");
		td->orig_buffer = NULL;
		return 1;
	}

	return 0;
#else
	log_err("fio: shm not supported\n");
	return 1;
#endif
}

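/*
 * Detach and remove the shm segment. IPC_RMID only marks the segment
 * for destruction; it goes away once the last attach is gone.
 */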
static void free_mem_shm(struct thread_data *td)
{
#ifndef CONFIG_NO_SHM
	struct shmid_ds sbuf;

	dprint(FD_MEM, "shmdt/ctl %d %p\n", td->shm_id, td->orig_buffer);
	shmdt(td->orig_buffer);
	shmctl(td->shm_id, IPC_RMID, &sbuf);
#endif
}

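/*
 * Allocate the io buffer with mmap(): anonymous memory, a regular
 * file, or a hugetlbfs file for MEM_MMAPHUGE.
 */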
static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
	int flags = 0;

	td->mmapfd = -1;

	if (td->o.mem_type == MEM_MMAPHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		/* TODO: make sure the file is a real hugetlbfs file */
		if (!td->o.mmapfile)
			flags |= MAP_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	if (td->o.mmapfile) {
		td->mmapfd = open(td->o.mmapfile, O_RDWR|O_CREAT, 0644);

		if (td->mmapfd < 0) {
			td_verror(td, errno, "open mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
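		/*
		 * Only size regular files here. On hugetlbfs,
		 * ftruncate() can reject lengths that aren't hugepage
		 * aligned, so hugepage backed files are left to the
		 * filesystem to size.
		 */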
		if (td->o.mem_type != MEM_MMAPHUGE &&
		    ftruncate(td->mmapfd, total_mem) < 0) {
			td_verror(td, errno, "truncate mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
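		/*
		 * hugetlbfs has traditionally required MAP_SHARED
		 * mappings; regular files are mapped MAP_PRIVATE.
		 */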
		if (td->o.mem_type == MEM_MMAPHUGE)
			flags |= MAP_SHARED;
		else
			flags |= MAP_PRIVATE;
	} else
		flags |= OS_MAP_ANON | MAP_PRIVATE;

	td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
			       td->mmapfd, 0);
	dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem,
	       td->mmapfd, td->orig_buffer);
	if (td->orig_buffer == MAP_FAILED) {
		td_verror(td, errno, "mmap");
		td->orig_buffer = NULL;
		if (td->mmapfd != -1) {
			close(td->mmapfd);
			if (td->o.mmapfile)
				unlink(td->o.mmapfile);
		}

		return 1;
	}

	return 0;
}

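/*
 * Undo alloc_mem_mmap(): unmap the buffer and, for file backed
 * mappings, close and unlink the backing file.
 */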
static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
	dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
	       td->orig_buffer);
	munmap(td->orig_buffer, td->orig_buffer_size);
	if (td->o.mmapfile) {
		if (td->mmapfd != -1)
			close(td->mmapfd);
		unlink(td->o.mmapfile);
		free(td->o.mmapfile);
	}
}

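/*
 * Plain malloc() backing (MEM_MALLOC, the default).
 */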
static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
	td->orig_buffer = malloc(total_mem);
	dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
	       td->orig_buffer);

	return td->orig_buffer == NULL;
}

static void free_mem_malloc(struct thread_data *td)
{
	dprint(FD_MEM, "free malloc mem %p\n", td->orig_buffer);
	free(td->orig_buffer);
}

/*
 * Set up the buffer area we need for io.
 */
int allocate_io_mem(struct thread_data *td)
{
	size_t total_mem;
	int ret = 0;

	if (td->io_ops->flags & FIO_NOIO)
		return 0;

	total_mem = td->orig_buffer_size;

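	/*
	 * O_DIRECT, atomic writes, and forced memory alignment all need
	 * an aligned buffer, so pad the allocation by up to a page (and
	 * any alignment beyond page size) for later pointer alignment.
	 */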
	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
	    (td->io_ops->flags & FIO_MEMALIGN)) {
		total_mem += page_mask;
		if (td->o.mem_align && td->o.mem_align > page_size)
			total_mem += td->o.mem_align - page_size;
	}

	dprint(FD_MEM, "Alloc %llu for buffers\n", (unsigned long long) total_mem);

	if (td->o.mem_type == MEM_MALLOC)
		ret = alloc_mem_malloc(td, total_mem);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		ret = alloc_mem_shm(td, total_mem);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
		ret = alloc_mem_mmap(td, total_mem);
	else {
		log_err("fio: bad mem type: %d\n", td->o.mem_type);
		ret = 1;
	}

	if (ret)
		td_verror(td, ENOMEM, "iomem allocation");

	return ret;
}

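/*
 * Free the io buffer set up by allocate_io_mem(), re-deriving the
 * padded size for the mmap backed case.
 */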
void free_io_mem(struct thread_data *td)
{
	unsigned int total_mem;

	total_mem = td->orig_buffer_size;
	if (td->o.odirect || td->o.oatomic)
		total_mem += page_mask;

	if (td->o.mem_type == MEM_MALLOC)
		free_mem_malloc(td);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		free_mem_shm(td);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
		free_mem_mmap(td, total_mem);
	else
		log_err("Bad memory type %u\n", td->o.mem_type);

	td->orig_buffer = NULL;
	td->orig_buffer_size = 0;
}