/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <linux/ion_test.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

#include "ion_test_fixture.h"

/* Round x up to the next multiple of y; y must be a power of two. */
#define ALIGN(x,y) (((x) + ((y) - 1)) & ~((y) - 1))

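/*
 * Test fixture built around /dev/ion-test, a driver that attaches to an ION
 * buffer fd and copies data between user memory and the buffer through
 * either a kernel vaddr mapping or a DMA mapping.
 */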
class Device : public IonAllHeapsTest {
 public:
    virtual void SetUp();
    virtual void TearDown();
    int m_deviceFd;
    void readDMA(int fd, void *buf, size_t size);
    void writeDMA(int fd, void *buf, size_t size);
    void readKernel(int fd, void *buf, size_t size);
    void writeKernel(int fd, void *buf, size_t size);
    void blowCache();
    void dirtyCache(void *ptr, size_t size);
};

void Device::SetUp()
{
    IonAllHeapsTest::SetUp();
    m_deviceFd = open("/dev/ion-test", O_RDWR);
    ASSERT_GE(m_deviceFd, 0);
}

void Device::TearDown()
{
    ASSERT_EQ(0, close(m_deviceFd));
    IonAllHeapsTest::TearDown();
}

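/*
 * The four helpers below share one protocol: attach the ION buffer fd with
 * ION_IOC_TEST_SET_FD, have the driver copy between the buffer and a user
 * pointer through its DMA or kernel mapping, then detach by passing -1.
 */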
void Device::readDMA(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 0,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::writeDMA(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 1,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::readKernel(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 0,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::writeKernel(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 1,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

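/* Evict the test buffers from CPU caches by streaming 8 MiB (assumed larger
 * than the last-level cache) through memset and memcpy. */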
void Device::blowCache()
{
    const size_t bigger_than_cache = 8*1024*1024;
    void *buf1 = malloc(bigger_than_cache);
    void *buf2 = malloc(bigger_than_cache);
    memset(buf1, 0xaa, bigger_than_cache);
    memcpy(buf2, buf1, bigger_than_cache);
    free(buf1);
    free(buf2);
}

void Device::dirtyCache(void *ptr, size_t size)
{
    /* dirty the cache lines covering the buffer: read each byte to pull its
     * line into the cache, then overwrite it */
    for (size_t i = size; i-- > 0;) {
        ((volatile char *)ptr)[i];
        ((char *)ptr)[i] = i;
    }
}

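/*
 * The tests below share a pattern: the user buffer is a 4K region placed
 * 1024 bytes past a page boundary inside a malloc'd block, so copies cross a
 * page boundary; the kernel-read tests also plant a canary byte just past
 * the region to catch overruns.
 *
 * KernelReadCached: data written through a cached CPU mapping must be
 * visible to a read through the driver's kernel mapping.
 */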
TEST_F(Device, KernelReadCached)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char *)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char *)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

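/* A write through the kernel mapping must be visible through the cached CPU
 * mapping even when the cache has just been dirtied. */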
TEST_F(Device, KernelWriteCached)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

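/* A read through the driver's DMA mapping must observe data written through
 * the cached CPU mapping. */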
TEST_F(Device, DMAReadCached)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

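/* A write through the driver's DMA mapping must be visible through the
 * dirtied cached CPU mapping. */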
TEST_F(Device, DMAWriteCached)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

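/* Same as KernelReadCached, but with ION_FLAG_CACHED_NEEDS_SYNC: the kernel
 * mapping is still expected to see CPU writes without an explicit sync. */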
TEST_F(Device, KernelReadCachedNeedsSync)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char *)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char *)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

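/* Same as KernelWriteCached, but with ION_FLAG_CACHED_NEEDS_SYNC. */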
TEST_F(Device, KernelWriteCachedNeedsSync)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

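/* With ION_FLAG_CACHED_NEEDS_SYNC, CPU writes must be flushed with
 * ion_sync_fd() before a DMA read can observe them. */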
TEST_F(Device, DMAReadCachedNeedsSync)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ASSERT_EQ(0, ion_sync_fd(m_ionFd, map_fd));

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

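/* With ION_FLAG_CACHED_NEEDS_SYNC, ion_sync_fd() is called after the DMA
 * write so the CPU view is brought back in sync before checking. */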
TEST_F(Device, DMAWriteCachedNeedsSync)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        ASSERT_EQ(0, ion_sync_fd(m_ionFd, map_fd));

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

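/* The remaining Kernel/DMA tests repeat the checks above for uncached
 * (flags == 0) allocations, where no explicit sync should be needed. */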
TEST_F(Device, KernelRead)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char *)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char *)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

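/* Uncached variant of KernelWriteCached. */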
TEST_F(Device, KernelWrite)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

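/* Uncached variant of DMAReadCached. */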
TEST_F(Device, DMARead)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

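/* Uncached variant of DMAWriteCached. */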
TEST_F(Device, DMAWrite)
{
    void *alloc = malloc(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc, 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(alloc);
}

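/* Dirty CPU writes to a NEEDS_SYNC buffer must NOT all reach memory before a
 * sync: an unsynced DMA read seeing stale data proves the CPU mapping really
 * is cached. */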
TEST_F(Device, IsCached)
{
    void *buf = malloc(4096);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        readDMA(map_fd, buf, 4096);

        /* at least some lines must still hold stale (pre-dirtyCache) data */
        bool same = true;
        for (int i = 4096-16; i >= 0; i -= 16)
            if (((char *)buf)[i] != (char)i)
                same = false;
        ASSERT_FALSE(same);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }

    free(buf);
}