/*
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <gtest/gtest.h>

#include <ion/ion.h>
#include "../include/exynos_ion.h"

#include "ion_test_fixture.h"

class Map : public IonAllHeapsTest {
};

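/*
 * MapFd: allocates a buffer of each size from every available heap, maps it
 * with mmap(), closes the buffer fd, and verifies the mapping is still
 * writable afterwards.
 */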
TEST_F(Map, MapFd)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    struct ion_heap_data *heap = (struct ion_heap_data *)m_heap_query.heaps;

    for (unsigned int i = 0; i < m_heap_query.cnt; i++) {
        unsigned int heapMask = 1 << heap[i].heap_id;
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            int map_fd = -1;

            ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
            ASSERT_GE(map_fd, 0);

            void *ptr;
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
            /* mmap() reports failure with MAP_FAILED, not NULL */
            ASSERT_TRUE(ptr != MAP_FAILED);

            /* the mapping must keep the buffer alive after the fd is closed */
            ASSERT_EQ(0, close(map_fd));

            memset(ptr, 0xaa, size);

            ASSERT_EQ(0, munmap(ptr, size));
        }
    }
}

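/*
 * MapOffset: fills a two-page buffer with distinct patterns, then remaps
 * only the second page via a non-zero mmap() offset and verifies that the
 * expected bytes are visible there.
 */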
TEST_F(Map, MapOffset)
{
    struct ion_heap_data *heap = (struct ion_heap_data *)m_heap_query.heaps;

    for (unsigned int i = 0; i < m_heap_query.cnt; i++) {
        unsigned int heapMask = 1 << heap[i].heap_id;
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, PAGE_SIZE * 2, 0, heapMask, 0, &map_fd));
        ASSERT_GE(map_fd, 0);

        unsigned char *ptr;
        ptr = (unsigned char *)mmap(NULL, PAGE_SIZE * 2, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        /* first page 0x00, second page 0xaa */
        memset(ptr, 0, PAGE_SIZE);
        memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);

        ASSERT_EQ(0, munmap(ptr, PAGE_SIZE * 2));

        /* remap only the second page and check its contents */
        ptr = (unsigned char *)mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, PAGE_SIZE);
        ASSERT_TRUE(ptr != MAP_FAILED);

        ASSERT_EQ(ptr[0], 0xaa);
        ASSERT_EQ(ptr[PAGE_SIZE - 1], 0xaa);

        ASSERT_EQ(0, munmap(ptr, PAGE_SIZE));

        ASSERT_EQ(0, close(map_fd));
    }
}

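/*
 * MapCached: same as MapFd, but the buffer is allocated with
 * ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC.
 */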
TEST_F(Map, MapCached)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    struct ion_heap_data *heap = (struct ion_heap_data *)m_heap_query.heaps;

    for (unsigned int i = 0; i < m_heap_query.cnt; i++) {
        unsigned int heapMask = 1 << heap[i].heap_id;
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            int map_fd = -1;
            unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

            ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, flags, &map_fd));
            ASSERT_GE(map_fd, 0);

            void *ptr;
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
            ASSERT_TRUE(ptr != MAP_FAILED);

            ASSERT_EQ(0, close(map_fd));

            memset(ptr, 0xaa, size);

            ASSERT_EQ(0, munmap(ptr, size));
        }
    }
}

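/*
 * NoZeroed: a buffer allocated with ION_FLAG_NOZEROED may carry stale data
 * from its previous owner, so user-space mappings of it are expected to be
 * refused by the driver.
 */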
TEST_F(Map, NoZeroed)
{
    struct ion_heap_data *heap = (struct ion_heap_data *)m_heap_query.heaps;

    for (unsigned int i = 0; i < m_heap_query.cnt; i++) {
        unsigned int heapMask = 1 << heap[i].heap_id;
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, ION_FLAG_NOZEROED, &map_fd));
        ASSERT_GE(map_fd, 0);

        /* reading a non-zero-initialized buffer could leak stale data,
         * so the mapping is expected to fail */
        void *ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, map_fd, 0);
        EXPECT_EQ(MAP_FAILED, ptr);

        ASSERT_EQ(0, close(map_fd));
    }
}

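/*
 * Protected: buffers allocated with ION_FLAG_PROTECTED live in secure
 * memory and are expected not to be mappable by the CPU.
 */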
TEST_F(Map, Protected)
{
    /* TODO: add the remaining secure heaps */
    static const unsigned int secureHeaps[] = {
        EXYNOS_ION_HEAP_CRYPTO_MASK,
        EXYNOS_ION_HEAP_VIDEO_STREAM_MASK,
        EXYNOS_ION_HEAP_CAMERA_MASK, /* assumed mask macro, for consistency with the other entries */
    };

    for (unsigned int heapMask : secureHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, ION_FLAG_PROTECTED, &map_fd));
        ASSERT_GE(map_fd, 0);

        /* protected (secure) buffers are not CPU-accessible,
         * so the mapping is expected to fail */
        void *ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, map_fd, 0);
        EXPECT_EQ(MAP_FAILED, ptr);

        ASSERT_EQ(0, close(map_fd));
    }
}

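/*
 * InvalidValues: mmap() with a zero length, invalid protection bits, an
 * unaligned offset, or a non-ION fd is expected to fail with MAP_FAILED.
 */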
TEST_F(Map, InvalidValues)
{
    struct ion_heap_data *heap = (struct ion_heap_data *)m_heap_query.heaps;

    for (unsigned int i = 0; i < m_heap_query.cnt; i++) {
        unsigned int heapMask = 1 << heap[i].heap_id;
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, 0, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr = NULL;

        /* bad size */
        ptr = mmap(NULL, 0, PROT_READ, MAP_SHARED, map_fd, 0);
        EXPECT_EQ(MAP_FAILED, ptr);
        /* bad prot */
        ptr = mmap(NULL, 4096, -1, MAP_SHARED, map_fd, 0);
        EXPECT_EQ(MAP_FAILED, ptr);
        /* bad (unaligned) offset */
        ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, map_fd, -1);
        EXPECT_EQ(MAP_FAILED, ptr);
        /* fd 0 is not an ION buffer */
        ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, 0, 0);
        EXPECT_EQ(MAP_FAILED, ptr);

        ASSERT_EQ(0, close(map_fd));
    }
}