1 /*
2 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <iostream>
17 #include <cerrno>
18 #include <cstring>
19
20 #include <unistd.h>
21 #include <fcntl.h>
22 #include <sys/mman.h>
23 #include <sys/ioctl.h>
24
25 #include "ion_test_fixture.h"
26 #include "ion_test_define.h"
27
28 #define TEST_ALLOC_CACHED 1
29 #define TEST_ALLOC_BUDDY 2
30
31 using namespace std;
32
33 class Allocate : public IonAllocTest {
34 protected:
35 struct test_type_struct {
36 int type_flags;
37 const char *type_title;
38 };
checkZero(int fd,size_t size,unsigned long * val)39 off_t checkZero(int fd, size_t size, unsigned long *val) {
40 unsigned long *p = reinterpret_cast<unsigned long *>(mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
41 if (p == MAP_FAILED)
42 return -1;
43
44 off_t idx;
45 for (idx = 0; idx < static_cast<off_t>(size / sizeof(*p)); idx++) {
46 if (p[idx] != 0) {
47 if (val)
48 *val = p[idx];
49 break;
50 }
51 }
52
53 munmap(p, size);
54
55 return idx * sizeof(*p);
56 }
57
flushShrinker()58 void flushShrinker() {
59 int fd = open("/sys/kernel/debug/ion_system_heap_shrink", O_RDWR);
60 if (fd < 0)
61 return;
62
63 unsigned long val = mb(256); // This is very big enough to flush shrinker
64 if (write(fd, &val, sizeof(val)) < 0)
65 FAIL() << "Failed to write " << val << " to 'ion_system_heap_shrink': " << strerror(errno);
66 if (read(fd, &val, sizeof(val)) < 0)
67 FAIL() << "Failed to read from 'ion_system_heap_shrink': " << strerror(errno);
68 if (val > 0)
69 FAIL() << "ion_system_heap_shrink still has value " << val;
70 close(fd);
71 }
72 };
73
// Exercises every allocation size on every heap, for each cache/pool-flush
// combination, and verifies that each returned buffer is zero-filled.
TEST_F(Allocate, Allocate)
{
    static const size_t allocation_sizes[] = {
        mkb(16, 716), mkb(12, 4), mkb(8, 912), mkb(4, 60), mkb(2, 520), mkb(1, 92),
        mb(16), mb(12), mb(8), mb(4), mb(2), mb(1), kb(64), kb(4),
    };
    static const test_type_struct test_types[] = {
        {0, "uncached"},
        {TEST_ALLOC_CACHED, "cached"},
        {TEST_ALLOC_BUDDY, "uncached|flush_pool"},
        {TEST_ALLOC_CACHED | TEST_ALLOC_BUDDY, "cached|flush_pool"},
    };

    for (const test_type_struct &tc : test_types) {
        const bool flushPool = (tc.type_flags & TEST_ALLOC_BUDDY) != 0;
        const bool cached = (tc.type_flags & TEST_ALLOC_CACHED) != 0;

        for (unsigned int heap = 0; heap < getHeapCount(); heap++) {
            // Pool-flush cases are only meaningful on deferred-free heaps.
            if (flushPool && !(getHeapFlags(heap) & ION_HEAPDATA_FLAGS_DEFER_FREE))
                continue;

            // Skip CMA heaps that are currently occupied.
            if (getCmaUsed(getHeapName(heap)) > 0)
                continue;

            for (size_t len : allocation_sizes) {
                if (len > getHeapSize(heap))
                    continue;

                ion_allocation_data_modern data;
                data.len = len;
                data.heap_id_mask = getHeapMask(heap);
                data.flags = cached ? ION_FLAG_CACHED : 0;
                data.fd = 0;

                SCOPED_TRACE(::testing::Message() << "heap: " << getHeapName(heap) << ", heapmask: " << getHeapMask(heap));
                SCOPED_TRACE(::testing::Message() << "size: " << len << ", flags: " << data.flags);
                SCOPED_TRACE(::testing::Message() << "test type: " << tc.type_title);

                if (flushPool)
                    flushShrinker();

                int ret;
                EXPECT_EQ(0, ret = ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data)) << ": " << strerror(errno);

                // A valid dmabuf fd must be above stdio and below the fd limit.
                EXPECT_LT(2U, data.fd);
                EXPECT_GT(1024U, data.fd);

                if (ret == 0) {
                    if (!(getHeapFlags(heap) & ION_HEAPDATA_FLAGS_UNTOUCHABLE)) {
                        unsigned long badval = 0;
                        off_t erridx;
                        EXPECT_EQ(static_cast<off_t>(len), erridx = checkZero(data.fd, len, &badval))
                                << "non-zero " << badval << " found at " << erridx << " byte";
                    }
                    EXPECT_EQ(0, close(data.fd));
                }
            }
        }
    }
}
131
// Requests the largest allocation each heap type should be able to satisfy
// and checks that the allocation succeeds.
TEST_F(Allocate, Large)
{
    static const test_type_struct test_types[] = {
        {0, "uncached"},
        {TEST_ALLOC_CACHED, "cached"},
        {TEST_ALLOC_BUDDY, "uncached|flush_pool"},
        {TEST_ALLOC_CACHED | TEST_ALLOC_BUDDY, "cached|flush_pool"},
    };

    for (const test_type_struct &tc : test_types) {
        const bool flushPool = (tc.type_flags & TEST_ALLOC_BUDDY) != 0;
        const bool cached = (tc.type_flags & TEST_ALLOC_CACHED) != 0;

        for (unsigned int heap = 0; heap < getHeapCount(); heap++) {
            // Pool-flush cases are only meaningful on deferred-free heaps.
            if (flushPool && !(getHeapFlags(heap) & ION_HEAPDATA_FLAGS_DEFER_FREE))
                continue;

            // Skip CMA heaps that are currently occupied.
            if (getCmaUsed(getHeapName(heap)) > 0)
                continue;

            __u64 len = 0;
            switch (getHeapType(heap)) {
            case ION_HEAP_TYPE_SYSTEM:
                // Half of system memory, less one page.
                len = getMemTotal() / 2 - 4096;
                break;
            case ION_HEAP_TYPE_CARVEOUT:
            case ION_HEAP_TYPE_DMA:
                len = getHeapSize(heap);
                break;
            default:
                continue;
            }

            ion_allocation_data_modern data;
            data.len = len;
            data.heap_id_mask = getHeapMask(heap);
            data.flags = cached ? ION_FLAG_CACHED : 0;
            data.fd = 0;

            SCOPED_TRACE(::testing::Message() << "heap: " << getHeapName(heap) << ", heapmask: " << getHeapMask(heap));
            SCOPED_TRACE(::testing::Message() << "size: " << len << ", flags: " << data.flags);
            SCOPED_TRACE(::testing::Message() << "test type: " << tc.type_title);

            if (flushPool)
                flushShrinker();

            int ret;
            EXPECT_EQ(0, ret = ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data)) << ": " << strerror(errno);

            // A valid dmabuf fd must be above stdio and below the fd limit.
            EXPECT_LT(2U, data.fd);
            EXPECT_GT(1024U, data.fd);
            if (ret == 0)
                EXPECT_EQ(0, close(data.fd));
        }
    }
}
186
// ION_FLAG_NOZEROED buffers may carry stale data from other owners, so the
// driver must refuse to map them to userspace. Verify mmap() is rejected.
TEST_F(Allocate, Nozeroed)
{
    static const size_t allocation_sizes[] = { mkb(4, 60), mkb(2, 520), mkb(1, 92) };
    static const unsigned long allocation_flags[] = { 0, ION_FLAG_CACHED, ION_FLAG_SYNC_FORCE };

    for (unsigned int i = 0; i < getHeapCount(); i++) {
        // Skip CMA heaps that are currently occupied.
        if (getCmaUsed(getHeapName(i)) > 0)
            continue;

        for (unsigned long flag : allocation_flags) {
            for (size_t size : allocation_sizes) {
                if (size > getHeapSize(i))
                    continue;

                ion_allocation_data_modern data;
                int ret;

                data.len = size;
                data.heap_id_mask = getHeapMask(i);
                data.flags = ION_FLAG_NOZEROED | flag;
                data.fd = 0;

                SCOPED_TRACE(::testing::Message() << "heap " << getHeapName(i) << " mask " << getHeapMask(i));
                SCOPED_TRACE(::testing::Message() << "size " << size << ", flags " << data.flags);

                flushShrinker();

                EXPECT_EQ(0, ret = ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data)) << ": " << strerror(errno);

                // A valid dmabuf fd must be above stdio and below the fd limit.
                EXPECT_LT(2U, data.fd);
                EXPECT_GT(1024U, data.fd);
                if (ret == 0) {
                    void *p;

                    // Mapping a nozeroed buffer must fail.
                    EXPECT_EQ(MAP_FAILED, p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, data.fd, 0));
                    if (p == MAP_FAILED) {
                        // errno is only meaningful when mmap() actually failed;
                        // checking it unconditionally could report a stale value.
                        EXPECT_EQ(EACCES, errno);
                    } else {
                        munmap(p, size);
                    }
                    EXPECT_EQ(0, close(data.fd));
                }
            }
        }
    }
}
231
// ION_FLAG_PROTECTED buffers belong to a secure memory region, so the driver
// must refuse to map them to userspace. Verify mmap() is rejected.
TEST_F(Allocate, Protected)
{
    static const size_t allocation_sizes[] = {kb(4), kb(64), mb(1), mb(2)};
    static const unsigned long allocation_flags[] = { 0, ION_FLAG_CACHED, ION_FLAG_SYNC_FORCE };

    for (unsigned int i = 0; i < getHeapCount(); i++) {
        // Skip CMA heaps that are currently occupied.
        if (getCmaUsed(getHeapName(i)) > 0)
            continue;

        for (unsigned long flags: allocation_flags) {
            for (size_t size : allocation_sizes) {
                if (size > getHeapSize(i))
                    continue;

                ion_allocation_data_modern data;
                int ret;

                data.len = size;
                data.heap_id_mask = getHeapMask(i);
                data.flags = ION_FLAG_PROTECTED | flags;
                data.fd = 0;

                SCOPED_TRACE(::testing::Message() << "heap: " << getHeapName(i) << ", heapmask: " << getHeapMask(i));
                SCOPED_TRACE(::testing::Message() << "size: " << size << ", flags: " << data.flags);

                EXPECT_EQ(0, ret = ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data)) << ": " << strerror(errno);

                // A valid dmabuf fd must be above stdio and below the fd limit.
                EXPECT_LT(2U, data.fd);
                EXPECT_GT(1024U, data.fd);
                if (ret == 0) {
                    void *p;

                    // Mapping a protected buffer must fail.
                    EXPECT_EQ(MAP_FAILED, p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, data.fd, 0));
                    if (p == MAP_FAILED) {
                        // errno is only meaningful when mmap() actually failed;
                        // checking it unconditionally could report a stale value.
                        EXPECT_EQ(EACCES, errno);
                    } else {
                        munmap(p, size);
                    }

                    EXPECT_EQ(0, close(data.fd));
                }
            }
        }
    }
}
276
// Feeds ION_IOC_ALLOC_MODERN deliberately invalid arguments (bad fds, empty
// or out-of-range heap masks, zero and oversized lengths) and checks that the
// driver rejects each with the expected errno.
TEST_F(Allocate, InvalidValues)
{
    ion_allocation_data_modern data;

    data.len = kb(4);
    data.heap_id_mask = 1; // any first heap
    data.flags = 0;
    data.fd = 0;

    // incorrect /dev/ion fd
    EXPECT_EQ(-1, ioctl(0, ION_IOC_ALLOC_MODERN, &data));
    EXPECT_TRUE(errno == EINVAL || errno == ENOTTY) << " unexpected error: " << strerror(errno);

    // invalid fd
    EXPECT_EQ(-1, ioctl(-1, ION_IOC_ALLOC_MODERN, &data));
    EXPECT_EQ(EBADF, errno) << " unexpected error: " << strerror(errno);

    // invalid heap id
    data.heap_id_mask = 0;
    EXPECT_EQ(-1, ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data));
    EXPECT_EQ(ENODEV, errno) << " unexpected error: " << strerror(errno);

    // unavailable heap id (the largest heap id + 1)
    // Use an unsigned literal: shifting a signed 1 into bit 31 when
    // getMaxHeapId() == 31 is undefined behavior.
    data.heap_id_mask = 1U << (getMaxHeapId() + 1);
    EXPECT_EQ(-1, ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data))
        << "unexpected success with heapmask " << data.heap_id_mask;
    EXPECT_EQ(ENODEV, errno) << " unexpected error: " << strerror(errno);

    // zero size
    data.len = 0;
    EXPECT_EQ(-1, ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data));
    EXPECT_EQ(EINVAL, errno) << " unexpected error: " << strerror(errno);

    // too large size
    for (unsigned int i = 0; i < getHeapCount(); i++) {
        data.heap_id_mask = getHeapMask(i);

        switch (getHeapType(i)) {
        case ION_HEAP_TYPE_SYSTEM:
            data.len = kb(getMemTotal() / 2) + kb(4);
            break;
        case ION_HEAP_TYPE_CARVEOUT:
        case ION_HEAP_TYPE_DMA:
            data.len = getHeapSize(i) + 1;
            break;
        case ION_HEAP_TYPE_HPA:
            data.len = ((sysconf(_SC_PAGESIZE) * 2) / 8) * (64 * 1024) + 1; // see hpa heap of ION driver
            break;
        default:
            continue;
        }

        SCOPED_TRACE(::testing::Message() << "heap: " << getHeapName(i) << ", heapmask: " << getHeapMask(i));
        SCOPED_TRACE(::testing::Message() << "size: " << data.len << ", flags: " << data.flags);

        EXPECT_EQ(-1, ioctl(getIonFd(), ION_IOC_ALLOC_MODERN, &data));
        EXPECT_EQ(ENOMEM, errno) << " unexpected error: " << strerror(errno);
    }
}
337