1 /*
2 * Copyright (C) 2016-2019 ARM Limited. All rights reserved.
3 *
4 * Copyright (C) 2008 The Android Open Source Project
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include <string.h>
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdlib.h>
24 #include <limits.h>
25
26 #include <log/log.h>
27 #include <cutils/atomic.h>
28
29 #include <sys/ioctl.h>
30
31 #include <hardware/hardware.h>
32
33 #if GRALLOC_VERSION_MAJOR == 1
34 #include <hardware/gralloc1.h>
35 #elif GRALLOC_VERSION_MAJOR == 0
36 #include <hardware/gralloc.h>
37 #endif
38
39 #include <BufferAllocator/BufferAllocator.h>
40 #include "mali_gralloc_module.h"
41 #include "mali_gralloc_private_interface_types.h"
42 #include "mali_gralloc_buffer.h"
43 #include "gralloc_helper.h"
44 #include "framebuffer_device.h"
45 #include "mali_gralloc_formats.h"
46 #include "mali_gralloc_usages.h"
47 #include "mali_gralloc_bufferdescriptor.h"
48 #include "mali_gralloc_bufferallocation.h"
49
50 #include <hardware/exynos/ion.h>
51 #include <hardware/exynos/dmabuf_container.h>
52 #include <linux/ion.h>
53
54 #include <array>
55
56 #define INIT_ZERO(obj) (memset(&(obj), 0, sizeof((obj))))
57
58 #define HEAP_MASK_FROM_ID(id) (1 << id)
59 #define HEAP_MASK_FROM_TYPE(type) (1 << type)
60
61 static const enum ion_heap_type ION_HEAP_TYPE_INVALID = ((enum ion_heap_type)~0);
62 static const enum ion_heap_type ION_HEAP_TYPE_SECURE = (enum ion_heap_type)(((unsigned int)ION_HEAP_TYPE_CUSTOM) + 1);
63
64 #if defined(ION_HEAP_SECURE_MASK)
65 #if (HEAP_MASK_FROM_TYPE(ION_HEAP_TYPE_SECURE) != ION_HEAP_SECURE_MASK)
66 #error "ION_HEAP_TYPE_SECURE value is not compatible with ION_HEAP_SECURE_MASK"
67 #endif
68 #endif
69
70 static std::unique_ptr<BufferAllocator> buffer_allocator;
71 static int ion_client = -1;
72 static bool secure_heap_exists = true;
73
74 static const char kDmabufSensorDirectHeapName[] = "sensor_direct_heap";
75 static const char kDmabufFaceauthTpuHeapName[] = "faceauth_tpu-secure";
76 static const char kDmabufFaceauthImgHeapName[] = "faimg-secure";
77 static const char kDmabufFaceauthRawImgHeapName[] = "farawimg-secure";
78 static const char kDmabufFaceauthPrevHeapName[] = "faprev-secure";
79 static const char kDmabufFaceauthModelHeapName[] = "famodel-secure";
80 static const char kDmabufVframeSecureHeapName[] = "vframe-secure";
81 static const char kDmabufVstreamSecureHeapName[] = "vstream-secure";
82
set_ion_flags(enum ion_heap_type heap_type,uint64_t usage,unsigned int * priv_heap_flag,unsigned int * ion_flags)83 static void set_ion_flags(enum ion_heap_type heap_type, uint64_t usage,
84 unsigned int *priv_heap_flag, unsigned int *ion_flags)
85 {
86 #if !GRALLOC_USE_ION_DMA_HEAP
87 GRALLOC_UNUSED(heap_type);
88 #endif
89
90 if (priv_heap_flag)
91 {
92 #if GRALLOC_USE_ION_DMA_HEAP
93 if (heap_type == ION_HEAP_TYPE_DMA)
94 {
95 *priv_heap_flag = private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP;
96 }
97 #endif
98 }
99
100 if (ion_flags)
101 {
102 #if GRALLOC_USE_ION_DMA_HEAP
103 if (heap_type != ION_HEAP_TYPE_DMA)
104 {
105 #endif
106 if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
107 {
108 *ion_flags = ION_FLAG_CACHED;
109 }
110 #if GRALLOC_USE_ION_DMA_HEAP
111 }
112 #endif
113 /* LSI Integration */
114 if ((usage & GRALLOC1_USAGE_SW_READ_MASK) == GRALLOC1_USAGE_READ_OFTEN)
115 {
116 *ion_flags = ION_FLAG_CACHED;
117 }
118
119 // DRM or Secure Camera
120 if (usage & GRALLOC1_PRODUCER_USAGE_PROTECTED)
121 {
122 *ion_flags |= ION_FLAG_PROTECTED;
123 }
124 #if GRALLOC_ION_HEAP_CRYPTO_MASK == 1
125 if (usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_NONSECURE && usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER)
126 {
127 *ion_flags |= ION_FLAG_PROTECTED;
128 }
129 #endif
130 /* Sensor direct channels require uncached allocations. */
131 if (usage & GRALLOC_USAGE_SENSOR_DIRECT_DATA)
132 {
133 *ion_flags &= ~ION_FLAG_CACHED;
134 }
135 }
136 }
137
select_faceauth_heap_mask(uint64_t usage)138 static unsigned int select_faceauth_heap_mask(uint64_t usage)
139 {
140 struct HeapSpecifier
141 {
142 uint64_t usage_bits; // exact match required
143 unsigned int mask;
144 };
145
146 static constexpr std::array<HeapSpecifier, 5> faceauth_heaps =
147 {{
148 { // isp_image_heap
149 GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GS101_GRALLOC_USAGE_TPU_INPUT,
150 EXYNOS_ION_HEAP_FA_IMG_MASK
151 },
152 { // isp_internal_heap
153 GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_CAMERA_READ,
154 EXYNOS_ION_HEAP_FA_RAWIMG_MASK
155 },
156 { // isp_preview_heap
157 GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_COMPOSER |
158 GRALLOC_USAGE_HW_TEXTURE,
159 EXYNOS_ION_HEAP_FA_PREV_MASK
160 },
161 { // ml_model_heap
162 GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_INPUT,
163 EXYNOS_ION_HEAP_FA_MODEL_MASK
164 },
165 { // tpu_heap
166 GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_OUTPUT | GS101_GRALLOC_USAGE_TPU_INPUT,
167 EXYNOS_ION_HEAP_FA_TPU_MASK
168 }
169 }};
170
171 for (const HeapSpecifier &heap : faceauth_heaps)
172 {
173 if (usage == heap.usage_bits)
174 {
175 ALOGV("Using FaceAuth heap mask 0x%x for usage 0x%" PRIx64 "\n",
176 heap.mask, usage);
177 return heap.mask;
178 }
179 }
180
181 return 0;
182 }
183
select_heap_mask(uint64_t usage)184 static unsigned int select_heap_mask(uint64_t usage)
185 {
186 if (unsigned int faceauth_heap_mask = select_faceauth_heap_mask(usage);
187 faceauth_heap_mask != 0)
188 {
189 return faceauth_heap_mask;
190 }
191
192 unsigned int heap_mask;
193
194 if (usage & GRALLOC1_PRODUCER_USAGE_PROTECTED)
195 {
196 if (usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_NONSECURE)
197 heap_mask = EXYNOS_ION_HEAP_SYSTEM_MASK;
198 else {
199 if ((usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER) &&
200 !(usage & GRALLOC1_CONSUMER_USAGE_GPU_TEXTURE) &&
201 !(usage & GRALLOC1_PRODUCER_USAGE_GPU_RENDER_TARGET))
202 heap_mask = EXYNOS_ION_HEAP_VIDEO_SCALER_MASK;
203 else
204 heap_mask = EXYNOS_ION_HEAP_VIDEO_FRAME_MASK;
205 }
206 }
207 #if GRALLOC_ION_HEAP_CRYPTO_MASK == 1
208 else if (usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_NONSECURE && usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER)
209 {
210 heap_mask = EXYNOS_ION_HEAP_CRYPTO_MASK;
211 }
212 #endif
213 else if (usage & GRALLOC1_PRODUCER_USAGE_SENSOR_DIRECT_DATA)
214 {
215 heap_mask = EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK;
216 }
217 else
218 {
219 heap_mask = EXYNOS_ION_HEAP_SYSTEM_MASK;
220 }
221
222 return heap_mask;
223 }
224
225
226 /*
227 * Returns positive number if not mapping due to AFBC or protected memory
228 * Returns negative errno if mapping failed to mmap
229 */
static int gralloc_map(buffer_handle_t handle)
{
	private_handle_t *hnd = (private_handle_t*)handle;
	int ret = 0;
	/* Reset all CPU base addresses before attempting any mapping so the
	 * error path can tell which planes were actually mapped. */
	hnd->bases[0] = hnd->bases[1] = hnd->bases[2] = 0;

	/*
	 * VideoMetaData must be mapped for CPU access even if the buffer is a secure buffer
	 */
	if (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_3PRIVATE_DATA | private_handle_t::PRIV_FLAGS_USES_2PRIVATE_DATA))
	{
		/* The private video metadata occupies the last fd slot: index 2
		 * for a 3-fd layout, index 1 for a 2-fd layout. */
		int idx = hnd->flags & private_handle_t::PRIV_FLAGS_USES_3PRIVATE_DATA ? 2 : 1;
		void *cpuPtr = mmap(0, hnd->sizes[idx], PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fds[idx], 0);

		if (cpuPtr == MAP_FAILED)
		{
			ret = -errno;
			AERR("could not mmap %s for PRIVATE_DATA at fd%d", strerror(errno), idx);
			goto err;
		}

		hnd->bases[idx] = (uint64_t)cpuPtr;
	}

	// AFBC must not be mapped.
	if (hnd->is_compressible)
	{
		return 1;
	}

	// Don't be mapped for Secure DRM & Secure Camera
	/* NOTE(review): a producer-usage bit (PRIVATE_NONSECURE) is tested against
	 * consumer_usage here — presumably the bit value is shared between the two
	 * usage spaces in this codebase; confirm against the usage definitions. */
	if ((hnd->producer_usage & GRALLOC1_PRODUCER_USAGE_PROTECTED && !(hnd->consumer_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_NONSECURE)))
	{
		return 2;
	}

	/* Protected or no-zeroed buffers must not be CPU mapped; everything
	 * else gets every not-yet-mapped fd mmap'd read/write. */
	if (!(hnd->producer_usage &
		(GRALLOC1_PRODUCER_USAGE_PROTECTED | GRALLOC1_PRODUCER_USAGE_NOZEROED)))
	{
		for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
		{
			if (hnd->fds[idx] >= 0 && !hnd->bases[idx])
			{
				void *cpuPtr = (void*)mmap(0, hnd->sizes[idx], PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fds[idx], 0);

				if (cpuPtr == MAP_FAILED)
				{
					ret = -errno;
					AERR("could not mmap %s for fd%d", strerror(errno), idx);
					goto err;
				}

				/* Plane 0 may start at an offset inside its dma-buf. */
				if (idx == 0)
					hnd->bases[idx] = (uint64_t)cpuPtr + hnd->offset;
				else
					hnd->bases[idx] = (uint64_t)cpuPtr;
			}
		}
	}

	return 0;

err:
	/* Unmap whatever succeeded before the failure and clear the bases. */
	for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
	{
		if (hnd->bases[idx] != 0 && munmap((void *)hnd->bases[idx], hnd->sizes[idx]) != 0)
		{
			AERR("Failed to munmap handle %p fd%d", hnd, idx);
		}
		else
		{
			hnd->bases[idx] = 0;
		}
	}

	return ret;

}
308
309 /*
310 * Selects a DMA-BUF heap name.
311 *
312 * @param heap_mask [in] ION heap mask for which equivalent DMA-BUF heap name
313 * needs to be looked up.
314 *
315 * @return a tuple in std::pair form with the first member as heap
316 * name and second as minimum page size (in bytes).
317 *
318 */
select_dmabuf_heap(unsigned int heap_mask)319 static std::pair<std::string, int> select_dmabuf_heap(unsigned int heap_mask)
320 {
321 switch (heap_mask) {
322 case EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK:
323 return std::make_pair(kDmabufSensorDirectHeapName, SZ_4K);
324 case EXYNOS_ION_HEAP_FA_TPU_MASK:
325 return std::make_pair(kDmabufFaceauthTpuHeapName, SZ_4K);
326 case EXYNOS_ION_HEAP_FA_IMG_MASK:
327 return std::make_pair(kDmabufFaceauthImgHeapName, SZ_4K);
328 case EXYNOS_ION_HEAP_FA_RAWIMG_MASK:
329 return std::make_pair(kDmabufFaceauthRawImgHeapName, SZ_4K);
330 case EXYNOS_ION_HEAP_FA_PREV_MASK:
331 return std::make_pair(kDmabufFaceauthPrevHeapName, SZ_4K);
332 case EXYNOS_ION_HEAP_FA_MODEL_MASK:
333 return std::make_pair(kDmabufFaceauthModelHeapName, SZ_4K);
334 case EXYNOS_ION_HEAP_VIDEO_FRAME_MASK:
335 return std::make_pair(kDmabufVframeSecureHeapName, SZ_4K);
336 case EXYNOS_ION_HEAP_VIDEO_STREAM_MASK:
337 return std::make_pair(kDmabufVstreamSecureHeapName, SZ_4K);
338 default:
339 return {};
340 }
341 }
342
343 /*
344 * Allocates in the DMA-BUF heap with name @heap_name. If allocation fails from
345 * the DMA-BUF heap or if it does not exist, falls back to an ION heap of the
346 * same name.
347 *
348 * @param heap_name [in] DMA-BUF heap name for allocation
349 * @param size [in] Requested buffer size (in bytes).
350 * @param flags [in] ION allocation attributes defined by ION_FLAG_* to
351 * be used for ION allocations. Will not be used with
352 * DMA-BUF heaps since the framework does not support
353 * allocation flags.
354 *
355 * @return fd of the allocated buffer on success, -1 otherwise;
356 */
alloc_from_dmabuf_heap(const std::string & heap_name,size_t size,unsigned int flags)357 static int alloc_from_dmabuf_heap(const std::string& heap_name, size_t size, unsigned int flags)
358 {
359 if (!buffer_allocator)
360 {
361 return -1;
362 }
363
364 int shared_fd = buffer_allocator->Alloc(heap_name, size, flags);
365 if (shared_fd < 0)
366 {
367 ALOGE("Allocation failed for heap %s error: %d\n", heap_name.c_str(), shared_fd);
368 return -1;
369 }
370
371 return shared_fd;
372 }
373
374 /*
375 * Identifies a heap and retrieves file descriptor from ION for allocation
376 *
377 * @param usage [in] Producer and consumer combined usage.
378 * @param size [in] Requested buffer size (in bytes).
379 * @param heap_type [in] Requested heap type.
380 * @param flags [in] ION allocation attributes defined by ION_FLAG_*.
381 * @param min_pgsz [out] Minimum page size (in bytes).
382 *
383 * @return File handle which can be used for allocation, on success
384 * -1, otherwise.
385 */
static int alloc_from_ion_heap(uint64_t usage, size_t size,
                               enum ion_heap_type heap_type, unsigned int flags,
                               int *min_pgsz)
{
	/* Reject degenerate requests up front. */
	if (size == 0 || min_pgsz == NULL)
	{
		return -1;
	}

	unsigned int heap_mask = select_heap_mask(usage);

	/* Prefer a DMA-BUF heap when the selected ION heap mask has one;
	 * in that case the ION path below is skipped entirely. */
	auto dmabuf_heap_info = select_dmabuf_heap(heap_mask);
	auto dmabuf_heap_name = dmabuf_heap_info.first;

	int shared_fd;
	if (!dmabuf_heap_name.empty())
	{
		shared_fd = alloc_from_dmabuf_heap(dmabuf_heap_name, size, flags);
		if (shared_fd >= 0)
		{
			*min_pgsz = dmabuf_heap_info.second;
		}
		/* No ION fallback for DMA-BUF-backed heaps: propagate the result as-is. */
		return shared_fd;
	}

	if (ion_client < 0 || heap_type == ION_HEAP_TYPE_INVALID)
	{
		return -1;
	}

	shared_fd = exynos_ion_alloc(ion_client, size, heap_mask, flags);

	/* Check if allocation from selected heap failed and fall back to system
	 * heap if possible.
	 */
	if (shared_fd < 0)
	{
		/* Don't allow falling back to sytem heap if secure was requested. */
		if (heap_type == ION_HEAP_TYPE_SECURE)
		{
			return -1;
		}

		/* Can't fall back to system heap if system heap was the heap that
		 * already failed
		 */
		if (heap_type == ION_HEAP_TYPE_SYSTEM)
		{
			AERR("%s: Allocation failed on on system heap. Cannot fallback.", __func__);
			return -1;
		}

		heap_type = ION_HEAP_TYPE_SYSTEM;

		/* Set ION flags for system heap allocation */
		set_ion_flags(heap_type, usage, NULL, &flags);

		shared_fd = exynos_ion_alloc(ion_client, size, EXYNOS_ION_HEAP_SYSTEM_MASK, flags);

		if (shared_fd < 0)
		{
			AERR("Fallback ion_alloc_fd(%d, %zd, %d, %u, %p) failed",
			      ion_client, size, 0, flags, &shared_fd);
			return -1;
		}
		else
		{
			AWAR("allocating to system heap as fallback: fd(%d) usage(%" PRIx64 ") size(%zu) heap_type(%d) heap_mask(0x%x) flags(%u)",
			      shared_fd, usage, size, heap_type, heap_mask, flags);
		}
	}

	/* Report the minimum page size the chosen heap guarantees; heap_type
	 * may have been rewritten to SYSTEM by the fallback above. */
	switch (heap_type)
	{
	case ION_HEAP_TYPE_SYSTEM:
		*min_pgsz = SZ_4K;
		break;

#if GRALLOC_USE_ION_DMA_HEAP
	case ION_HEAP_TYPE_DMA:
		/* Physically contiguous: the whole allocation is one "page". */
		*min_pgsz = size;
		break;
#endif
#if GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
	case ION_HEAP_TYPE_COMPOUND_PAGE:
		*min_pgsz = SZ_2M;
		break;
#endif
	/* If have customized heap please set the suitable pg type according to
	 * the customized ION implementation
	 */
	case ION_HEAP_TYPE_CUSTOM:
		*min_pgsz = SZ_4K;
		break;
	default:
		*min_pgsz = SZ_4K;
		break;
	}

	return shared_fd;
}
487
488 #if GRALLOC_USE_ASHMEM_METADATA != 1
/*
 * Allocates one CPU-cached page from the ION system heap to back gralloc
 * buffer metadata (used when ashmem-backed metadata is disabled).
 *
 * @return fd of the allocated page on success, -1 otherwise.
 */
int alloc_metadata()
{
	int min_pgsz = 0;
	return alloc_from_ion_heap(GRALLOC1_USAGE_READ_OFTEN | GRALLOC1_USAGE_WRITE_OFTEN, PAGE_SIZE, ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED, &min_pgsz);
}
494 #endif
495
pick_ion_heap(uint64_t usage)496 static enum ion_heap_type pick_ion_heap(uint64_t usage)
497 {
498 enum ion_heap_type heap_type = ION_HEAP_TYPE_INVALID;
499
500 if (usage & GRALLOC_USAGE_PROTECTED)
501 {
502 if (secure_heap_exists)
503 {
504 heap_type = ION_HEAP_TYPE_SECURE;
505 }
506 else
507 {
508 AERR("Protected ION memory is not supported on this platform.");
509 }
510 }
511 else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
512 {
513 #if GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
514 heap_type = ION_HEAP_TYPE_COMPOUND_PAGE;
515 #elif GRALLOC_USE_ION_DMA_HEAP
516 heap_type = ION_HEAP_TYPE_DMA;
517 #else
518 heap_type = ION_HEAP_TYPE_SYSTEM;
519 #endif
520 }
521 else
522 {
523 heap_type = ION_HEAP_TYPE_SYSTEM;
524 }
525
526 return heap_type;
527 }
528
529
check_buffers_sharable(const gralloc_buffer_descriptor_t * descriptors,uint32_t numDescriptors)530 static bool check_buffers_sharable(const gralloc_buffer_descriptor_t *descriptors,
531 uint32_t numDescriptors)
532 {
533 enum ion_heap_type shared_backend_heap_type = ION_HEAP_TYPE_INVALID;
534 unsigned int shared_ion_flags = 0;
535 uint64_t usage;
536 uint32_t i;
537
538 if (numDescriptors <= 1)
539 {
540 return false;
541 }
542
543 for (i = 0; i < numDescriptors; i++)
544 {
545 unsigned int ion_flags;
546 enum ion_heap_type heap_type;
547
548 buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];
549
550 usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
551
552 heap_type = pick_ion_heap(usage);
553 if (heap_type == ION_HEAP_TYPE_INVALID)
554 {
555 return false;
556 }
557
558 set_ion_flags(heap_type, usage, NULL, &ion_flags);
559
560 if (shared_backend_heap_type != ION_HEAP_TYPE_INVALID)
561 {
562 if (shared_backend_heap_type != heap_type || shared_ion_flags != ion_flags)
563 {
564 return false;
565 }
566 }
567 else
568 {
569 shared_backend_heap_type = heap_type;
570 shared_ion_flags = ion_flags;
571 }
572 }
573
574 return true;
575 }
576
get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t * descriptors,uint32_t numDescriptors)577 static int get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
578 {
579 uint32_t i, max_buffer_index = 0;
580 size_t max_buffer_size = 0;
581
582 for (i = 0; i < numDescriptors; i++)
583 {
584 buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];
585
586 if (max_buffer_size < bufDescriptor->size)
587 {
588 max_buffer_index = i;
589 max_buffer_size = bufDescriptor->size;
590 }
591 }
592
593 return max_buffer_index;
594 }
595
596 /*
597 * Opens the ION module. Queries heap information and stores it for later use
598 *
599 * @return ionfd in case of success
600 * -1 for all error cases
601 */
open_and_query_ion(void)602 static int open_and_query_ion(void)
603 {
604 if (ion_client >= 0)
605 {
606 AWAR("ION device already open");
607 return 0;
608 }
609
610 ion_client = exynos_ion_open();
611 if (ion_client < 0)
612 {
613 AERR("ion_open failed with %s", strerror(errno));
614 return -1;
615 }
616
617 #if defined(ION_HEAP_SECURE_MASK)
618 secure_heap_exists = true;
619 #endif
620
621 return ion_client;
622 }
623
mali_gralloc_ion_open(void)624 int mali_gralloc_ion_open(void)
625 {
626 if (!buffer_allocator)
627 {
628 buffer_allocator = std::make_unique<BufferAllocator>();
629 if (!buffer_allocator)
630 AERR("Unable to create BufferAllocator object");
631 }
632 return open_and_query_ion();
633 }
634
/*
 * Performs an ION cache-sync (start or end of CPU access) on every ion fd
 * of the handle. Compiled to a no-op result when GRALLOC_ION_SYNC is unset.
 *
 * @param hnd   [in] Buffer handle (must be non-NULL).
 * @param read  [in] CPU read access requested.
 * @param write [in] CPU write access requested.
 * @param start [in] true = begin CPU access, false = end CPU access.
 *
 * @return 0 on success, -EINVAL for a NULL handle, otherwise the OR of the
 *         per-fd exynos_ion_sync_* results.
 */
static int mali_gralloc_ion_sync(const private_handle_t * const hnd,
                                 const bool read,
                                 const bool write,
                                 const bool start)
{
	int ret = 0;

	if (hnd == NULL)
	{
		return -EINVAL;
	}

#ifdef GRALLOC_ION_SYNC
	if (ion_client <= 0)
	{
		/* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer*/
		AWAR("ion_client not specified");

		if (int status = open_and_query_ion(); status < 0)
		{
			return status;
		}
	}

	/* DMA-heap-backed buffers are uncached; only plain ION buffers sync. */
	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION &&
	    !(hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP))
	{
		int direction = 0;

		if (read)
		{
			direction |= ION_SYNC_READ;
		}
		if (write)
		{
			direction |= ION_SYNC_WRITE;
		}

		/* Sync every plane fd; errors are OR-combined rather than aborting. */
		for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
		{
			if (start)
			{
				ret |= exynos_ion_sync_start(ion_client, hnd->fds[idx], direction);
			}
			else
			{
				ret |= exynos_ion_sync_end(ion_client, hnd->fds[idx], direction);
			}
		}
	}
#else
	GRALLOC_UNUSED(read);
	GRALLOC_UNUSED(write);
	GRALLOC_UNUSED(start);
#endif

	return ret;
}
693
694 /*
695 * Signal start of CPU access to the DMABUF exported from ION.
696 *
697 * @param hnd [in] Buffer handle
698 * @param read [in] Flag indicating CPU read access to memory
699 * @param write [in] Flag indicating CPU write access to memory
700 *
701 * @return 0 in case of success
702 * errno for all error cases
703 */
mali_gralloc_ion_sync_start(const private_handle_t * const hnd,const bool read,const bool write)704 int mali_gralloc_ion_sync_start(const private_handle_t * const hnd,
705 const bool read,
706 const bool write)
707 {
708 return mali_gralloc_ion_sync(hnd, read, write, true);
709 }
710
711
712 /*
713 * Signal end of CPU access to the DMABUF exported from ION.
714 *
715 * @param hnd [in] Buffer handle
716 * @param read [in] Flag indicating CPU read access to memory
717 * @param write [in] Flag indicating CPU write access to memory
718 *
719 * @return 0 in case of success
720 * errno for all error cases
721 */
mali_gralloc_ion_sync_end(const private_handle_t * const hnd,const bool read,const bool write)722 int mali_gralloc_ion_sync_end(const private_handle_t * const hnd,
723 const bool read,
724 const bool write)
725 {
726 return mali_gralloc_ion_sync(hnd, read, write, false);
727 }
728
729
/*
 * Releases all resources owned by an ION-backed handle: unmaps any mapped
 * planes, closes every fd, then zeroes the handle. Framebuffer handles are
 * left untouched (their memory is owned by the framebuffer device).
 */
void mali_gralloc_ion_free(private_handle_t * const hnd)
{
	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		return;
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
		/* Buffer might be unregistered already so we need to assure we have a valid handle */

		for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
		{
			if (hnd->fds[idx] >= 0)
			{
				/* A non-zero base means the plane is still CPU mapped. */
				if (hnd->bases[idx] != 0)
				{
					if (munmap((void *)hnd->bases[idx], hnd->sizes[idx]) != 0)
					{
						AERR("Failed to munmap handle %p fd%d", hnd, idx);
					}
				}

				close(hnd->fds[idx]);
			}
		}

		/* Poison the handle so stale use after free is detectable. */
		memset((void *)hnd, 0, sizeof(*hnd));
	}
}
759
mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,const uint32_t num_hnds)760 static void mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,
761 const uint32_t num_hnds)
762 {
763 for (uint32_t i = 0; i < num_hnds; i++)
764 {
765 if (pHandle[i] != NULL)
766 {
767 private_handle_t * const hnd = (private_handle_t * const)pHandle[i];
768 mali_gralloc_ion_free(hnd);
769 }
770 }
771 }
772
/*
 * Allocates (or adopts) the per-plane fds for one buffer descriptor and,
 * when requested, an extra system-heap fd for video private metadata.
 *
 * @param bufDescriptor  [in/out] Descriptor; fd_count/sizes may be updated
 *                                when video private data is appended.
 * @param heap_type      [in]     ION heap type for the plane allocations.
 * @param ion_flags      [in]     ION_FLAG_* for the plane allocations.
 * @param priv_heap_flag [out]    OR'd with PRIV_FLAGS_USES_*PRIVATE_DATA
 *                                when metadata is allocated.
 * @param min_pgsz       [out]    Minimum page size of the last allocation.
 * @param fd0,fd1,fd2    [out]    Resulting fds (-1 where unused).
 * @param ion_fd         [in]     Optional pre-allocated fd adopted as plane 0.
 *
 * @return 0 on success, -1 on failure (all fds in fd_arr are closed).
 */
static int allocate_to_fds(buffer_descriptor_t *bufDescriptor, enum ion_heap_type heap_type,
	uint32_t ion_flags, uint32_t *priv_heap_flag, int *min_pgsz, int* fd0, int* fd1, int* fd2, int ion_fd = -1)
{
	int fd_arr[3] = {-1, -1, -1};
	uint64_t usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

	for (int idx = 0; idx < bufDescriptor->fd_count; idx++)
	{
		/* Plane 0 may be supplied by the caller instead of allocated. */
		if (ion_fd != -1 && idx == 0) {
			fd_arr[idx] = ion_fd;
		} else {
			fd_arr[idx] = alloc_from_ion_heap(usage, bufDescriptor->sizes[idx], heap_type, ion_flags, min_pgsz);
		}

		if (fd_arr[idx] < 0)
		{
			AERR("ion_alloc failed from client ( %d )", ion_client);
			goto err;
		}
	}

	if (bufDescriptor->alloc_video_private_data)
	{
		/* Metadata occupies the next free fd slot (1 or 2 only). */
		int idx = bufDescriptor->fd_count;

		if (idx != 1 && idx != 2)
		{
			AERR("cannot allocate video private metadata for formate(%#x)", (uint32_t)bufDescriptor->internal_format);
			goto err;
		}

		/* Metadata must be CPU-accessible, so drop the protected bit. */
		usage = usage & ~GRALLOC1_PRODUCER_USAGE_PROTECTED;
		fd_arr[idx] = alloc_from_ion_heap(usage, VIDEO_PRIV_DATA_SIZE, ION_HEAP_TYPE_SYSTEM, 0, min_pgsz);
		if (fd_arr[idx] < 0)
		{
			AERR("ion_alloc failed from client ( %d )", ion_client);
			goto err;
		}

		bufDescriptor->sizes[idx] = VIDEO_PRIV_DATA_SIZE;

		switch (idx)
		{
			case 2:
				*priv_heap_flag |= private_handle_t::PRIV_FLAGS_USES_3PRIVATE_DATA;
				break;
			case 1:
				*priv_heap_flag |= private_handle_t::PRIV_FLAGS_USES_2PRIVATE_DATA;
				break;
		}

		bufDescriptor->fd_count++;
	}

	*fd0 = fd_arr[0];
	*fd1 = fd_arr[1];
	*fd2 = fd_arr[2];

	return 0;

err:
	/* NOTE(review): this also closes a caller-supplied ion_fd (adopted as
	 * fd_arr[0]); presumably the caller cedes ownership on entry — confirm
	 * no caller closes it again after a failure here. */
	for (int i = 0; i < 3; i++)
	{
		if (fd_arr[i] >= 0)
		{
			close(fd_arr[i]);
		}
	}

	return -1;
}
844
845
846 /*
847 * Allocates ION buffers
848 *
849 * @param descriptors [in] Buffer request descriptors
850 * @param numDescriptors [in] Number of descriptors
851 * @param pHandle [out] Handle for each allocated buffer
852 * @param shared_backend [out] Shared buffers flag
853 * @param ion_fd [in] Optional fd of an allocated ION buffer
854 *
855 * @return File handle which can be used for allocation, on success
856 * -1, otherwise.
857 *
858 * In the case ion_fd != -1, this functions wraps ion_fd in a buffer_handle_t
859 * instead.
860 */
int mali_gralloc_ion_allocate(const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle,
                              bool *shared_backend, int ion_fd)
{
	unsigned int priv_heap_flag = 0;
	enum ion_heap_type heap_type;
#if GRALLOC_INIT_AFBC == 1
	unsigned char *cpu_ptr = NULL;
#endif
	uint64_t usage;
	uint32_t i, max_buffer_index = 0;
	/* Anonymous union: shared_fd aliases fd_arr[0] so the shared-backend
	 * and per-descriptor code paths can use the same storage. */
	union
	{
		int shared_fd;
		int fd_arr[3];
	};
	unsigned int ion_flags = 0;
	int min_pgsz = 0;
	int is_compressible = 0;

	fd_arr[0] = fd_arr[1] = fd_arr[2] = -1;

	/* Lazily open the ION client on first allocation. */
	if (ion_client < 0)
	{
		int status = 0;
		status = open_and_query_ion();
		if (status < 0)
		{
			return status;
		}
	}

	*shared_backend = check_buffers_sharable(descriptors, numDescriptors);

	if (*shared_backend)
	{
		/* Shared path: allocate once for the largest descriptor, then dup
		 * that fd for every other descriptor. */
		buffer_descriptor_t *max_bufDescriptor;

		max_buffer_index = get_max_buffer_descriptor_index(descriptors, numDescriptors);
		max_bufDescriptor = (buffer_descriptor_t *)(descriptors[max_buffer_index]);
		usage = max_bufDescriptor->consumer_usage | max_bufDescriptor->producer_usage;

		heap_type = pick_ion_heap(usage);
		if (heap_type == ION_HEAP_TYPE_INVALID)
		{
			AERR("Failed to find an appropriate ion heap");
			return -1;
		}

		set_ion_flags(heap_type, usage, &priv_heap_flag, &ion_flags);

		if (allocate_to_fds(max_bufDescriptor, heap_type, ion_flags, &priv_heap_flag, &min_pgsz,
		                    &fd_arr[0], &fd_arr[1], &fd_arr[2]) < 0)
		{
			AERR("ion_alloc failed form client: ( %d )", ion_client);
			return -1;
		}

		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			int tmp_fd;

			if (i != max_buffer_index)
			{
				tmp_fd = dup(shared_fd);

				if (tmp_fd < 0)
				{
					AERR("Ion shared fd:%d of index:%d could not be duplicated for descriptor:%d",
					     shared_fd, max_buffer_index, i);

					/* It is possible that already opened shared_fd for the
					 * max_bufDescriptor is also not closed */
					if (i < max_buffer_index)
					{
						close(shared_fd);
					}

					/* Need to free already allocated memory. */
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
			}
			else
			{
				tmp_fd = shared_fd;
			}

			/* AFBC buffers carry an extra one-int fd for the AFBC flag region. */
			if (bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBC_BASIC)
			{
				is_compressible = 1;

				if (fd_arr[1] >= 0)
				{
					ALOGW("Warning afbc flag fd already exists during create. Closing.");
					close(fd_arr[1]);
				}

				bufDescriptor->sizes[1] = sizeof(int);
				fd_arr[1] = exynos_ion_alloc(ion_client, bufDescriptor->sizes[1], EXYNOS_ION_HEAP_SYSTEM_MASK, 0);
				if (fd_arr[1] < 0)
				{
					ALOGE("Failed to allocate page for afbc flag region");
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
			}

			/* Note: the handle records max_bufDescriptor->size as the backing
			 * size since all descriptors share the same allocation. */
			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, 0, 0, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, tmp_fd, -1, -1, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->alloc_format,
			    bufDescriptor->width, bufDescriptor->height,
			    max_bufDescriptor->size, bufDescriptor->pixel_stride, bufDescriptor->layer_count,
			    bufDescriptor->plane_info, is_compressible, bufDescriptor->plane_count);

			if (NULL == hnd)
			{
				AERR("Private handle could not be created for descriptor:%d of shared usecase", i);

				/* Close the obtained shared file descriptor for the current handle */
				close(tmp_fd);

				/* It is possible that already opened shared_fd for the
				 * max_bufDescriptor is also not closed */
				if (i < max_buffer_index)
				{
					close(shared_fd);
				}

				/* Free the resources allocated for the previous handles */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}
	else
	{
		/* Independent path: each descriptor gets its own allocation; an
		 * optional caller-provided ion_fd is adopted for the first plane. */
		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

			heap_type = pick_ion_heap(usage);
			if (heap_type == ION_HEAP_TYPE_INVALID)
			{
				AERR("Failed to find an appropriate ion heap");
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			set_ion_flags(heap_type, usage, &priv_heap_flag, &ion_flags);

			if (allocate_to_fds(bufDescriptor, heap_type, ion_flags, &priv_heap_flag, &min_pgsz,
			                    &fd_arr[0], &fd_arr[1], &fd_arr[2], ion_fd) < 0)
			{
				AERR("ion_alloc failed from client ( %d )", ion_client);

				/* need to free already allocated memory. not just this one */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			/* AFBC buffers carry an extra one-int fd for the AFBC flag region. */
			if (bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBC_BASIC)
			{
				is_compressible = 1;

				if (fd_arr[1] >= 0)
				{
					ALOGW("Warning afbc flag fd already exists during create. Closing.");
					close(fd_arr[1]);
				}

				bufDescriptor->sizes[1] = sizeof(int);
				fd_arr[1] = exynos_ion_alloc(ion_client, bufDescriptor->sizes[1], EXYNOS_ION_HEAP_SYSTEM_MASK, 0);
				if (fd_arr[1] < 0)
				{
					ALOGE("Failed to allocate page for afbc flag region");
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size,
			    bufDescriptor->sizes[1], bufDescriptor->sizes[2], min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage,
			    shared_fd, fd_arr[1], fd_arr[2], bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->alloc_format,
			    bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->size, bufDescriptor->pixel_stride, bufDescriptor->layer_count,
			    bufDescriptor->plane_info, is_compressible, bufDescriptor->plane_count);

			if (NULL == hnd)
			{
				AERR("Private handle could not be created for descriptor:%d in non-shared usecase", i);

				/* Close the obtained shared file descriptor for the current handle */
				close(shared_fd);
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}

	/* Optionally initialise AFBC headers in-place for non-protected buffers
	 * (mapped, written under a CPU-access sync, then unmapped). */
	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
#if GRALLOC_INIT_AFBC == 1
		private_handle_t *hnd = (private_handle_t *)(pHandle[i]);
#endif

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
#if GRALLOC_INIT_AFBC == 1
			if ((bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK) && (!(*shared_backend)))
			{
				cpu_ptr =
				    (unsigned char *)mmap(NULL, bufDescriptor->size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

				if (MAP_FAILED == cpu_ptr)
				{
					AERR("mmap failed from client ( %d ), fd ( %d )", ion_client, hnd->share_fd);
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}

				mali_gralloc_ion_sync_start(hnd, true, true);

				/* For separated plane YUV, there is a header to initialise per plane. */
				const plane_info_t *plane_info = bufDescriptor->plane_info;
				const bool is_multi_plane = hnd->is_multi_plane();
				for (int i = 0; i < MAX_PLANES && (i == 0 || plane_info[i].byte_stride != 0); i++)
				{
					init_afbc(cpu_ptr + plane_info[i].offset,
					          bufDescriptor->alloc_format,
					          is_multi_plane,
					          plane_info[i].alloc_width,
					          plane_info[i].alloc_height);
				}

				mali_gralloc_ion_sync_end(hnd, true, true);

				munmap((void *)cpu_ptr, bufDescriptor->size);
			}
#endif
		}
	}

	return 0;
}
1119
1120
import_exynos_ion_handles(private_handle_t * hnd)1121 int import_exynos_ion_handles(private_handle_t *hnd)
1122 {
1123 int retval = -1;
1124
1125 #if GRALLOC_VERSION_MAJOR <= 1
1126 if (ion_client < 0)
1127 {
1128 /* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer*/
1129 int status = 0;
1130 status = open_and_query_ion();
1131 if (status < 0)
1132 {
1133 return status;
1134 }
1135 }
1136 #endif
1137
1138 for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
1139 {
1140 if (hnd->fds[idx] >= 0)
1141 {
1142 retval = exynos_ion_import_handle(ion_client, hnd->fds[idx], &hnd->ion_handles[idx]);
1143 if (retval)
1144 {
1145 AERR("error importing handle[%d] format(%x)\n", idx, (uint32_t)hnd->internal_format);
1146 goto clean_up;
1147 }
1148 }
1149 }
1150
1151 return retval;
1152
1153 clean_up:
1154 for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
1155 {
1156 if (hnd->ion_handles[idx])
1157 {
1158 exynos_ion_free_handle(ion_client, hnd->ion_handles[idx]);
1159 }
1160 }
1161
1162 return retval;
1163 }
1164
1165
free_exynos_ion_handles(private_handle_t * hnd)1166 void free_exynos_ion_handles(private_handle_t *hnd)
1167 {
1168 for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
1169 {
1170 if (hnd->ion_handles[idx])
1171 {
1172 if (ion_client > 0 && hnd->ion_handles[idx] > 0 &&
1173 exynos_ion_free_handle(ion_client, hnd->ion_handles[idx]))
1174 {
1175 AERR("error freeing ion_client(%d), ion_handle[%d](%d) format(%x)\n",
1176 ion_client, idx, hnd->ion_handles[idx], (uint32_t)hnd->internal_format);
1177 }
1178 }
1179 }
1180 }
1181
1182
mali_gralloc_ion_map(private_handle_t * hnd)1183 int mali_gralloc_ion_map(private_handle_t *hnd)
1184 {
1185 int retval = -EINVAL;
1186
1187 if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
1188 {
1189 #if GRALLOC_VERSION_MAJOR <= 1
1190 if (ion_client < 0)
1191 {
1192 /* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer*/
1193 int status = 0;
1194 status = open_and_query_ion();
1195 if (status < 0)
1196 {
1197 return status;
1198 }
1199 }
1200 #endif
1201 retval = gralloc_map(hnd);
1202 }
1203
1204 return retval;
1205 }
1206
mali_gralloc_ion_unmap(private_handle_t * hnd)1207 void mali_gralloc_ion_unmap(private_handle_t *hnd)
1208 {
1209 if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
1210 {
1211 int fail = 0;
1212
1213 for (int idx = 0; idx < hnd->get_num_ion_fds(); idx++)
1214 {
1215 if (hnd->bases[idx])
1216 {
1217 if (munmap((void *)hnd->bases[idx], hnd->sizes[idx]) < 0)
1218 {
1219 AERR("Could not munmap base:%p size:%u '%s'", (void *)hnd->bases[0], hnd->sizes[idx], strerror(errno));
1220 fail = 1;
1221 }
1222 else
1223 {
1224 hnd->bases[idx] = 0;
1225 }
1226 }
1227 }
1228
1229 if (!fail)
1230 {
1231 hnd->cpu_read = 0;
1232 hnd->cpu_write = 0;
1233 }
1234 }
1235 }
1236
mali_gralloc_ion_close(void)1237 void mali_gralloc_ion_close(void)
1238 {
1239 if (ion_client != -1)
1240 {
1241 if (exynos_ion_close(ion_client))
1242 {
1243 AERR("Failed to close ion_client: %d err=%s", ion_client, strerror(errno));
1244 }
1245
1246 ion_client = -1;
1247 }
1248
1249 buffer_allocator.reset();
1250 }
1251
1252