1 /*
2 * Copyright (C) 2016-2020 ARM Limited. All rights reserved.
3 *
4 * Copyright (C) 2008 The Android Open Source Project
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include <string.h>
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdlib.h>
24 #include <limits.h>
25
26 #include <log/log.h>
27 #include <cutils/atomic.h>
28
29 #include <linux/dma-buf.h>
30 #include <vector>
31 #include <sys/ioctl.h>
32
33 #include <hardware/hardware.h>
34 #include <hardware/gralloc1.h>
35
36 #include <hardware/exynos/ion.h>
37 #include <hardware/exynos/dmabuf_container.h>
38
39 #include <BufferAllocator/BufferAllocator.h>
40 #include "mali_gralloc_buffer.h"
41 #include "gralloc_helper.h"
42 #include "mali_gralloc_formats.h"
43 #include "mali_gralloc_usages.h"
44 #include "core/format_info.h"
45 #include "core/mali_gralloc_bufferdescriptor.h"
46 #include "core/mali_gralloc_bufferallocation.h"
47
48 #include "mali_gralloc_ion.h"
49
50 #include <array>
51
/* Zero-initialize an object in place. */
#define INIT_ZERO(obj) (memset(&(obj), 0, sizeof((obj))))

/* Convert an ION heap id / heap type into the bitmask form the ION API expects. */
#define HEAP_MASK_FROM_ID(id) (1 << id)
#define HEAP_MASK_FROM_TYPE(type) (1 << type)

/* Compile-time sanity check: the platform's secure-heap mask must agree with
 * the mask derived from ION_HEAP_TYPE_SECURE. */
#if defined(ION_HEAP_SECURE_MASK)
#if (HEAP_MASK_FROM_TYPE(ION_HEAP_TYPE_SECURE) != ION_HEAP_SECURE_MASK)
#error "ION_HEAP_TYPE_SECURE value is not compatible with ION_HEAP_SECURE_MASK"
#endif
#endif

/* DMA-BUF heap names equivalent to the ION heaps used in this file; the
 * mask-to-name mapping lives in select_dmabuf_heap() below. */
static const char kDmabufSensorDirectHeapName[] = "sensor_direct_heap";
static const char kDmabufFaceauthTpuHeapName[] = "faceauth_tpu-secure";
static const char kDmabufFaceauthImgHeapName[] = "faimg-secure";
static const char kDmabufFaceauthRawImgHeapName[] = "farawimg-secure";
static const char kDmabufFaceauthPrevHeapName[] = "faprev-secure";
static const char kDmabufFaceauthModelHeapName[] = "famodel-secure";
static const char kDmabufVframeSecureHeapName[] = "vframe-secure";
static const char kDmabufVstreamSecureHeapName[] = "vstream-secure";
71
/*
 * Process-wide singleton that owns the ION client fd and the DMA-BUF heap
 * BufferAllocator. Obtain via ion_device::get(); tear down via
 * ion_device::close().
 *
 * NOTE(review): no locking is visible here — presumably first use happens
 * before concurrent access, or callers serialize externally; confirm.
 */
struct ion_device
{
	/* Returns the raw ION client fd, or -1 if the ION device is not open. */
	int client()
	{
		return ion_client;
	}

	/* Closes the ION client (if open) and destroys the BufferAllocator.
	 * Safe to call when nothing was ever opened. */
	static void close()
	{
		ion_device &dev = get_inst();
		if (dev.ion_client >= 0)
		{
			exynos_ion_close(dev.ion_client);
			dev.ion_client = -1;
		}

		dev.buffer_allocator.reset();
	}

	/*
	 * Returns the singleton instance, lazily creating the BufferAllocator
	 * and opening the ION client on first use.
	 *
	 * @return pointer to the ready-to-use device, or nullptr if the ION
	 *         device could not be opened.
	 */
	static ion_device *get()
	{
		ion_device &dev = get_inst();
		if (!dev.buffer_allocator)
		{
			dev.buffer_allocator = std::make_unique<BufferAllocator>();
			if (!dev.buffer_allocator)
				ALOGE("Unable to create BufferAllocator object");
		}

		if (dev.ion_client < 0)
		{
			if (dev.open_and_query_ion() != 0)
			{
				close();
			}
		}

		if (dev.ion_client < 0)
		{
			return nullptr;
		}
		return &dev;
	}

	/*
	 * Identifies a heap from the usage bits and allocates a buffer from it,
	 * preferring a DMA-BUF heap and falling back to ION.
	 *
	 * @param usage    [in]  Producer and consumer combined usage.
	 * @param size     [in]  Requested buffer size (in bytes).
	 * @param flags    [in]  ION allocation attributes defined by ION_FLAG_*.
	 * @param min_pgsz [out] Minimum page size (in bytes).
	 *
	 * @return File handle which can be used for allocation, on success
	 *         -1, otherwise.
	 */
	int alloc_from_ion_heap(uint64_t usage, size_t size, unsigned int flags, int *min_pgsz);

	/*
	 * Signals the start or end of a region where the CPU is accessing a
	 * buffer, allowing appropriate cache synchronization.
	 *
	 * @param fd    [in] fd for the buffer
	 * @param read  [in] True if the CPU is reading from the buffer
	 * @param write [in] True if the CPU is writing to the buffer
	 * @param start [in] True if the CPU has not yet performed the
	 *                   operations; false if the operations are
	 *                   completed.
	 *
	 * @return 0 on success; an error code otherwise.
	 */
	int sync(int fd, bool read, bool write, bool start);

private:
	int ion_client;                                    /* ION client fd; -1 when closed */
	std::unique_ptr<BufferAllocator> buffer_allocator; /* DMA-BUF heaps front-end */

	ion_device()
		: ion_client(-1)
	{
	}

	/* Meyers singleton storage. */
	static ion_device& get_inst()
	{
		static ion_device dev;
		return dev;
	}

	/*
	 * Opens the ION module. Queries heap information and stores it for later use
	 *
	 * @return 0 in case of success
	 *         -1 for all error cases
	 */
	int open_and_query_ion();

	/*
	 * Allocates in the DMA-BUF heap with name @heap_name. If allocation fails from
	 * the DMA-BUF heap or if it does not exist, falls back to an ION heap of the
	 * same name.
	 *
	 * @param heap_name [in] DMA-BUF heap name for allocation
	 * @param size      [in] Requested buffer size (in bytes).
	 * @param flags     [in] ION allocation attributes defined by ION_FLAG_* to
	 *                       be used for ION allocations. Will not be used with
	 *                       DMA-BUF heaps since the framework does not support
	 *                       allocation flags.
	 *
	 * @return fd of the allocated buffer on success, -1 otherwise;
	 */
	int alloc_from_dmabuf_heap(const std::string& heap_name, size_t size, unsigned int flags);
};
185
set_ion_flags(uint64_t usage,unsigned int * ion_flags)186 static void set_ion_flags(uint64_t usage, unsigned int *ion_flags)
187 {
188 if (ion_flags == nullptr)
189 return;
190
191 if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
192 {
193 *ion_flags |= ION_FLAG_CACHED;
194 }
195
196 // DRM or Secure Camera
197 if (usage & (GRALLOC_USAGE_PROTECTED))
198 {
199 *ion_flags |= ION_FLAG_PROTECTED;
200 }
201
202 /* TODO: used for exynos3830. Add this as an option to Android.bp */
203 #if defined(GRALLOC_SCALER_WFD) && GRALLOC_SCALER_WFD == 1
204 if (usage & GRALLOC_USAGE_PRIVATE_NONSECURE && usage & GRALLOC_USAGE_HW_COMPOSER)
205 {
206 *ion_flags |= ION_FLAG_PROTECTED;
207 }
208 #endif
209 /* Sensor direct channels require uncached allocations. */
210 if (usage & GRALLOC_USAGE_SENSOR_DIRECT_DATA)
211 {
212 *ion_flags &= ~ION_FLAG_CACHED;
213 }
214 }
215
select_faceauth_heap_mask(uint64_t usage)216 static unsigned int select_faceauth_heap_mask(uint64_t usage)
217 {
218 struct HeapSpecifier
219 {
220 uint64_t usage_bits; // exact match required
221 unsigned int mask;
222 };
223
224 static constexpr std::array<HeapSpecifier, 5> faceauth_heaps =
225 {{
226 { // isp_image_heap
227 GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GS101_GRALLOC_USAGE_TPU_INPUT,
228 EXYNOS_ION_HEAP_FA_IMG_MASK
229 },
230 { // isp_internal_heap
231 GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_CAMERA_READ,
232 EXYNOS_ION_HEAP_FA_RAWIMG_MASK
233 },
234 { // isp_preview_heap
235 GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_COMPOSER |
236 GRALLOC_USAGE_HW_TEXTURE,
237 EXYNOS_ION_HEAP_FA_PREV_MASK
238 },
239 { // ml_model_heap
240 GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_INPUT,
241 EXYNOS_ION_HEAP_FA_MODEL_MASK
242 },
243 { // tpu_heap
244 GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_OUTPUT | GS101_GRALLOC_USAGE_TPU_INPUT,
245 EXYNOS_ION_HEAP_FA_TPU_MASK
246 }
247 }};
248
249 for (const HeapSpecifier &heap : faceauth_heaps)
250 {
251 if (usage == heap.usage_bits)
252 {
253 ALOGV("Using FaceAuth heap mask 0x%x for usage 0x%" PRIx64 "\n",
254 heap.mask, usage);
255 return heap.mask;
256 }
257 }
258
259 return 0;
260 }
261
select_heap_mask(uint64_t usage)262 static unsigned int select_heap_mask(uint64_t usage)
263 {
264 if (unsigned int faceauth_heap_mask = select_faceauth_heap_mask(usage);
265 faceauth_heap_mask != 0)
266 {
267 return faceauth_heap_mask;
268 }
269
270 unsigned int heap_mask;
271
272 if (usage & GRALLOC_USAGE_PROTECTED)
273 {
274 if (usage & GRALLOC_USAGE_PRIVATE_NONSECURE)
275 {
276 heap_mask = EXYNOS_ION_HEAP_SYSTEM_MASK;
277 }
278 else if ((usage & GRALLOC_USAGE_HW_COMPOSER) &&
279 !(usage & GRALLOC_USAGE_HW_TEXTURE) &&
280 !(usage & GRALLOC_USAGE_HW_RENDER))
281 {
282 heap_mask = EXYNOS_ION_HEAP_VIDEO_SCALER_MASK;
283 }
284 else
285 {
286 heap_mask = EXYNOS_ION_HEAP_VIDEO_FRAME_MASK;
287 }
288 }
289 /* TODO: used for exynos3830. Add this as a an option to Android.bp */
290 #if defined(GRALLOC_SCALER_WFD) && GRALLOC_SCALER_WFD == 1
291 else if (usage & GRALLOC_USAGE_PRIVATE_NONSECURE && usage & GRALLOC_USAGE_HW_COMPOSER)
292 {
293 heap_mask = EXYNOS_ION_HEAP_EXT_UI_MASK;
294 }
295 #endif
296 else if (usage & GRALLOC_USAGE_SENSOR_DIRECT_DATA)
297 {
298 heap_mask = EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK;
299 }
300 else
301 {
302 heap_mask = EXYNOS_ION_HEAP_SYSTEM_MASK;
303 }
304
305 return heap_mask;
306 }
307
308 /*
309 * Selects a DMA-BUF heap name.
310 *
311 * @param heap_mask [in] heap_mask for which the equivalent DMA-BUF heap
312 * name must be found.
313 *
314 * @return the name of the DMA-BUF heap equivalent to the ION heap of mask
315 * @heap_mask.
316 *
317 */
select_dmabuf_heap(unsigned int heap_mask)318 static std::string select_dmabuf_heap(unsigned int heap_mask)
319 {
320 switch (heap_mask) {
321 case EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK:
322 return kDmabufSensorDirectHeapName;
323 case EXYNOS_ION_HEAP_FA_TPU_MASK:
324 return kDmabufFaceauthTpuHeapName;
325 case EXYNOS_ION_HEAP_FA_IMG_MASK:
326 return kDmabufFaceauthImgHeapName;
327 case EXYNOS_ION_HEAP_FA_RAWIMG_MASK:
328 return kDmabufFaceauthRawImgHeapName;
329 case EXYNOS_ION_HEAP_FA_PREV_MASK:
330 return kDmabufFaceauthPrevHeapName;
331 case EXYNOS_ION_HEAP_FA_MODEL_MASK:
332 return kDmabufFaceauthModelHeapName;
333 case EXYNOS_ION_HEAP_VIDEO_FRAME_MASK:
334 return kDmabufVframeSecureHeapName;
335 case EXYNOS_ION_HEAP_VIDEO_STREAM_MASK:
336 return kDmabufVstreamSecureHeapName;
337 default:
338 return {};
339 }
340 }
341
alloc_from_dmabuf_heap(const std::string & heap_name,size_t size,unsigned int flags)342 int ion_device::alloc_from_dmabuf_heap(const std::string& heap_name, size_t size,
343 unsigned int flags)
344 {
345 if (!buffer_allocator)
346 {
347 return -1;
348 }
349
350 int shared_fd = buffer_allocator->Alloc(heap_name, size, flags);
351 if (shared_fd < 0)
352 {
353 ALOGE("Allocation failed for heap %s error: %d\n", heap_name.c_str(), shared_fd);
354 }
355
356 return shared_fd;
357 }
358
alloc_from_ion_heap(uint64_t usage,size_t size,unsigned int flags,int * min_pgsz)359 int ion_device::alloc_from_ion_heap(uint64_t usage, size_t size, unsigned int flags, int *min_pgsz)
360 {
361 /* TODO: remove min_pgsz? I don't think this is useful on Exynos */
362 if (size == 0 || min_pgsz == NULL)
363 {
364 return -1;
365 }
366
367 unsigned int heap_mask = select_heap_mask(usage);
368
369 int shared_fd;
370 auto dmabuf_heap_name = select_dmabuf_heap(heap_mask);
371 if (!dmabuf_heap_name.empty())
372 {
373 shared_fd = alloc_from_dmabuf_heap(dmabuf_heap_name, size, flags);
374 }
375 else
376 {
377 if (ion_client < 0)
378 {
379 return -1;
380 }
381
382 shared_fd = exynos_ion_alloc(ion_client, size, heap_mask, flags);
383 }
384
385 *min_pgsz = SZ_4K;
386
387 return shared_fd;
388 }
389
open_and_query_ion()390 int ion_device::open_and_query_ion()
391 {
392 if (ion_client >= 0)
393 {
394 MALI_GRALLOC_LOGW("ION device already open");
395 return 0;
396 }
397
398 ion_client = exynos_ion_open();
399 if (ion_client < 0)
400 {
401 MALI_GRALLOC_LOGE("ion_open failed with %s", strerror(errno));
402 return -1;
403 }
404
405 return 0;
406 }
407
sync_type_for_flags(const bool read,const bool write)408 static SyncType sync_type_for_flags(const bool read, const bool write)
409 {
410 if (read && !write)
411 {
412 return SyncType::kSyncRead;
413 }
414 else if (write && !read)
415 {
416 return SyncType::kSyncWrite;
417 }
418 else
419 {
420 // Deliberately also allowing "not sure" to map to ReadWrite.
421 return SyncType::kSyncReadWrite;
422 }
423 }
424
sync(const int fd,const bool read,const bool write,const bool start)425 int ion_device::sync(const int fd, const bool read, const bool write, const bool start)
426 {
427 if (!buffer_allocator)
428 {
429 return -1;
430 }
431
432 if (start)
433 {
434 return buffer_allocator->CpuSyncStart(fd, sync_type_for_flags(read, write));
435 }
436 else
437 {
438 return buffer_allocator->CpuSyncEnd(fd, sync_type_for_flags(read, write));
439 }
440 }
441
mali_gralloc_ion_sync(const private_handle_t * const hnd,const bool read,const bool write,const bool start)442 static int mali_gralloc_ion_sync(const private_handle_t * const hnd,
443 const bool read,
444 const bool write,
445 const bool start)
446 {
447 if (hnd == NULL)
448 {
449 return -EINVAL;
450 }
451
452 ion_device *dev = ion_device::get();
453 if (!dev)
454 {
455 return -1;
456 }
457
458 for (int i = 0; i < hnd->fd_count; i++)
459 {
460 const int fd = hnd->fds[i];
461 if (const int ret = dev->sync(fd, read, write, start))
462 {
463 return ret;
464 }
465 }
466
467 return 0;
468 }
469
470
/*
 * Signal start of CPU access to the DMABUF exported from ION.
 *
 * @param hnd   [in] Buffer handle
 * @param read  [in] Flag indicating CPU read access to memory
 * @param write [in] Flag indicating CPU write access to memory
 *
 * @return 0 in case of success
 *         -EINVAL for a null handle; a nonzero error code for other failures
 */
int mali_gralloc_ion_sync_start(const private_handle_t * const hnd,
                                const bool read,
                                const bool write)
{
	return mali_gralloc_ion_sync(hnd, read, write, true);
}
487
488
/*
 * Signal end of CPU access to the DMABUF exported from ION.
 *
 * @param hnd   [in] Buffer handle
 * @param read  [in] Flag indicating CPU read access to memory
 * @param write [in] Flag indicating CPU write access to memory
 *
 * @return 0 in case of success
 *         -EINVAL for a null handle; a nonzero error code for other failures
 */
int mali_gralloc_ion_sync_end(const private_handle_t * const hnd,
                              const bool read,
                              const bool write)
{
	return mali_gralloc_ion_sync(hnd, read, write, false);
}
505
506
mali_gralloc_ion_free(private_handle_t * const hnd)507 void mali_gralloc_ion_free(private_handle_t * const hnd)
508 {
509 for (int i = 0; i < hnd->fd_count; i++)
510 {
511 void* mapped_addr = reinterpret_cast<void*>(hnd->bases[i]);
512
513 /* Buffer might be unregistered already so we need to assure we have a valid handle */
514 if (mapped_addr != nullptr)
515 {
516 if (munmap(mapped_addr, hnd->alloc_sizes[i]) != 0)
517 {
518 /* TODO: more detailed error logs */
519 MALI_GRALLOC_LOGE("Failed to munmap handle %p", hnd);
520 }
521 }
522 close(hnd->fds[i]);
523 hnd->fds[i] = -1;
524 hnd->bases[i] = 0;
525 }
526 delete hnd;
527 }
528
mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,const uint32_t num_hnds)529 static void mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,
530 const uint32_t num_hnds)
531 {
532 for (uint32_t i = 0; i < num_hnds; i++)
533 {
534 if (pHandle[i] != NULL)
535 {
536 private_handle_t * const hnd = (private_handle_t * const)pHandle[i];
537 mali_gralloc_ion_free(hnd);
538 }
539 }
540 }
541
mali_gralloc_ion_allocate_attr(private_handle_t * hnd)542 int mali_gralloc_ion_allocate_attr(private_handle_t *hnd)
543 {
544 ion_device *dev = ion_device::get();
545 if (!dev)
546 {
547 return -1;
548 }
549
550 int idx = hnd->get_share_attr_fd_index();
551 int ion_flags = 0;
552 int min_pgsz;
553 uint64_t usage = GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN;
554
555 ion_flags = ION_FLAG_CACHED;
556
557 hnd->fds[idx] = dev->alloc_from_ion_heap(usage, hnd->attr_size, ion_flags, &min_pgsz);
558 if (hnd->fds[idx] < 0)
559 {
560 MALI_GRALLOC_LOGE("ion_alloc failed from client ( %d )", dev->client());
561 return -1;
562 }
563
564 hnd->incr_numfds(1);
565
566 return 0;
567 }
568
/*
 * Allocates ION buffers
 *
 * @param descriptors    [in]  Buffer request descriptors
 * @param numDescriptors [in]  Number of descriptors
 * @param pHandle        [out] Handle for each allocated buffer
 * @param shared_backend [out] Shared buffers flag (unused here)
 * @param ion_fd         [in]  Pre-existing fd to use for plane 0 of each
 *                             buffer instead of allocating one; pass a
 *                             negative value to allocate all planes.
 *
 * @return 0 on success
 *         -1, otherwise.
 */
int mali_gralloc_ion_allocate(const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle,
                              bool *shared_backend, int ion_fd)
{
	GRALLOC_UNUSED(shared_backend);

	unsigned int priv_heap_flag = 0; /* always 0 here; forwarded into the handle */
	uint64_t usage;
	uint32_t i;
	unsigned int ion_flags = 0;
	int min_pgsz = 0; /* written by alloc_from_ion_heap but never read here */
	int fds[5] = {-1, -1, -1, -1, -1};

	ion_device *dev = ion_device::get();
	if (!dev)
	{
		return -1;
	}

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		/* Derive ION_FLAG_* (cached/protected) from the gralloc usage. */
		ion_flags = 0;
		set_ion_flags(usage, &ion_flags);

		/* Allocate one fd per plane; an externally supplied ion_fd (if any)
		 * is used for plane 0 only. */
		for (int fidx = 0; fidx < bufDescriptor->fd_count; fidx++)
		{
			if (ion_fd >= 0 && fidx == 0) {
				fds[fidx] = ion_fd;
			} else {
				fds[fidx] = dev->alloc_from_ion_heap(usage, bufDescriptor->alloc_sizes[fidx], ion_flags, &min_pgsz);
			}
			if (fds[fidx] < 0)
			{
				MALI_GRALLOC_LOGE("ion_alloc failed from client ( %d )", dev->client());

				/* Close the fds obtained for earlier planes of THIS buffer;
				 * they are not yet owned by any handle. */
				for (int cidx = 0; cidx < fidx; cidx++)
				{
					close(fds[cidx]);
				}

				/* need to free already allocated memory. not just this one */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);

				return -1;
			}
		}

		/* The handle takes ownership of fds[]; from here on cleanup of this
		 * buffer goes through mali_gralloc_ion_free(). */
		private_handle_t *hnd = new private_handle_t(
		    priv_heap_flag,
		    bufDescriptor->alloc_sizes,
		    bufDescriptor->consumer_usage, bufDescriptor->producer_usage,
		    fds, bufDescriptor->fd_count,
		    bufDescriptor->hal_format, bufDescriptor->alloc_format,
		    bufDescriptor->width, bufDescriptor->height, bufDescriptor->pixel_stride,
		    bufDescriptor->layer_count, bufDescriptor->plane_info);

		/* NOTE(review): plain `new` throws rather than returning NULL, so
		 * this check is defensive only. */
		if (NULL == hnd)
		{
			MALI_GRALLOC_LOGE("Private handle could not be created for descriptor:%d in non-shared usecase", i);

			/* Close the obtained shared file descriptor for the current handle */
			for (int j = 0; j < bufDescriptor->fd_count; j++)
			{
				close(fds[j]);
			}

			mali_gralloc_ion_free_internal(pHandle, numDescriptors);
			return -1;
		}

		pHandle[i] = hnd;
	}

#if defined(GRALLOC_INIT_AFBC) && (GRALLOC_INIT_AFBC == 1)
	/* Second pass: initialise AFBC headers for compressed, non-protected
	 * buffers by mapping plane 0 and writing the per-plane headers. */
	unsigned char *cpu_ptr = NULL;
	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		if ((bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK)
		    && !(usage & GRALLOC_USAGE_PROTECTED))
		{
			/* TODO: only map for AFBC buffers */
			cpu_ptr =
			    (unsigned char *)mmap(NULL, bufDescriptor->alloc_sizes[0], PROT_READ | PROT_WRITE, MAP_SHARED, hnd->fds[0], 0);

			if (MAP_FAILED == cpu_ptr)
			{
				MALI_GRALLOC_LOGE("mmap failed from client ( %d ), fd ( %d )", dev->client(), hnd->fds[0]);
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			mali_gralloc_ion_sync_start(hnd, true, true);

			/* For separated plane YUV, there is a header to initialise per plane.
			 * NOTE(review): this inner `i` shadows the outer descriptor index. */
			const plane_info_t *plane_info = bufDescriptor->plane_info;
			const bool is_multi_plane = hnd->is_multi_plane();
			for (int i = 0; i < MAX_PLANES && (i == 0 || plane_info[i].byte_stride != 0); i++)
			{
				init_afbc(cpu_ptr + plane_info[i].offset,
				          bufDescriptor->alloc_format,
				          is_multi_plane,
				          plane_info[i].alloc_width,
				          plane_info[i].alloc_height);
			}

			mali_gralloc_ion_sync_end(hnd, true, true);

			munmap(cpu_ptr, bufDescriptor->alloc_sizes[0]);
		}
	}
#endif

	return 0;
}
702
703
mali_gralloc_ion_map(private_handle_t * hnd)704 int mali_gralloc_ion_map(private_handle_t *hnd)
705 {
706 uint64_t usage = hnd->producer_usage | hnd->consumer_usage;
707
708 /* Do not allow cpu access to secure buffers */
709 if (usage & (GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_NOZEROED)
710 && !(usage & GRALLOC_USAGE_PRIVATE_NONSECURE))
711 {
712 return 0;
713 }
714
715 for (int fidx = 0; fidx < hnd->fd_count; fidx++) {
716 unsigned char *mappedAddress =
717 (unsigned char *)mmap(NULL, hnd->alloc_sizes[fidx], PROT_READ | PROT_WRITE,
718 MAP_SHARED, hnd->fds[fidx], 0);
719
720 if (MAP_FAILED == mappedAddress)
721 {
722 int err = errno;
723 MALI_GRALLOC_LOGE("mmap( fds[%d]:%d size:%" PRIu64 " ) failed with %s",
724 fidx, hnd->fds[fidx], hnd->alloc_sizes[fidx], strerror(err));
725 hnd->dump("map fail");
726
727 for (int cidx = 0; cidx < fidx; fidx++)
728 {
729 munmap((void*)hnd->bases[cidx], hnd->alloc_sizes[cidx]);
730 hnd->bases[cidx] = 0;
731 }
732
733 return -err;
734 }
735
736 hnd->bases[fidx] = uintptr_t(mappedAddress);
737 }
738
739 return 0;
740 }
741
import_exynos_ion_handles(private_handle_t * hnd)742 int import_exynos_ion_handles(private_handle_t *hnd)
743 {
744 int retval = -1;
745
746 ion_device *dev = ion_device::get();
747
748 for (int idx = 0; idx < hnd->fd_count; idx++)
749 {
750 if (hnd->fds[idx] >= 0)
751 {
752 retval = exynos_ion_import_handle(dev->client(), hnd->fds[idx], &hnd->ion_handles[idx]);
753 if (retval)
754 {
755 MALI_GRALLOC_LOGE("error importing ion_handle. ion_client(%d), ion_handle[%d](%d) format(%s %#" PRIx64 ")",
756 dev->client(), idx, hnd->ion_handles[idx], format_name(hnd->alloc_format), hnd->alloc_format);
757 goto error;
758 }
759 }
760 }
761
762 return retval;
763
764 error:
765 for (int idx = 0; idx < hnd->fd_count; idx++)
766 {
767 if (hnd->ion_handles[idx])
768 {
769 exynos_ion_free_handle(dev->client(), hnd->ion_handles[idx]);
770 }
771 }
772
773 return retval;
774 }
775
free_exynos_ion_handles(private_handle_t * hnd)776 void free_exynos_ion_handles(private_handle_t *hnd)
777 {
778 ion_device *dev = ion_device::get();
779
780 for (int idx = 0; idx < hnd->fd_count; idx++)
781 {
782 if (hnd->ion_handles[idx])
783 {
784 if (hnd->ion_handles[idx] &&
785 exynos_ion_free_handle(dev->client(), hnd->ion_handles[idx]))
786 {
787 MALI_GRALLOC_LOGE("error freeing ion_handle. ion_client(%d), ion_handle[%d](%d) format(%s %#" PRIx64 ")",
788 dev->client(), idx, hnd->ion_handles[idx], format_name(hnd->alloc_format), hnd->alloc_format);
789 }
790 }
791 }
792 }
793
794
mali_gralloc_ion_unmap(private_handle_t * hnd)795 void mali_gralloc_ion_unmap(private_handle_t *hnd)
796 {
797 for (int i = 0; i < hnd->fd_count; i++)
798 {
799 int err = 0;
800
801 if (hnd->bases[i])
802 {
803 err = munmap((void*)hnd->bases[i], hnd->alloc_sizes[i]);
804 }
805
806 if (err)
807 {
808 MALI_GRALLOC_LOGE("Could not munmap base:%p size:%" PRIu64 " '%s'",
809 (void*)hnd->bases[i], hnd->alloc_sizes[i], strerror(errno));
810 }
811 else
812 {
813 hnd->bases[i] = 0;
814 }
815 }
816
817 hnd->cpu_read = 0;
818 hnd->cpu_write = 0;
819 }
820
/* Module teardown: closes the process-wide ION client and releases the
 * DMA-BUF heap allocator. Safe to call when the device was never opened. */
void mali_gralloc_ion_close(void)
{
	ion_device::close();
}
825
826