1 /*
2 * Copyright (C) 2016-2020 ARM Limited. All rights reserved.
3 *
4 * Copyright (C) 2008 The Android Open Source Project
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include <inttypes.h>
20 #include <assert.h>
21 #include <atomic>
22 #include <algorithm>
23
24 #include <hardware/hardware.h>
25 #include <hardware/gralloc1.h>
26
27 #include "mali_gralloc_bufferallocation.h"
28 #include "allocator/mali_gralloc_ion.h"
29 #include "allocator/mali_gralloc_shared_memory.h"
30 #include "mali_gralloc_buffer.h"
31 #include "mali_gralloc_bufferdescriptor.h"
32 #include "mali_gralloc_debug.h"
33 #include "mali_gralloc_log.h"
34 #include "format_info.h"
35 #include <exynos_format.h>
36 #include "exynos_format_allocation.h"
37
38 #define EXT_SIZE 256
39
40 /* Default align values for Exynos */
41 #define YUV_BYTE_ALIGN_DEFAULT 16
42 #define RGB_BYTE_ALIGN_DEFAULT 64
43
44 /* IP-specific align values */
45 #define GPU_BYTE_ALIGN_DEFAULT 64
46
47 /* Always CPU align for Exynos */
48 #define CAN_SKIP_CPU_ALIGN 0
49
50 /* Realign YV12 format so that chroma stride is half of luma stride */
51 #define REALIGN_YV12 1
52
53 /* TODO: set S10B format align in BoardConfig.mk */
54 #define BOARD_EXYNOS_S10B_FORMAT_ALIGN 64
55 #if 0
56 ifeq ($(BOARD_EXYNOS_S10B_FORMAT_ALIGN), 64)
57 LOCAL_CFLAGS += -DBOARD_EXYNOS_S10B_FORMAT_ALIGN=$(BOARD_EXYNOS_S10B_FORMAT_ALIGN)
58 else
59 LOCAL_CFLAGS += -DBOARD_EXYNOS_S10B_FORMAT_ALIGN=16
60 endif
61 #endif
62
63 #define AFBC_PIXELS_PER_BLOCK 256
64 #define AFBC_HEADER_BUFFER_BYTES_PER_BLOCKENTRY 16
65
66 bool afbc_format_fallback(uint32_t * const format_idx, const uint64_t usage, bool force);
67
68
69 /*
70 * Get a global unique ID
71 */
getUniqueId()72 static uint64_t getUniqueId()
73 {
74 static std::atomic<uint32_t> counter(0);
75 uint64_t id = static_cast<uint64_t>(getpid()) << 32;
76 return id | counter++;
77 }
78
afbc_buffer_align(const bool is_tiled,int * size)79 static void afbc_buffer_align(const bool is_tiled, int *size)
80 {
81 const uint16_t AFBC_BODY_BUFFER_BYTE_ALIGNMENT = 1024;
82
83 int buffer_byte_alignment = AFBC_BODY_BUFFER_BYTE_ALIGNMENT;
84
85 if (is_tiled)
86 {
87 buffer_byte_alignment = 4 * AFBC_BODY_BUFFER_BYTE_ALIGNMENT;
88 }
89
90 *size = GRALLOC_ALIGN(*size, buffer_byte_alignment);
91 }
92
93 /*
94 * Obtain AFBC superblock dimensions from type.
95 */
get_afbc_sb_size(AllocBaseType alloc_base_type)96 static rect_t get_afbc_sb_size(AllocBaseType alloc_base_type)
97 {
98 const uint16_t AFBC_BASIC_BLOCK_WIDTH = 16;
99 const uint16_t AFBC_BASIC_BLOCK_HEIGHT = 16;
100 const uint16_t AFBC_WIDE_BLOCK_WIDTH = 32;
101 const uint16_t AFBC_WIDE_BLOCK_HEIGHT = 8;
102 const uint16_t AFBC_EXTRAWIDE_BLOCK_WIDTH = 64;
103 const uint16_t AFBC_EXTRAWIDE_BLOCK_HEIGHT = 4;
104
105 rect_t sb = {0, 0};
106
107 switch(alloc_base_type)
108 {
109 case AllocBaseType::AFBC:
110 sb.width = AFBC_BASIC_BLOCK_WIDTH;
111 sb.height = AFBC_BASIC_BLOCK_HEIGHT;
112 break;
113 case AllocBaseType::AFBC_WIDEBLK:
114 sb.width = AFBC_WIDE_BLOCK_WIDTH;
115 sb.height = AFBC_WIDE_BLOCK_HEIGHT;
116 break;
117 case AllocBaseType::AFBC_EXTRAWIDEBLK:
118 sb.width = AFBC_EXTRAWIDE_BLOCK_WIDTH;
119 sb.height = AFBC_EXTRAWIDE_BLOCK_HEIGHT;
120 break;
121 default:
122 break;
123 }
124 return sb;
125 }
126
127 /*
128 * Obtain AFBC superblock dimensions for specific plane.
129 *
130 * See alloc_type_t for more information.
131 */
get_afbc_sb_size(alloc_type_t alloc_type,const uint8_t plane)132 static rect_t get_afbc_sb_size(alloc_type_t alloc_type, const uint8_t plane)
133 {
134 if (plane > 0 && alloc_type.is_afbc() && alloc_type.is_multi_plane)
135 {
136 return get_afbc_sb_size(AllocBaseType::AFBC_EXTRAWIDEBLK);
137 }
138 else
139 {
140 return get_afbc_sb_size(alloc_type.primary_type);
141 }
142 }
143
get_alloc_type(const uint64_t format_ext,const uint32_t format_idx,const uint64_t usage,alloc_type_t * const alloc_type)144 bool get_alloc_type(const uint64_t format_ext,
145 const uint32_t format_idx,
146 const uint64_t usage,
147 alloc_type_t * const alloc_type)
148 {
149 alloc_type->primary_type = AllocBaseType::UNCOMPRESSED;
150 alloc_type->is_multi_plane = formats[format_idx].npln > 1;
151 alloc_type->is_tiled = false;
152 alloc_type->is_padded = false;
153 alloc_type->is_frontbuffer_safe = false;
154
155 /* Determine AFBC type for this format. This is used to decide alignment.
156 Split block does not affect alignment, and therefore doesn't affect the allocation type. */
157 if (format_ext & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK)
158 {
159 /* YUV transform shall not be enabled for a YUV format */
160 if ((formats[format_idx].is_yuv == true) && (format_ext & MALI_GRALLOC_INTFMT_AFBC_YUV_TRANSFORM))
161 {
162 MALI_GRALLOC_LOGW("YUV Transform is incorrectly enabled for format = (%s 0x%x). Extended internal format = (%s 0x%" PRIx64 ")\n",
163 format_name(formats[format_idx].id), formats[format_idx].id, format_name(format_ext), format_ext);
164 }
165
166 /* Determine primary AFBC (superblock) type. */
167 alloc_type->primary_type = AllocBaseType::AFBC;
168 if (format_ext & MALI_GRALLOC_INTFMT_AFBC_WIDEBLK)
169 {
170 alloc_type->primary_type = AllocBaseType::AFBC_WIDEBLK;
171 }
172 else if (format_ext & MALI_GRALLOC_INTFMT_AFBC_EXTRAWIDEBLK)
173 {
174 alloc_type->primary_type = AllocBaseType::AFBC_EXTRAWIDEBLK;
175 }
176
177 if (format_ext & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
178 {
179 alloc_type->is_tiled = true;
180
181 if (formats[format_idx].npln > 1 &&
182 (format_ext & MALI_GRALLOC_INTFMT_AFBC_EXTRAWIDEBLK) == 0)
183 {
184 MALI_GRALLOC_LOGW("Extra-wide AFBC must be signalled for multi-plane formats. "
185 "Falling back to single plane AFBC.");
186 alloc_type->is_multi_plane = false;
187 }
188
189 if (format_ext & MALI_GRALLOC_INTFMT_AFBC_DOUBLE_BODY)
190 {
191 alloc_type->is_frontbuffer_safe = true;
192 }
193 }
194 else
195 {
196 if (formats[format_idx].npln > 1)
197 {
198 MALI_GRALLOC_LOGW("Multi-plane AFBC is not supported without tiling. "
199 "Falling back to single plane AFBC.");
200 }
201 alloc_type->is_multi_plane = false;
202 }
203
204 if (format_ext & MALI_GRALLOC_INTFMT_AFBC_EXTRAWIDEBLK &&
205 !alloc_type->is_tiled)
206 {
207 /* Headers must be tiled for extra-wide. */
208 MALI_GRALLOC_LOGE("ERROR: Invalid to specify extra-wide block without tiled headers.");
209 return false;
210 }
211
212 if (alloc_type->is_frontbuffer_safe &&
213 (format_ext & (MALI_GRALLOC_INTFMT_AFBC_WIDEBLK | MALI_GRALLOC_INTFMT_AFBC_EXTRAWIDEBLK)))
214 {
215 MALI_GRALLOC_LOGE("ERROR: Front-buffer safe not supported with wide/extra-wide block.");
216 }
217
218 if (formats[format_idx].npln == 1 &&
219 format_ext & MALI_GRALLOC_INTFMT_AFBC_WIDEBLK &&
220 format_ext & MALI_GRALLOC_INTFMT_AFBC_EXTRAWIDEBLK)
221 {
222 /* "Wide + Extra-wide" implicitly means "multi-plane". */
223 MALI_GRALLOC_LOGE("ERROR: Invalid to specify multiplane AFBC with single plane format.");
224 return false;
225 }
226
227 if (usage & MALI_GRALLOC_USAGE_AFBC_PADDING)
228 {
229 alloc_type->is_padded = true;
230 }
231 }
232 return true;
233 }
234
235 /*
236 * Initialise AFBC header based on superblock layout.
237 * Width and height should already be AFBC aligned.
238 */
init_afbc(uint8_t * buf,const uint64_t alloc_format,const bool is_multi_plane,const int w,const int h)239 void init_afbc(uint8_t *buf, const uint64_t alloc_format,
240 const bool is_multi_plane,
241 const int w, const int h)
242 {
243 const bool is_tiled = ((alloc_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
244 == MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS);
245 const uint32_t n_headers = (w * h) / AFBC_PIXELS_PER_BLOCK;
246 int body_offset = n_headers * AFBC_HEADER_BUFFER_BYTES_PER_BLOCKENTRY;
247
248 afbc_buffer_align(is_tiled, &body_offset);
249
250 /*
251 * Declare the AFBC header initialisation values for each superblock layout.
252 * Tiled headers (AFBC 1.2) can be initialised to zero for non-subsampled formats
253 * (SB layouts: 0, 3, 4, 7).
254 */
255 uint32_t headers[][4] = {
256 { (uint32_t)body_offset, 0x1, 0x10000, 0x0 }, /* Layouts 0, 3, 4, 7 */
257 { ((uint32_t)body_offset + (1 << 28)), 0x80200040, 0x1004000, 0x20080 } /* Layouts 1, 5 */
258 };
259 if ((alloc_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS))
260 {
261 /* Zero out body_offset for non-subsampled formats. */
262 memset(headers[0], 0, sizeof(uint32_t) * 4);
263 }
264
265 /* Map base format to AFBC header layout */
266 const uint32_t base_format = alloc_format & MALI_GRALLOC_INTFMT_FMT_MASK;
267
268 /* Sub-sampled formats use layouts 1 and 5 which is index 1 in the headers array.
269 * 1 = 4:2:0 16x16, 5 = 4:2:0 32x8.
270 *
271 * Non-subsampled use layouts 0, 3, 4 and 7, which is index 0.
272 * 0 = 16x16, 3 = 32x8 + split, 4 = 32x8, 7 = 64x4.
273 *
274 * When using separated planes for YUV formats, the header layout is the non-subsampled one
275 * as there is a header per-plane and there is no sub-sampling within the plane.
276 * Separated plane only supports 32x8 or 64x4 for the luma plane, so the first plane must be 4 or 7.
277 * Seperated plane only supports 64x4 for subsequent planes, so these must be header layout 7.
278 */
279 const uint32_t layout = is_subsampled_yuv(base_format) && !is_multi_plane ? 1 : 0;
280
281 MALI_GRALLOC_LOGV("Writing AFBC header layout %d for format (%s %" PRIx32 ")",
282 layout, format_name(base_format), base_format);
283
284 for (uint32_t i = 0; i < n_headers; i++)
285 {
286 memcpy(buf, headers[layout], sizeof(headers[layout]));
287 buf += sizeof(headers[layout]);
288 }
289 }
290
max(int a,int b)291 static int max(int a, int b)
292 {
293 return a > b ? a : b;
294 }
295
max(int a,int b,int c)296 static int max(int a, int b, int c)
297 {
298 return c > max(a, b) ? c : max(a, b);
299 }
300
301 /*
302 * Obtain plane allocation dimensions (in pixels).
303 *
304 * NOTE: pixel stride, where defined for format, is
305 * incorporated into allocation dimensions.
306 */
get_pixel_w_h(uint32_t * const width,uint32_t * const height,const format_info_t format,const alloc_type_t alloc_type,const uint8_t plane,bool has_cpu_usage)307 static void get_pixel_w_h(uint32_t * const width,
308 uint32_t * const height,
309 const format_info_t format,
310 const alloc_type_t alloc_type,
311 const uint8_t plane,
312 bool has_cpu_usage)
313 {
314 const rect_t sb = get_afbc_sb_size(alloc_type, plane);
315 const bool is_primary_plane = (plane == 0 || !format.planes_contiguous);
316
317 /*
318 * Round-up plane dimensions, to multiple of:
319 * - Samples for all channels (sub-sampled formats)
320 * - Memory bytes/words (some packed formats)
321 */
322 if (is_primary_plane)
323 {
324 *width = GRALLOC_ALIGN(*width, format.align_w);
325 *height = GRALLOC_ALIGN(*height, format.align_h);
326 }
327
328 /*
329 * Sub-sample (sub-sampled) planes.
330 */
331 if (plane > 0)
332 {
333 *width /= format.hsub;
334 *height /= format.vsub;
335 }
336
337 /*
338 * Pixel alignment (width),
339 * where format stride is stated in pixels.
340 */
341 int pixel_align_w = 1, pixel_align_h = 1;
342 if (has_cpu_usage && is_primary_plane)
343 {
344 pixel_align_w = format.align_w_cpu;
345 }
346 else if (alloc_type.is_afbc())
347 {
348 #define HEADER_STRIDE_ALIGN_IN_SUPER_BLOCKS (0)
349 uint32_t num_sb_align = 0;
350 if (alloc_type.is_padded && !format.is_yuv)
351 {
352 /* Align to 4 superblocks in width --> 64-byte,
353 * assuming 16-byte header per superblock.
354 */
355 num_sb_align = 4;
356 }
357 pixel_align_w = max(HEADER_STRIDE_ALIGN_IN_SUPER_BLOCKS, num_sb_align) * sb.width;
358
359 /*
360 * Determine AFBC tile size when allocating tiled headers.
361 */
362 rect_t afbc_tile = sb;
363 if (alloc_type.is_tiled)
364 {
365 afbc_tile.width = format.bpp_afbc[plane] > 32 ? 4 * afbc_tile.width : 8 * afbc_tile.width;
366 afbc_tile.height = format.bpp_afbc[plane] > 32 ? 4 * afbc_tile.height : 8 * afbc_tile.height;
367 }
368
369 MALI_GRALLOC_LOGV("Plane[%hhu]: [SUB-SAMPLE] w:%d, h:%d\n", plane, *width, *height);
370 MALI_GRALLOC_LOGV("Plane[%hhu]: [PIXEL_ALIGN] w:%d\n", plane, pixel_align_w);
371 MALI_GRALLOC_LOGV("Plane[%hhu]: [LINEAR_TILE] w:%" PRIu16 "\n", plane, format.tile_size);
372 MALI_GRALLOC_LOGV("Plane[%hhu]: [AFBC_TILE] w:%" PRIu16 ", h:%" PRIu16 "\n", plane, afbc_tile.width, afbc_tile.height);
373
374 pixel_align_w = max(pixel_align_w, afbc_tile.width);
375 pixel_align_h = max(pixel_align_h, afbc_tile.height);
376
377 if (AllocBaseType::AFBC_WIDEBLK == alloc_type.primary_type && !alloc_type.is_tiled)
378 {
379 /*
380 * Special case for wide block (32x8) AFBC with linear (non-tiled)
381 * headers: hardware reads and writes 32x16 blocks so we need to
382 * pad the body buffer accordingly.
383 *
384 * Note that this branch will not be taken for multi-plane AFBC
385 * since that requires tiled headers.
386 */
387 pixel_align_h = max(pixel_align_h, 16);
388 }
389 }
390 *width = GRALLOC_ALIGN(*width, max(1, pixel_align_w, format.tile_size));
391 *height = GRALLOC_ALIGN(*height, max(1, pixel_align_h, format.tile_size));
392 }
393
394
395
gcd(uint32_t a,uint32_t b)396 static uint32_t gcd(uint32_t a, uint32_t b)
397 {
398 uint32_t r, t;
399
400 if (a == b)
401 {
402 return a;
403 }
404 else if (a < b)
405 {
406 t = a;
407 a = b;
408 b = t;
409 }
410
411 while (b != 0)
412 {
413 r = a % b;
414 a = b;
415 b = r;
416 }
417
418 return a;
419 }
420
lcm(uint32_t a,uint32_t b)421 uint32_t lcm(uint32_t a, uint32_t b)
422 {
423 if (a != 0 && b != 0)
424 {
425 return (a * b) / gcd(a, b);
426 }
427
428 return max(a, b);
429 }
430
431
432 #if REALIGN_YV12 == 1
433 /*
434 * YV12 stride has additional complexity since chroma stride
435 * must conform to the following:
436 *
437 * c_stride = ALIGN(stride/2, 16)
438 *
439 * Since the stride alignment must satisfy both CPU and HW
440 * constraints, the luma stride must be doubled.
441 */
update_yv12_stride(int8_t plane,uint32_t luma_stride,uint32_t stride_align,uint32_t * byte_stride)442 static void update_yv12_stride(int8_t plane,
443 uint32_t luma_stride,
444 uint32_t stride_align,
445 uint32_t * byte_stride)
446 {
447 if (plane == 0)
448 {
449 *byte_stride = GRALLOC_ALIGN(luma_stride, GRALLOC_ALIGN(stride_align, 32));
450 }
451 else
452 {
453 /*
454 * Derive chroma stride from luma and verify it is:
455 * 1. Aligned to "1/2*lcm(hw_align, cpu_align)"
456 * 2. Multiple of 16px (16 bytes)
457 */
458 *byte_stride = luma_stride / 2;
459 assert(*byte_stride == GRALLOC_ALIGN(*byte_stride, GRALLOC_ALIGN(stride_align / 2, 16)));
460 assert(*byte_stride & 15 == 0);
461 }
462 }
463 #endif
464
465 /*
466 * Modify usage flag when BO is the producer
467 *
468 * BO cannot use the flags CPU_READ_RARELY as Codec layer redefines those flags
469 * for some internal usage. So, when BO is sending CPU_READ_OFTEN, it still
 * expects to allocate an uncached buffer and this procedure converts the OFTEN
471 * flag to RARELY.
472 */
update_usage_for_BO(uint64_t usage)473 static uint64_t update_usage_for_BO(uint64_t usage) {
474 MALI_GRALLOC_LOGV("Hacking CPU RW flags for BO");
475 if (usage & hidl_common::BufferUsage::CPU_READ_OFTEN) {
476 usage &= ~(static_cast<uint64_t>(hidl_common::BufferUsage::CPU_READ_OFTEN));
477 usage |= hidl_common::BufferUsage::CPU_READ_RARELY;
478 }
479
480 if (usage & hidl_common::BufferUsage::CPU_WRITE_OFTEN) {
481 usage &= ~(static_cast<uint64_t>(hidl_common::BufferUsage::CPU_WRITE_OFTEN));
482 usage |= hidl_common::BufferUsage::CPU_WRITE_RARELY;
483 }
484 return usage;
485 }
486
align_plane_stride(plane_info_t * plane_info,int plane,const format_info_t format,uint32_t stride_align)487 static void align_plane_stride(plane_info_t *plane_info, int plane, const format_info_t format, uint32_t stride_align)
488 {
489 plane_info[plane].byte_stride = GRALLOC_ALIGN(plane_info[plane].byte_stride * format.tile_size, stride_align) / format.tile_size;
490 plane_info[plane].alloc_width = plane_info[plane].byte_stride * 8 / format.bpp[plane];
491 }
492
493 /*
494 * Calculate allocation size.
495 *
496 * Determine the width and height of each plane based on pixel alignment for
497 * both uncompressed and AFBC allocations.
498 *
499 * @param width [in] Buffer width.
500 * @param height [in] Buffer height.
501 * @param alloc_type [in] Allocation type inc. whether tiled and/or multi-plane.
502 * @param format [in] Pixel format.
503 * @param has_cpu_usage [in] CPU usage requested (in addition to any other).
504 * @param pixel_stride [out] Calculated pixel stride.
505 * @param size [out] Total calculated buffer size including all planes.
506 * @param plane_info [out] Array of calculated information for each plane. Includes
507 * offset, byte stride and allocation width and height.
508 */
calc_allocation_size(const int width,const int height,const alloc_type_t alloc_type,const format_info_t format,const bool has_cpu_usage,const bool has_hw_usage,const bool has_gpu_usage,int * const pixel_stride,uint64_t * const size,plane_info_t plane_info[MAX_PLANES])509 static void calc_allocation_size(const int width,
510 const int height,
511 const alloc_type_t alloc_type,
512 const format_info_t format,
513 const bool has_cpu_usage,
514 const bool has_hw_usage,
515 const bool has_gpu_usage,
516 int * const pixel_stride,
517 uint64_t * const size,
518 plane_info_t plane_info[MAX_PLANES])
519 {
520 /* pixel_stride is set outside this function after this function is called */
521 GRALLOC_UNUSED(pixel_stride);
522
523 plane_info[0].offset = 0;
524
525 *size = 0;
526 for (uint8_t plane = 0; plane < format.npln; plane++)
527 {
528 plane_info[plane].alloc_width = width;
529 plane_info[plane].alloc_height = height;
530 get_pixel_w_h(&plane_info[plane].alloc_width,
531 &plane_info[plane].alloc_height,
532 format,
533 alloc_type,
534 plane,
535 has_cpu_usage);
536 MALI_GRALLOC_LOGV("Aligned w=%d, h=%d (in pixels)",
537 plane_info[plane].alloc_width, plane_info[plane].alloc_height);
538
539 /*
540 * Calculate byte stride (per plane).
541 */
542 if (alloc_type.is_afbc())
543 {
544 assert((plane_info[plane].alloc_width * format.bpp_afbc[plane]) % 8 == 0);
545 plane_info[plane].byte_stride = (plane_info[plane].alloc_width * format.bpp_afbc[plane]) / 8;
546 }
547 else
548 {
549 assert((plane_info[plane].alloc_width * format.bpp[plane]) % 8 == 0);
550 plane_info[plane].byte_stride = (plane_info[plane].alloc_width * format.bpp[plane]) / 8;
551
552 /*
553 * Align byte stride (uncompressed allocations only).
554 *
555 * Find the lowest-common-multiple of:
556 * 1. hw_align: Minimum byte stride alignment for HW IP (has_hw_usage == true)
557 * 2. cpu_align: Byte equivalent of 'align_w_cpu' (has_cpu_usage == true)
558 *
559 * NOTE: Pixel stride is defined as multiple of 'align_w_cpu'.
560 */
561 uint16_t hw_align = 0;
562 if (has_hw_usage)
563 {
564 static_assert(is_power2(YUV_BYTE_ALIGN_DEFAULT),
565 "YUV_BYTE_ALIGN_DEFAULT is not a power of 2");
566 static_assert(is_power2(RGB_BYTE_ALIGN_DEFAULT),
567 "RGB_BYTE_ALIGN_DEFAULT is not a power of 2");
568
569 hw_align = format.is_yuv ?
570 YUV_BYTE_ALIGN_DEFAULT :
571 (format.is_rgb ? RGB_BYTE_ALIGN_DEFAULT : 0);
572 }
573
574 if (has_gpu_usage)
575 {
576 static_assert(is_power2(GPU_BYTE_ALIGN_DEFAULT),
577 "RGB_BYTE_ALIGN_DEFAULT is not a power of 2");
578
579 /*
580 * The GPU requires stricter alignment on YUV and raw formats.
581 */
582 hw_align = std::max(hw_align, static_cast<uint16_t>(GPU_BYTE_ALIGN_DEFAULT));
583 }
584
585 uint32_t cpu_align = 0;
586
587 #if CAN_SKIP_CPU_ALIGN == 1
588 if (has_cpu_usage)
589 #endif
590 {
591 assert((format.bpp[plane] * format.align_w_cpu) % 8 == 0);
592 const bool is_primary_plane = (plane == 0 || !format.planes_contiguous);
593 if (is_primary_plane)
594 {
595 cpu_align = (format.bpp[plane] * format.align_w_cpu) / 8;
596 }
597 }
598
599 uint32_t stride_align = lcm(hw_align, cpu_align);
600 if (stride_align)
601 {
602 align_plane_stride(plane_info, plane, format, stride_align);
603 }
604
605 #if REALIGN_YV12 == 1
606 /*
607 * Update YV12 stride with both CPU & HW usage due to constraint of chroma stride.
608 * Width is anyway aligned to 16px for luma and chroma (has_cpu_usage).
609 *
610 * Note: To prevent luma stride misalignment with GPU stride alignment.
611 * The luma plane will maintain the same `stride` size, and the chroma plane
612 * will align to `stride/2`.
613 */
614 if (format.id == MALI_GRALLOC_FORMAT_INTERNAL_YV12 && has_hw_usage && has_cpu_usage)
615 {
616 update_yv12_stride(plane,
617 plane_info[0].byte_stride,
618 stride_align,
619 &plane_info[plane].byte_stride);
620 }
621 #endif
622 }
623 MALI_GRALLOC_LOGV("Byte stride: %d", plane_info[plane].byte_stride);
624
625 const uint32_t sb_num = (plane_info[plane].alloc_width * plane_info[plane].alloc_height)
626 / AFBC_PIXELS_PER_BLOCK;
627
628 /*
629 * Calculate body size (per plane).
630 */
631 int body_size = 0;
632 if (alloc_type.is_afbc())
633 {
634 const rect_t sb = get_afbc_sb_size(alloc_type, plane);
635 const int sb_bytes = GRALLOC_ALIGN((format.bpp_afbc[plane] * sb.width * sb.height) / 8, 128);
636 body_size = sb_num * sb_bytes;
637
638 /* When AFBC planes are stored in separate buffers and this is not the last plane,
639 also align the body buffer to make the subsequent header aligned. */
640 if (format.npln > 1 && plane < 2)
641 {
642 afbc_buffer_align(alloc_type.is_tiled, &body_size);
643 }
644
645 if (alloc_type.is_frontbuffer_safe)
646 {
647 int back_buffer_size = body_size;
648 afbc_buffer_align(alloc_type.is_tiled, &back_buffer_size);
649 body_size += back_buffer_size;
650 }
651 }
652 else
653 {
654 body_size = plane_info[plane].byte_stride * plane_info[plane].alloc_height;
655 }
656 MALI_GRALLOC_LOGV("Body size: %d", body_size);
657
658
659 /*
660 * Calculate header size (per plane).
661 */
662 int header_size = 0;
663 if (alloc_type.is_afbc())
664 {
665 /* As this is AFBC, calculate header size for this plane.
666 * Always align the header, which will make the body buffer aligned.
667 */
668 header_size = sb_num * AFBC_HEADER_BUFFER_BYTES_PER_BLOCKENTRY;
669 afbc_buffer_align(alloc_type.is_tiled, &header_size);
670 }
671 MALI_GRALLOC_LOGV("AFBC Header size: %d", header_size);
672
673 /*
674 * Set offset for separate chroma planes.
675 */
676 if (plane > 0)
677 {
678 plane_info[plane].offset = *size;
679 }
680
681 /*
682 * Set overall size.
683 * Size must be updated after offset.
684 */
685 *size += body_size + header_size;
686 MALI_GRALLOC_LOGV("size=%" PRIu64, *size);
687 }
688 }
689
690
691
692 /*
693 * Validate selected format against requested.
694 * Return true if valid, false otherwise.
695 */
validate_format(const format_info_t * const format,const alloc_type_t alloc_type,const buffer_descriptor_t * const bufDescriptor)696 static bool validate_format(const format_info_t * const format,
697 const alloc_type_t alloc_type,
698 const buffer_descriptor_t * const bufDescriptor)
699 {
700 if (alloc_type.is_afbc())
701 {
702 /*
703 * Validate format is supported by AFBC specification and gralloc.
704 */
705 if (format->afbc == false)
706 {
707 MALI_GRALLOC_LOGE("ERROR: AFBC selected but not supported for base format: (%s 0x%" PRIx32")",
708 format_name(format->id), format->id);
709 return false;
710 }
711
712 /*
713 * Enforce consistency between number of format planes and
714 * request for single/multi-plane AFBC.
715 */
716 if (((format->npln == 1 && alloc_type.is_multi_plane) ||
717 (format->npln > 1 && !alloc_type.is_multi_plane)))
718 {
719 MALI_GRALLOC_LOGE("ERROR: Format ((%s %" PRIx32 "), num planes: %u) is incompatible with %s-plane AFBC request",
720 format_name(format->id), format->id, format->npln, (alloc_type.is_multi_plane) ? "multi" : "single");
721 return false;
722 }
723 }
724 else
725 {
726 if (format->linear == false)
727 {
728 MALI_GRALLOC_LOGE("ERROR: Uncompressed format requested but not supported for base format: (%s %" PRIx32 ")",
729 format_name(format->id), format->id);
730 return false;
731 }
732 }
733
734 if (format->id == MALI_GRALLOC_FORMAT_INTERNAL_BLOB &&
735 bufDescriptor->height != 1)
736 {
737 MALI_GRALLOC_LOGE("ERROR: Height for format BLOB must be 1.");
738 return false;
739 }
740
741 return true;
742 }
743
prepare_descriptor_exynos_formats(buffer_descriptor_t * bufDescriptor,format_info_t format_info)744 static int prepare_descriptor_exynos_formats(
745 buffer_descriptor_t *bufDescriptor,
746 format_info_t format_info)
747 {
748 int fd_count = 1;
749 int w = bufDescriptor->width;
750 int h = bufDescriptor->height;
751 uint64_t usage = bufDescriptor->producer_usage | bufDescriptor->consumer_usage;
752 int plane_count = 2;
753 int format = MALI_GRALLOC_INTFMT_FMT_MASK & bufDescriptor->alloc_format;
754
755 if (usage & (GRALLOC_USAGE_HW_VIDEO_ENCODER | GRALLOC_USAGE_HW_VIDEO_DECODER))
756 {
757 usage |= GRALLOC_USAGE_VIDEO_PRIVATE_DATA;
758 bufDescriptor->producer_usage |= GRALLOC_USAGE_VIDEO_PRIVATE_DATA;
759 bufDescriptor->consumer_usage |= GRALLOC_USAGE_VIDEO_PRIVATE_DATA;
760 }
761
762 /* set SBWC format fd_count */
763 fd_count = 1;
764 switch (format)
765 {
766 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_SBWC:
767 case HAL_PIXEL_FORMAT_EXYNOS_YCrCb_420_SP_M_SBWC:
768 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC:
769 case HAL_PIXEL_FORMAT_EXYNOS_YCrCb_420_SP_M_10B_SBWC:
770 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_SBWC_L50:
771 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_SBWC_L75:
772 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC_L40:
773 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC_L60:
774 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC_L80:
775 fd_count = 2;
776 break;
777 }
778
779 /* SWBC Formats have special size requirements */
780 switch (format)
781 {
782 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_SBWC:
783 case HAL_PIXEL_FORMAT_EXYNOS_YCrCb_420_SP_M_SBWC:
784 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_SBWC:
785 plane_count = setup_sbwc_420_sp(w, h, fd_count, bufDescriptor->plane_info);
786 break;
787
788 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC:
789 case HAL_PIXEL_FORMAT_EXYNOS_YCrCb_420_SP_M_10B_SBWC:
790 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_10B_SBWC:
791 plane_count = setup_sbwc_420_sp_10bit(w, h, fd_count, bufDescriptor->plane_info);
792 break;
793
794 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_SBWC_L50:
795 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_SBWC_L50:
796 plane_count = setup_sbwc_420_sp_lossy(w, h, 50, fd_count, bufDescriptor->plane_info);
797 break;
798
799 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_SBWC_L75:
800 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_SBWC_L75:
801 plane_count = setup_sbwc_420_sp_lossy(w, h, 75, fd_count, bufDescriptor->plane_info);
802 break;
803
804 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC_L40:
805 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_10B_SBWC_L40:
806 plane_count = setup_sbwc_420_sp_10bit_lossy(w, h, 40, fd_count, bufDescriptor->plane_info);
807 break;
808
809 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC_L60:
810 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_10B_SBWC_L60:
811 plane_count = setup_sbwc_420_sp_10bit_lossy(w, h, 60, fd_count, bufDescriptor->plane_info);
812 break;
813
814 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_10B_SBWC_L80:
815 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_10B_SBWC_L80:
816 plane_count = setup_sbwc_420_sp_10bit_lossy(w, h, 80, fd_count, bufDescriptor->plane_info);
817 break;
818
819 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
820 fd_count = 1;
821 h = GRALLOC_ALIGN(h, 2);
822 plane_count = setup_420_sp(w, h, fd_count, bufDescriptor->plane_info);
823 break;
824
825 case HAL_PIXEL_FORMAT_EXYNOS_YV12_M:
826 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_P_M:
827 w = GRALLOC_ALIGN(w, 32);
828 h = GRALLOC_ALIGN(h, 16);
829 fd_count = 3;
830 plane_count = setup_420_p(w, h, fd_count, bufDescriptor->plane_info);
831 break;
832
833 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_TILED:
834 w = GRALLOC_ALIGN(w, 16);
835 h = GRALLOC_ALIGN(h, 32);
836 fd_count = 2;
837 plane_count = setup_420_sp_tiled(w, h, fd_count, bufDescriptor->plane_info);
838 break;
839
840 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_P:
841 w = GRALLOC_ALIGN(w, 16);
842 fd_count = 1;
843 plane_count = setup_420_p(w, h, fd_count, bufDescriptor->plane_info);
844 break;
845
846 case HAL_PIXEL_FORMAT_EXYNOS_YCrCb_420_SP_M:
847 case HAL_PIXEL_FORMAT_EXYNOS_YCrCb_420_SP_M_FULL:
848 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M:
849 w = GRALLOC_ALIGN(w, 16);
850 h = GRALLOC_ALIGN(h, 32);
851 fd_count = 2;
852 plane_count = setup_420_sp(w, h, fd_count, bufDescriptor->plane_info);
853 break;
854
855 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN:
856 w = GRALLOC_ALIGN(w, 16);
857 h = GRALLOC_ALIGN(h, 16);
858 fd_count = 1;
859 plane_count = setup_420_sp(w, h, fd_count, bufDescriptor->plane_info);
860 break;
861
862 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SP_M_S10B:
863 /* This is 64 pixel align for now */
864 w = GRALLOC_ALIGN(w, BOARD_EXYNOS_S10B_FORMAT_ALIGN);
865 h = GRALLOC_ALIGN(h, 16);
866 fd_count = 2;
867 plane_count = setup_420_sp_s10b(w, h, fd_count, bufDescriptor->plane_info);
868 break;
869
870 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_420_SPN_S10B:
871 w = GRALLOC_ALIGN(w, BOARD_EXYNOS_S10B_FORMAT_ALIGN);
872 h = GRALLOC_ALIGN(h, 16);
873 fd_count = 1;
874 plane_count = setup_420_sp_s10b(w, h, fd_count, bufDescriptor->plane_info);
875 break;
876
877 case HAL_PIXEL_FORMAT_EXYNOS_YCbCr_P010_M:
878 w = GRALLOC_ALIGN(w, 16);
879 h = GRALLOC_ALIGN(h, 16);
880 fd_count = 2;
881 plane_count = setup_p010_sp(w, h, fd_count, bufDescriptor->plane_info);
882 break;
883
884 default:
885 MALI_GRALLOC_LOGE("invalid yuv format (%s %" PRIx64 ")", format_name(bufDescriptor->alloc_format),
886 bufDescriptor->alloc_format);
887 return -1;
888 }
889
890 plane_info_t *plane = bufDescriptor->plane_info;
891
892 if (usage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_GPU_DATA_BUFFER))
893 {
894 if (is_sbwc_format(format))
895 {
896 MALI_GRALLOC_LOGE("using SBWC format (%s %" PRIx64 ") with GPU is invalid",
897 format_name(bufDescriptor->alloc_format),
898 bufDescriptor->alloc_format);
899 return -1;
900 }
901 else
902 {
903 /*
904 * The GPU requires stricter alignment on YUV formats.
905 */
906 for (int pidx = 0; pidx < plane_count; ++pidx)
907 {
908 if (plane[pidx].size == plane[pidx].byte_stride * plane[pidx].alloc_height)
909 {
910 align_plane_stride(plane, pidx, format_info, GPU_BYTE_ALIGN_DEFAULT);
911 plane[pidx].size = plane[pidx].byte_stride * plane[pidx].alloc_height;
912 }
913 else
914 {
915 MALI_GRALLOC_LOGE("buffer with format (%s %" PRIx64
916 ") has size %" PRIu64
917 " != byte_stride %" PRIu32 " * alloc_height %" PRIu32,
918 format_name(bufDescriptor->alloc_format),
919 bufDescriptor->alloc_format,
920 plane[pidx].size, plane[pidx].byte_stride, plane[pidx].alloc_height);
921 }
922 }
923 }
924 }
925
926 for (int fidx = 0; fidx < fd_count; fidx++)
927 {
928 uint64_t size = 0;
929
930 for (int pidx = 0; pidx < plane_count; pidx++)
931 {
932 if (plane[pidx].fd_idx == fidx)
933 {
934 size += plane[pidx].size;
935 }
936 }
937
938 /* TODO(b/183073089): Removing the following size hacks make video playback
939 * fail. Need to investigate more for the root cause. Copying the original
940 * comment from upstream below */
941 /* is there a need to check the condition for padding like in older gralloc? */
942 /* Add MSCL_EXT_SIZE */
943 /* MSCL_EXT_SIZE + MSCL_EXT_SIZE/2 + ext_size */
944 size += 1024;
945
946 size = size < SZ_4K ? SZ_4K : size;
947
948 bufDescriptor->alloc_sizes[fidx] = size;
949 }
950
951
952 bufDescriptor->fd_count = fd_count;
953 bufDescriptor->plane_count = plane_count;
954
955 return 0;
956 }
957
mali_gralloc_derive_format_and_size(buffer_descriptor_t * const bufDescriptor)958 int mali_gralloc_derive_format_and_size(buffer_descriptor_t * const bufDescriptor)
959 {
960 alloc_type_t alloc_type{};
961
962 int alloc_width = bufDescriptor->width;
963 int alloc_height = bufDescriptor->height;
964 uint64_t usage = bufDescriptor->producer_usage | bufDescriptor->consumer_usage;
965
966 /*
967 * Select optimal internal pixel format based upon
968 * usage and requested format.
969 */
970 bufDescriptor->alloc_format = mali_gralloc_select_format(bufDescriptor->hal_format,
971 bufDescriptor->format_type,
972 usage,
973 bufDescriptor->width * bufDescriptor->height);
974 if (bufDescriptor->alloc_format == MALI_GRALLOC_FORMAT_INTERNAL_UNDEFINED)
975 {
976 MALI_GRALLOC_LOGE("ERROR: Unrecognized and/or unsupported format (%s 0x%" PRIx64 ") and usage (%s 0x%" PRIx64 ")",
977 format_name(bufDescriptor->hal_format), bufDescriptor->hal_format,
978 describe_usage(usage).c_str(), usage);
979 return -EINVAL;
980 }
981
982 int32_t format_idx = get_format_index(bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_FMT_MASK);
983 if (format_idx == -1)
984 {
985 return -EINVAL;
986 }
987 MALI_GRALLOC_LOGV("alloc_format: (%s 0x%" PRIx64 ") format_idx: %d",
988 format_name(bufDescriptor->alloc_format), bufDescriptor->alloc_format, format_idx);
989
990 /*
991 * Obtain allocation type (uncompressed, AFBC basic, etc...)
992 */
993 if (!get_alloc_type(bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_EXT_MASK,
994 format_idx, usage, &alloc_type))
995 {
996 return -EINVAL;
997 }
998
999 if (!validate_format(&formats[format_idx], alloc_type, bufDescriptor))
1000 {
1001 return -EINVAL;
1002 }
1003
1004 if (is_exynos_format(bufDescriptor->alloc_format))
1005 {
1006 prepare_descriptor_exynos_formats(bufDescriptor, formats[format_idx]);
1007 }
1008 else
1009 {
1010 /*
1011 * Resolution of frame (allocation width and height) might require adjustment.
1012 * This adjustment is only based upon specific usage and pixel format.
1013 * If using AFBC, further adjustments to the allocation width and height will be made later
1014 * based on AFBC alignment requirements and, for YUV, the plane properties.
1015 */
1016 mali_gralloc_adjust_dimensions(bufDescriptor->alloc_format,
1017 usage,
1018 &alloc_width,
1019 &alloc_height);
1020
1021 /* Obtain buffer size and plane information. */
1022 calc_allocation_size(alloc_width,
1023 alloc_height,
1024 alloc_type,
1025 formats[format_idx],
1026 usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK),
1027 usage & ~(GRALLOC_USAGE_PRIVATE_MASK | GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK),
1028 usage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_GPU_DATA_BUFFER),
1029 &bufDescriptor->pixel_stride,
1030 &bufDescriptor->alloc_sizes[0],
1031 bufDescriptor->plane_info);
1032 }
1033
1034 /* Set pixel stride differently for RAW formats */
1035 switch (MALI_GRALLOC_INTFMT_FMT_MASK & bufDescriptor->alloc_format)
1036 {
1037 case MALI_GRALLOC_FORMAT_INTERNAL_RAW12:
1038 case MALI_GRALLOC_FORMAT_INTERNAL_RAW10:
1039 bufDescriptor->pixel_stride = bufDescriptor->plane_info[0].byte_stride;
1040 break;
1041 default:
1042 bufDescriptor->pixel_stride = bufDescriptor->plane_info[0].alloc_width;
1043 }
1044
1045 /*
1046 * Each layer of a multi-layer buffer must be aligned so that
1047 * it is accessible by both producer and consumer. In most cases,
1048 * the stride alignment is also sufficient for each layer, however
1049 * for AFBC the header buffer alignment is more constrained (see
1050 * AFBC specification v3.4, section 2.15: "Alignment requirements").
1051 * Also update the buffer size to accommodate all layers.
1052 */
1053 if (bufDescriptor->layer_count > 1)
1054 {
1055 if (bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK)
1056 {
1057 if (bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
1058 {
1059 bufDescriptor->alloc_sizes[0] = GRALLOC_ALIGN(bufDescriptor->alloc_sizes[0], 4096);
1060 }
1061 else
1062 {
1063 bufDescriptor->alloc_sizes[0] = GRALLOC_ALIGN(bufDescriptor->alloc_sizes[0], 128);
1064 }
1065 }
1066
1067 bufDescriptor->alloc_sizes[0] *= bufDescriptor->layer_count;
1068 }
1069
1070 /* MFC requires EXT_SIZE padding */
1071 bufDescriptor->alloc_sizes[0] += EXT_SIZE;
1072
1073 return 0;
1074 }
1075
1076
mali_gralloc_buffer_allocate(const gralloc_buffer_descriptor_t * descriptors,uint32_t numDescriptors,buffer_handle_t * pHandle,bool * shared_backend,int fd)1077 int mali_gralloc_buffer_allocate(const gralloc_buffer_descriptor_t *descriptors,
1078 uint32_t numDescriptors, buffer_handle_t *pHandle, bool *shared_backend,
1079 int fd)
1080 {
1081 bool shared = false;
1082 uint64_t backing_store_id = 0x0;
1083 int err;
1084
1085 for (uint32_t i = 0; i < numDescriptors; i++)
1086 {
1087 buffer_descriptor_t * const bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
1088
1089 assert(bufDescriptor->producer_usage == bufDescriptor->consumer_usage);
1090 uint64_t usage = bufDescriptor->producer_usage;
1091 if ((usage & hidl_common::BufferUsage::VIDEO_DECODER) && (usage & GRALLOC_USAGE_GOOGLE_IP_BO)) {
1092 usage = update_usage_for_BO(usage);
1093 bufDescriptor->producer_usage = usage;
1094 bufDescriptor->consumer_usage = usage;
1095 }
1096
1097 /* Derive the buffer size from descriptor parameters */
1098 err = mali_gralloc_derive_format_and_size(bufDescriptor);
1099 if (err != 0)
1100 {
1101 return err;
1102 }
1103 }
1104
1105 /* Allocate ION backing store memory */
1106 err = mali_gralloc_ion_allocate(descriptors, numDescriptors, pHandle, &shared, fd);
1107 if (err < 0)
1108 {
1109 return err;
1110 }
1111
1112 if (shared)
1113 {
1114 backing_store_id = getUniqueId();
1115 }
1116
1117 for (uint32_t i = 0; i < numDescriptors; i++)
1118 {
1119 private_handle_t *hnd = (private_handle_t *)pHandle[i];
1120
1121 if (shared)
1122 {
1123 /*each buffer will share the same backing store id.*/
1124 hnd->backing_store_id = backing_store_id;
1125 }
1126 else
1127 {
1128 /* each buffer will have an unique backing store id.*/
1129 hnd->backing_store_id = getUniqueId();
1130 }
1131 }
1132
1133 if (NULL != shared_backend)
1134 {
1135 *shared_backend = shared;
1136 }
1137
1138 return 0;
1139 }
1140
mali_gralloc_buffer_free(buffer_handle_t pHandle)1141 int mali_gralloc_buffer_free(buffer_handle_t pHandle)
1142 {
1143 auto *hnd = const_cast<private_handle_t *>(
1144 reinterpret_cast<const private_handle_t *>(pHandle));
1145
1146 if (hnd == nullptr)
1147 {
1148 return -1;
1149 }
1150
1151 gralloc_shared_memory_free(hnd);
1152 mali_gralloc_ion_free(hnd);
1153
1154 return 0;
1155 }
1156