1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <atomic>
19
20 #include <limits.h>
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <cutils/properties.h>
24 #include <sys/mman.h>
25
26 #include "gr.h"
27 #include "gpu.h"
28 #include "memalloc.h"
29 #include "alloc_controller.h"
30 #include <qdMetaData.h>
31 #include <linux/msm_ion.h>
32
33 using namespace gralloc;
34
/* Hand out a process-unique, monotonically increasing id used to tag the
 * backing store of every buffer allocated by this device. Thread safe. */
static uint64_t next_backing_store_id()
{
    static std::atomic<uint64_t> counter(1);
    return counter.fetch_add(1);
}
40
/*
 * Builds the gralloc device object. The alloc_device_t base subobject is a
 * plain C HAL struct, so it is zeroed wholesale first and then the
 * hw_device_t header plus the alloc/free entry points are filled in.
 *
 * module     gralloc module this device belongs to (its hw_module_t header
 *            is installed as common.module)
 * alloc_ctrl allocator backend used for all buffer allocations
 */
gpu_context_t::gpu_context_t(const private_module_t* module,
                             IAllocController* alloc_ctrl ) :
    mAllocCtrl(alloc_ctrl)
{
    // Zero out the alloc_device_t
    memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));

    // Initialize the procs
    common.tag = HARDWARE_DEVICE_TAG;
    common.version = 0;
    common.module = const_cast<hw_module_t*>(&module->base.common);
    common.close = gralloc_close;
    alloc = gralloc_alloc;
    free = gralloc_free;

}
57
/*
 * Allocates a graphics buffer of at least |size| bytes through the ION
 * allocator, plus a second page-sized buffer holding per-buffer metadata
 * (MetaData_t). On success a new private_handle_t describing both
 * allocations is stored in |*pHandle|.
 *
 * size        requested payload size in bytes (page-rounded internally)
 * usage       GRALLOC_USAGE_* bits; drive alignment, heap selection and
 *             the PRIV_FLAGS_* recorded on the handle
 * pHandle     out: receives the newly created handle
 * bufferType  BUFFER_TYPE_UI or BUFFER_TYPE_VIDEO (stored on the handle)
 * format      HAL pixel format (stored on the handle)
 * width/height aligned dimensions (stored on the handle)
 *
 * Returns 0 on success, or the negative errno from the allocator.
 */
int gpu_context_t::gralloc_alloc_buffer(unsigned int size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    // Tiled YUV requires 8K alignment; everything else is page aligned.
    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();

    // Protected buffers need larger alignment for the secure environment,
    // and the size must be padded to that alignment as well.
    if (usage & GRALLOC_USAGE_PROTECTED) {
        if (usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) {
            /* The alignment here reflects qsee mmu V7L/V8L requirement */
            data.align = SZ_2M;
        } else {
            data.align = SECURE_ALIGN;
        }
        size = ALIGN(size, data.align);
    }

    data.size = size;
    data.pHandle = (uintptr_t) pHandle;
    err = mAllocCtrl->allocate(data, usage);

    if (!err) {
        /* allocate memory for enhancement data */
        // A separate page-rounded ION buffer carries the MetaData_t blob so
        // metadata can be shared/updated across processes alongside pixels.
        alloc_data eData;
        eData.fd = -1;
        eData.base = 0;
        eData.offset = 0;
        eData.size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
        eData.pHandle = data.pHandle;
        eData.align = getpagesize();
        int eDataUsage = 0;
        int eDataErr = mAllocCtrl->allocate(eData, eDataUsage);
        // NOTE(review): a metadata allocation failure is only logged here;
        // the handle below is still created (with eData.fd == -1) -- confirm
        // downstream consumers tolerate a missing metadata fd.
        ALOGE_IF(eDataErr, "gralloc failed for eDataErr=%s",
                                          strerror(-eDataErr));

        // Translate the caller's usage bits into the PRIV_FLAGS_* that
        // consumers (HWC, video, camera, CPU mappers, ...) read off the
        // handle.
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_ONLY) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        }

        if (usage & GRALLOC_USAGE_PRIVATE_INTERNAL_ONLY) {
            flags |= private_handle_t::PRIV_FLAGS_INTERNAL_ONLY;
        }

        if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER ) {
            flags |= private_handle_t::PRIV_FLAGS_VIDEO_ENCODER;
        }

        if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
            flags |= private_handle_t::PRIV_FLAGS_CAMERA_WRITE;
        }

        if (usage & GRALLOC_USAGE_HW_CAMERA_READ) {
            flags |= private_handle_t::PRIV_FLAGS_CAMERA_READ;
        }

        if (usage & GRALLOC_USAGE_HW_COMPOSER) {
            flags |= private_handle_t::PRIV_FLAGS_HW_COMPOSER;
        }

        if (usage & GRALLOC_USAGE_HW_TEXTURE) {
            flags |= private_handle_t::PRIV_FLAGS_HW_TEXTURE;
        }

        if(usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) {
            flags |= private_handle_t::PRIV_FLAGS_SECURE_DISPLAY;
        }

        if(isMacroTileEnabled(format, usage)) {
            flags |= private_handle_t::PRIV_FLAGS_TILE_RENDERED;
        }

        if (isUBwcEnabled(format, usage)) {
            flags |= private_handle_t::PRIV_FLAGS_UBWC_ALIGNED;
        }

        if(usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
            flags |= private_handle_t::PRIV_FLAGS_CPU_RENDERED;
        }

        if (usage & (GRALLOC_USAGE_HW_VIDEO_ENCODER |
                GRALLOC_USAGE_HW_CAMERA_WRITE |
                GRALLOC_USAGE_HW_RENDER |
                GRALLOC_USAGE_HW_FB)) {
            flags |= private_handle_t::PRIV_FLAGS_NON_CPU_WRITER;
        }

        if(usage & GRALLOC_USAGE_HW_COMPOSER) {
            flags |= private_handle_t::PRIV_FLAGS_DISP_CONSUMER;
        }

        // assumes mAllocCtrl->allocate() filled in data.uncached -- confirm
        if(false == data.uncached) {
            flags |= private_handle_t::PRIV_FLAGS_CACHED;
        }

        flags |= data.allocType;
        // Absolute metadata address = mapped base + offset within the fd.
        uint64_t eBaseAddr = (uint64_t)(eData.base) + eData.offset;
        private_handle_t *hnd = new private_handle_t(data.fd, size, flags,
                bufferType, format, width, height, eData.fd, eData.offset,
                eBaseAddr);

        hnd->offset = data.offset;
        hnd->base = (uint64_t)(data.base) + data.offset;
        hnd->gpuaddr = 0;
        // New buffers default to ITU-R BT.601 until a producer overrides it.
        ColorSpace_t colorSpace = ITU_R_601;
        setMetaData(hnd, UPDATE_COLOR_SPACE, (void*) &colorSpace);

        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));

    return err;
}
180
getGrallocInformationFromFormat(int inputFormat,int * bufferType)181 void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
182 int *bufferType)
183 {
184 *bufferType = BUFFER_TYPE_VIDEO;
185
186 if (isUncompressedRgbFormat(inputFormat) == TRUE) {
187 // RGB formats
188 *bufferType = BUFFER_TYPE_UI;
189 }
190 }
191
/*
 * Allocates one of the pre-mapped framebuffer slots and returns a "fake"
 * handle pointing into the shared framebuffer mapping. Caller must hold
 * m->lock (see gralloc_alloc_framebuffer()).
 *
 * Returns 0 on success, -EINVAL if gralloc is not in fb mode, -ENOMEM if
 * every slot is taken.
 */
int gpu_context_t::gralloc_alloc_framebuffer_locked(int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // This allocation will only happen when gralloc is in fb mode

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const unsigned int bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    unsigned int bufferSize = m->finfo.line_length * m->info.yres;

    //adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    // All numBuffers slot bits set means nothing is free.
    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handle for it
    uint64_t vaddr = uint64_t(m->framebuffer->base);
    // As GPU needs ION FD, the private handle is created
    // using ION fd and ION flags are set
    private_handle_t* hnd = new private_handle_t(
        dup(m->framebuffer->fd), bufferSize,
        private_handle_t::PRIV_FLAGS_USES_ION |
        private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
        BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
        m->info.yres);

    // find a free slot
    // vaddr walks forward by one page-aligned bufferSize per occupied slot,
    // so on break it addresses the claimed slot.
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (uint32_t)(1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }
    hnd->base = vaddr;
    hnd->offset = (unsigned int)(vaddr - m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}
249
250
gralloc_alloc_framebuffer(int usage,buffer_handle_t * pHandle)251 int gpu_context_t::gralloc_alloc_framebuffer(int usage,
252 buffer_handle_t* pHandle)
253 {
254 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
255 pthread_mutex_lock(&m->lock);
256 int err = gralloc_alloc_framebuffer_locked(usage, pHandle);
257 pthread_mutex_unlock(&m->lock);
258 return err;
259 }
260
/*
 * Core allocation entry shared by gralloc_alloc() and gralloc_alloc_size().
 * Resolves implementation-defined / flexible-YUV formats from the usage
 * bits, computes the aligned size, and allocates either from framebuffer
 * memory (debug property) or from ION.
 *
 * w, h       requested dimensions
 * format     requested HAL pixel format
 * usage      GRALLOC_USAGE_* bits
 * pHandle    out: new buffer handle
 * pStride    out: aligned width in pixels
 * bufferSize explicit minimum size in bytes, or 0 to derive from format
 *
 * Returns 0 on success, -EINVAL on bad args, or the allocator's error.
 */
int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
                              buffer_handle_t* pHandle, int* pStride,
                              unsigned int bufferSize) {
    if (!pHandle || !pStride)
        return -EINVAL;

    unsigned int size;
    int alignedw, alignedh;
    int grallocFormat = format;
    int bufferType;

    //If input format is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED then based on
    //the usage bits, gralloc assigns a format.
    // Precedence: UBWC > encoder > camera ZSL > camera read > camera write
    // > composer > flexible-YUV fallback.
    if(format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED ||
       format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
        if (usage & GRALLOC_USAGE_PRIVATE_ALLOC_UBWC)
            grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC;
        else if(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
            grallocFormat = HAL_PIXEL_FORMAT_NV12_ENCODEABLE; //NV12
        else if((usage & GRALLOC_USAGE_HW_CAMERA_MASK)
                == GRALLOC_USAGE_HW_CAMERA_ZSL)
            grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL; //NV21 ZSL
        else if(usage & GRALLOC_USAGE_HW_CAMERA_READ)
            grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; //NV21
        else if(usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
            if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
                grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL; //NV21
            } else {
                grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS; //NV12 preview
            }
        } else if(usage & GRALLOC_USAGE_HW_COMPOSER)
            //XXX: If we still haven't set a format, default to RGBA8888
            grallocFormat = HAL_PIXEL_FORMAT_RGBA_8888;
        else if(format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
            //If no other usage flags are detected, default the
            //flexible YUV format to NV21_ZSL
            grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL;
        }
    }

    bool useFbMem = false;
    char property[PROPERTY_VALUE_MAX];
    char isUBWC[PROPERTY_VALUE_MAX];
    if (usage & GRALLOC_USAGE_HW_FB) {
        // debug.gralloc.map_fb_memory forces FB allocations to come from
        // the mapped framebuffer rather than ION.
        if ((property_get("debug.gralloc.map_fb_memory", property, NULL) > 0) &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
             (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
            useFbMem = true;
        } else {
            // ION-backed FB: UBWC only when the debug property asks for it.
            usage &= ~GRALLOC_USAGE_PRIVATE_ALLOC_UBWC;
            if (property_get("debug.gralloc.enable_fb_ubwc", isUBWC, NULL) > 0){
                if ((!strncmp(isUBWC, "1", PROPERTY_VALUE_MAX)) ||
                    (!strncasecmp(isUBWC, "true", PROPERTY_VALUE_MAX))) {
                    // Allocate UBWC aligned framebuffer
                    usage |= GRALLOC_USAGE_PRIVATE_ALLOC_UBWC;
                }
            }
        }
    }

    getGrallocInformationFromFormat(grallocFormat, &bufferType);
    size = getBufferSizeAndDimensions(w, h, grallocFormat, usage, alignedw,
                   alignedh);

    // size is unsigned, so this rejects exactly the size == 0 case
    // (unsupported format/dimensions).
    if ((unsigned int)size <= 0)
        return -EINVAL;
    // Honor an explicit caller-supplied size when it is larger.
    size = (bufferSize >= size)? bufferSize : size;

    int err = 0;
    if(useFbMem) {
        err = gralloc_alloc_framebuffer(usage, pHandle);
    } else {
        err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
                                   grallocFormat, alignedw, alignedh);
    }

    if (err < 0) {
        return err;
    }

    // Record identity/debug info on the freshly created handle.
    auto hnd = (private_handle_t*) *pHandle;
    hnd->backing_store = next_backing_store_id();
    hnd->original_width = w;
    hnd->original_format = format;

    *pStride = alignedw;
    return 0;
}
349
free_impl(private_handle_t const * hnd)350 int gpu_context_t::free_impl(private_handle_t const* hnd) {
351 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
352 if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
353 const unsigned int bufferSize = m->finfo.line_length * m->info.yres;
354 unsigned int index = (unsigned int) ((hnd->base - m->framebuffer->base)
355 / bufferSize);
356 m->bufferMask &= (uint32_t)~(1LU<<index);
357 } else {
358
359 terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
360 IMemAlloc* memalloc = mAllocCtrl->getAllocator(hnd->flags);
361 int err = memalloc->free_buffer((void*)hnd->base, hnd->size,
362 hnd->offset, hnd->fd);
363 if(err)
364 return err;
365 // free the metadata space
366 unsigned int size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
367 err = memalloc->free_buffer((void*)hnd->base_metadata,
368 size, hnd->offset_metadata,
369 hnd->fd_metadata);
370 if (err)
371 return err;
372 }
373 delete hnd;
374 return 0;
375 }
376
gralloc_alloc(alloc_device_t * dev,int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride)377 int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
378 int usage, buffer_handle_t* pHandle,
379 int* pStride)
380 {
381 if (!dev) {
382 return -EINVAL;
383 }
384 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
385 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
386 }
gralloc_alloc_size(alloc_device_t * dev,int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride,int bufferSize)387 int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h,
388 int format, int usage,
389 buffer_handle_t* pHandle, int* pStride,
390 int bufferSize)
391 {
392 if (!dev) {
393 return -EINVAL;
394 }
395 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
396 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
397 }
398
399
gralloc_free(alloc_device_t * dev,buffer_handle_t handle)400 int gpu_context_t::gralloc_free(alloc_device_t* dev,
401 buffer_handle_t handle)
402 {
403 if (private_handle_t::validate(handle) < 0)
404 return -EINVAL;
405
406 private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
407 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
408 return gpu->free_impl(hnd);
409 }
410
411 /*****************************************************************************/
412
gralloc_close(struct hw_device_t * dev)413 int gpu_context_t::gralloc_close(struct hw_device_t *dev)
414 {
415 gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
416 if (ctx) {
417 /* TODO: keep a list of all buffer_handle_t created, and free them
418 * all here.
419 */
420 delete ctx;
421 }
422 return 0;
423 }
424
425