1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
4 *
5 * Not a Contribution, Apache license notifications and license are retained
6 * for attribution purposes only.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20 #include <cutils/log.h>
21 #include <sys/resource.h>
22 #include <sys/prctl.h>
23
24 #include <stdint.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <errno.h>
28 #include <fcntl.h>
29
30 #include <sys/ioctl.h>
31 #include <sys/types.h>
32 #include <sys/mman.h>
33
34 #include <linux/msm_kgsl.h>
35
36 #include <EGL/eglplatform.h>
37 #include <cutils/native_handle.h>
38 #include <cutils/ashmem.h>
39 #include <linux/ashmem.h>
40 #include <gralloc_priv.h>
41
42 #include <copybit.h>
43 #include <alloc_controller.h>
44 #include <memalloc.h>
45
46 #include "c2d2.h"
47 #include "software_converter.h"
48
49 #include <dlfcn.h>
50
51 using gralloc::IMemAlloc;
52 using gralloc::IonController;
53 using gralloc::alloc_data;
54
55 C2D_STATUS (*LINK_c2dCreateSurface)( uint32 *surface_id,
56 uint32 surface_bits,
57 C2D_SURFACE_TYPE surface_type,
58 void *surface_definition );
59
60 C2D_STATUS (*LINK_c2dUpdateSurface)( uint32 surface_id,
61 uint32 surface_bits,
62 C2D_SURFACE_TYPE surface_type,
63 void *surface_definition );
64
65 C2D_STATUS (*LINK_c2dReadSurface)( uint32 surface_id,
66 C2D_SURFACE_TYPE surface_type,
67 void *surface_definition,
68 int32 x, int32 y );
69
70 C2D_STATUS (*LINK_c2dDraw)( uint32 target_id,
71 uint32 target_config, C2D_RECT *target_scissor,
72 uint32 target_mask_id, uint32 target_color_key,
73 C2D_OBJECT *objects_list, uint32 num_objects );
74
75 C2D_STATUS (*LINK_c2dFinish)( uint32 target_id);
76
77 C2D_STATUS (*LINK_c2dFlush)( uint32 target_id, c2d_ts_handle *timestamp);
78
79 C2D_STATUS (*LINK_c2dWaitTimestamp)( c2d_ts_handle timestamp );
80
81 C2D_STATUS (*LINK_c2dDestroySurface)( uint32 surface_id );
82
83 C2D_STATUS (*LINK_c2dMapAddr) ( int mem_fd, void * hostptr, uint32 len,
84 uint32 offset, uint32 flags, void ** gpuaddr);
85
86 C2D_STATUS (*LINK_c2dUnMapAddr) ( void * gpuaddr);
87
88 C2D_STATUS (*LINK_c2dGetDriverCapabilities) ( C2D_DRIVER_INFO * driver_info);
89
90 /* create a fence fd for the timestamp */
91 C2D_STATUS (*LINK_c2dCreateFenceFD) ( uint32 target_id, c2d_ts_handle timestamp,
92 int32 *fd);
93
94 C2D_STATUS (*LINK_c2dFillSurface) ( uint32 surface_id, uint32 fill_color,
95 C2D_RECT * fill_rect);
96
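/* Note: the LINK_* entry points above are not linked directly; they are
 * resolved from libC2D2.so with dlsym() in open_copybit() below.
 */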
97 /******************************************************************************/
98
99 #if defined(COPYBIT_Z180)
100 #define MAX_SCALE_FACTOR (4096)
101 #define MAX_DIMENSION (4096)
102 #else
103 #error "Unsupported HW version"
104 #endif
105
106 // The following defines can be changed as required, i.e., as we encounter
107 // more complex use cases.
108 #define MAX_RGB_SURFACES 8 // Max. RGB layers currently supported per draw
109 #define MAX_YUV_2_PLANE_SURFACES 4 // Max. 2-plane YUV layers currently supported per draw
110 #define MAX_YUV_3_PLANE_SURFACES 1 // Max. 3-plane YUV layers currently supported per draw
111 // +1 for the destination surface. We cannot have multiple destination surfaces.
112 #define MAX_SURFACES (MAX_RGB_SURFACES + MAX_YUV_2_PLANE_SURFACES + MAX_YUV_3_PLANE_SURFACES + 1)
113 #define NUM_SURFACE_TYPES 3 // RGB_SURFACE + YUV_SURFACE_2_PLANES + YUV_SURFACE_3_PLANES
114 #define MAX_BLIT_OBJECT_COUNT 50 // Max. blit objects that can be passed per draw
115
116 enum {
117 RGB_SURFACE,
118 YUV_SURFACE_2_PLANES,
119 YUV_SURFACE_3_PLANES
120 };
121
122 enum eConversionType {
123 CONVERT_TO_ANDROID_FORMAT,
124 CONVERT_TO_C2D_FORMAT
125 };
126
127 enum eC2DFlags {
128 FLAGS_PREMULTIPLIED_ALPHA = 1<<0,
129 FLAGS_YUV_DESTINATION = 1<<1,
130 FLAGS_TEMP_SRC_DST = 1<<2
131 };
132
133 static gralloc::IAllocController* sAlloc = 0;
134 /******************************************************************************/
135
136 /** State information for each device instance */
137 struct copybit_context_t {
138 struct copybit_device_t device;
139 // Templates for the various source surfaces. These templates are created
140 // up front to avoid the expensive per-blit creation/destruction of C2D surfaces
141 C2D_OBJECT_STR blit_rgb_object[MAX_RGB_SURFACES];
142 C2D_OBJECT_STR blit_yuv_2_plane_object[MAX_YUV_2_PLANE_SURFACES];
143 C2D_OBJECT_STR blit_yuv_3_plane_object[MAX_YUV_3_PLANE_SURFACES];
144 C2D_OBJECT_STR blit_list[MAX_BLIT_OBJECT_COUNT]; // Z-ordered list of blit objects
145 C2D_DRIVER_INFO c2d_driver_info;
146 void *libc2d2;
147 alloc_data temp_src_buffer;
148 alloc_data temp_dst_buffer;
149 unsigned int dst[NUM_SURFACE_TYPES]; // dst surfaces
150 unsigned int mapped_gpu_addr[MAX_SURFACES]; // GPU addresses mapped inside copybit
151 int blit_rgb_count; // Total RGB surfaces being blit
152 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being blit
153 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit
154 int blit_count; // Total blit objects.
155 unsigned int trg_transform; /* target transform */
156 int fb_width;
157 int fb_height;
158 int src_global_alpha;
159 int config_mask;
160 int dst_surface_type;
161 bool is_premultiplied_alpha;
162 void* time_stamp;
163
164 // used for signaling the wait thread
165 bool wait_timestamp;
166 pthread_t wait_thread_id;
167 bool stop_thread;
168 pthread_mutex_t wait_cleanup_lock;
169 pthread_cond_t wait_cleanup_cond;
170
171 };
172
173 struct bufferInfo {
174 int width;
175 int height;
176 int format;
177 };
178
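/* Plane strides and byte offsets (relative to the start of the buffer) for a
 * YUV surface; filled in by calculate_yuv_offset_and_stride().
 */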
179 struct yuvPlaneInfo {
180 int yStride; //luma stride
181 int plane1_stride;
182 int plane2_stride;
183 int plane1_offset;
184 int plane2_offset;
185 };
186
187 /**
188 * Common hardware methods
189 */
190
191 static int open_copybit(const struct hw_module_t* module, const char* name,
192 struct hw_device_t** device);
193
194 static struct hw_module_methods_t copybit_module_methods = {
195 .open = open_copybit
196 };
197
198 /*
199 * The COPYBIT Module
200 */
201 struct copybit_module_t HAL_MODULE_INFO_SYM = {
202 .common = {
203 .tag = HARDWARE_MODULE_TAG,
204 .version_major = 1,
205 .version_minor = 0,
206 .id = COPYBIT_HARDWARE_MODULE_ID,
207 .name = "QCT COPYBIT C2D 2.0 Module",
208 .author = "Qualcomm",
209 .methods = &copybit_module_methods
210 }
211 };
212
213
214 /* thread function which waits on the timeStamp and cleans up the surfaces */
215 static void* c2d_wait_loop(void* ptr) {
216 copybit_context_t* ctx = (copybit_context_t*)(ptr);
217 char thread_name[64] = "copybitWaitThr";
218 prctl(PR_SET_NAME, (unsigned long) &thread_name, 0, 0, 0);
219 setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
220
221 while(ctx->stop_thread == false) {
222 pthread_mutex_lock(&ctx->wait_cleanup_lock);
223 while(ctx->wait_timestamp == false && !ctx->stop_thread) {
224 pthread_cond_wait(&(ctx->wait_cleanup_cond),
225 &(ctx->wait_cleanup_lock));
226 }
227 if(ctx->wait_timestamp) {
228 if(LINK_c2dWaitTimestamp(ctx->time_stamp)) {
229 ALOGE("%s: LINK_c2dWaitTimeStamp ERROR!!", __FUNCTION__);
230 }
231 ctx->wait_timestamp = false;
232 // Unmap any mapped addresses.
233 for (int i = 0; i < MAX_SURFACES; i++) {
234 if (ctx->mapped_gpu_addr[i]) {
235 LINK_c2dUnMapAddr( (void*)ctx->mapped_gpu_addr[i]);
236 ctx->mapped_gpu_addr[i] = 0;
237 }
238 }
239 // Reset the counts after the draw.
240 ctx->blit_rgb_count = 0;
241 ctx->blit_yuv_2_plane_count = 0;
242 ctx->blit_yuv_3_plane_count = 0;
243 ctx->blit_count = 0;
244 }
245 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
246 if(ctx->stop_thread)
247 break;
248 }
249 pthread_exit(NULL);
250 return NULL;
251 }
252
253
254 /* convert COPYBIT_FORMAT to C2D format */
255 static int get_format(int format) {
256 switch (format) {
257 case HAL_PIXEL_FORMAT_RGB_565: return C2D_COLOR_FORMAT_565_RGB;
258 case HAL_PIXEL_FORMAT_RGBX_8888: return C2D_COLOR_FORMAT_8888_ARGB |
259 C2D_FORMAT_SWAP_RB |
260 C2D_FORMAT_DISABLE_ALPHA;
261 case HAL_PIXEL_FORMAT_RGBA_8888: return C2D_COLOR_FORMAT_8888_ARGB |
262 C2D_FORMAT_SWAP_RB;
263 case HAL_PIXEL_FORMAT_BGRA_8888: return C2D_COLOR_FORMAT_8888_ARGB;
264 case HAL_PIXEL_FORMAT_YCbCr_420_SP: return C2D_COLOR_FORMAT_420_NV12;
265 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:return C2D_COLOR_FORMAT_420_NV12;
266 case HAL_PIXEL_FORMAT_YCrCb_420_SP: return C2D_COLOR_FORMAT_420_NV21;
267 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: return C2D_COLOR_FORMAT_420_NV12 |
268 C2D_FORMAT_MACROTILED;
269 default: ALOGE("%s: invalid format (0x%x)",
270 __FUNCTION__, format);
271 return -EINVAL;
272 }
273 return -EINVAL;
274 }
275
276 /* Get the C2D formats needed for conversion to YUV */
277 static int get_c2d_format_for_yuv_destination(int halFormat) {
278 switch (halFormat) {
279 // We do not swap the RB when the target is YUV
280 case HAL_PIXEL_FORMAT_RGBX_8888: return C2D_COLOR_FORMAT_8888_ARGB |
281 C2D_FORMAT_DISABLE_ALPHA;
282 case HAL_PIXEL_FORMAT_RGBA_8888: return C2D_COLOR_FORMAT_8888_ARGB;
283 // The U and V need to be interchanged when the target is YUV
284 case HAL_PIXEL_FORMAT_YCbCr_420_SP: return C2D_COLOR_FORMAT_420_NV21;
285 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:return C2D_COLOR_FORMAT_420_NV21;
286 case HAL_PIXEL_FORMAT_YCrCb_420_SP: return C2D_COLOR_FORMAT_420_NV12;
287 default: return get_format(halFormat);
288 }
289 return -EINVAL;
290 }
291
292 /* ------------------------------------------------------------------- *//*!
293 * \internal
294 * \brief Get the bpp for a particular color format
295 * \param color format
296 * \return bits per pixel
297 *//* ------------------------------------------------------------------- */
298 int c2diGetBpp(int32 colorformat)
299 {
300
301 int c2dBpp = 0;
302
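/* Only the low byte holds the base color format; modifier bits such as
 * C2D_FORMAT_SWAP_RB or C2D_FORMAT_DISABLE_ALPHA live in the upper bits and
 * are masked off here, so e.g. (C2D_COLOR_FORMAT_8888_ARGB |
 * C2D_FORMAT_SWAP_RB) still reports 32 bpp.
 */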
303 switch(colorformat&0xFF)
304 {
305 case C2D_COLOR_FORMAT_4444_RGBA:
306 case C2D_COLOR_FORMAT_4444_ARGB:
307 case C2D_COLOR_FORMAT_1555_ARGB:
308 case C2D_COLOR_FORMAT_565_RGB:
309 case C2D_COLOR_FORMAT_5551_RGBA:
310 c2dBpp = 16;
311 break;
312 case C2D_COLOR_FORMAT_8888_RGBA:
313 case C2D_COLOR_FORMAT_8888_ARGB:
314 c2dBpp = 32;
315 break;
316 case C2D_COLOR_FORMAT_8_L:
317 case C2D_COLOR_FORMAT_8_A:
318 c2dBpp = 8;
319 break;
320 case C2D_COLOR_FORMAT_4_A:
321 c2dBpp = 4;
322 break;
323 case C2D_COLOR_FORMAT_1:
324 c2dBpp = 1;
325 break;
326 default:
327 ALOGE("%s ERROR", __func__);
328 break;
329 }
330 return c2dBpp;
331 }
332
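/* Map the buffer backing the given handle into the GPU. Addresses mapped here
 * are remembered in ctx->mapped_gpu_addr[] so they can be unmapped once the
 * blit has completed -- either in finish_copybit() or in the wait thread after
 * LINK_c2dWaitTimestamp() returns.
 */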
333 static uint32 c2d_get_gpuaddr(copybit_context_t* ctx, struct private_handle_t *handle,
334 int &mapped_idx)
335 {
336 uint32 memtype, *gpuaddr;
337 C2D_STATUS rc;
338
339 if(!handle)
340 return 0;
341
342 if (handle->flags & (private_handle_t::PRIV_FLAGS_USES_PMEM |
343 private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP))
344 memtype = KGSL_USER_MEM_TYPE_PMEM;
345 else if (handle->flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM)
346 memtype = KGSL_USER_MEM_TYPE_ASHMEM;
347 else if (handle->flags & private_handle_t::PRIV_FLAGS_USES_ION)
348 memtype = KGSL_USER_MEM_TYPE_ION;
349 else {
350 ALOGE("Invalid handle flags: 0x%x", handle->flags);
351 return 0;
352 }
353
354 rc = LINK_c2dMapAddr(handle->fd, (void*)handle->base, handle->size,
355 handle->offset, memtype, (void**)&gpuaddr);
356
357 if (rc == C2D_STATUS_OK) {
358 // We have mapped the GPU address inside copybit. We need to unmap this
359 // address after the blit. Store this address
360 for (int i = 0; i < MAX_SURFACES; i++) {
361 if (ctx->mapped_gpu_addr[i] == 0) {
362 ctx->mapped_gpu_addr[i] = (uint32) gpuaddr;
363 mapped_idx = i;
364 break;
365 }
366 }
367
368 return (uint32) gpuaddr;
369 }
370 return 0;
371 }
372
373 static void unmap_gpuaddr(copybit_context_t* ctx, int mapped_idx)
374 {
375 if (!ctx || (mapped_idx == -1))
376 return;
377
378 if (ctx->mapped_gpu_addr[mapped_idx]) {
379 LINK_c2dUnMapAddr( (void*)ctx->mapped_gpu_addr[mapped_idx]);
380 ctx->mapped_gpu_addr[mapped_idx] = 0;
381 }
382 }
383
384 static int is_supported_rgb_format(int format)
385 {
386 switch(format) {
387 case HAL_PIXEL_FORMAT_RGBA_8888:
388 case HAL_PIXEL_FORMAT_RGBX_8888:
389 case HAL_PIXEL_FORMAT_RGB_565:
390 case HAL_PIXEL_FORMAT_BGRA_8888: {
391 return COPYBIT_SUCCESS;
392 }
393 default:
394 return COPYBIT_FAILURE;
395 }
396 }
397
398 static int get_num_planes(int format)
399 {
400 switch(format) {
401 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
402 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
403 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
404 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: {
405 return 2;
406 }
407 case HAL_PIXEL_FORMAT_YV12: {
408 return 3;
409 }
410 default:
411 return COPYBIT_FAILURE;
412 }
413 }
414
415 static int is_supported_yuv_format(int format)
416 {
417 switch(format) {
418 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
419 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
420 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
421 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: {
422 return COPYBIT_SUCCESS;
423 }
424 default:
425 return COPYBIT_FAILURE;
426 }
427 }
428
429 static int is_valid_destination_format(int format)
430 {
431 if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED) {
432 // C2D does not support NV12Tile as a destination format.
433 return COPYBIT_FAILURE;
434 }
435 return COPYBIT_SUCCESS;
436 }
437
438 static int calculate_yuv_offset_and_stride(const bufferInfo& info,
439 yuvPlaneInfo& yuvInfo)
440 {
441 int width = info.width;
442 int height = info.height;
443 int format = info.format;
444
445 int aligned_height = 0;
446 int aligned_width = 0, size = 0;
447
448 switch (format) {
449 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: {
450 /* NV12 Tile buffers have their luma height aligned to 32 and width
451 * aligned to 128 bytes. The chroma plane starts at an 8K-aligned offset.
452 */
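/* Illustrative example (not from the original code): a 640x480 buffer gives
 * aligned_width = 640, aligned_height = 480, size = 307200, and
 * plane1_offset = ALIGN(307200, 8192) = 311296.
 */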
453 aligned_height = ALIGN(height, 32);
454 aligned_width = ALIGN(width, 128);
455 size = aligned_width * aligned_height;
456 yuvInfo.plane1_offset = ALIGN(size,8192);
457 yuvInfo.yStride = aligned_width;
458 yuvInfo.plane1_stride = aligned_width;
459 break;
460 }
461 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
462 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
463 case HAL_PIXEL_FORMAT_YCrCb_420_SP: {
464 aligned_width = ALIGN(width, 32);
465 yuvInfo.yStride = aligned_width;
466 yuvInfo.plane1_stride = aligned_width;
467 if (HAL_PIXEL_FORMAT_NV12_ENCODEABLE == format) {
468 // The encoder requires a 2K aligned chroma offset
469 yuvInfo.plane1_offset = ALIGN(aligned_width * height, 2048);
470 } else
471 yuvInfo.plane1_offset = aligned_width * height;
472
473 break;
474 }
475 default: {
476 return COPYBIT_FAILURE;
477 }
478 }
479 return COPYBIT_SUCCESS;
480 }
481
482 /** set up a C2D surface from a copybit image */
483 static int set_image(copybit_context_t* ctx, uint32 surfaceId,
484 const struct copybit_image_t *rhs,
485 const eC2DFlags flags, int &mapped_idx)
486 {
487 struct private_handle_t* handle = (struct private_handle_t*)rhs->handle;
488 C2D_SURFACE_TYPE surfaceType;
489 int status = COPYBIT_SUCCESS;
490 uint32 gpuaddr = 0;
491 int c2d_format;
492 mapped_idx = -1;
493
494 if (flags & FLAGS_YUV_DESTINATION) {
495 c2d_format = get_c2d_format_for_yuv_destination(rhs->format);
496 } else {
497 c2d_format = get_format(rhs->format);
498 }
499
500 if(c2d_format == -EINVAL) {
501 ALOGE("%s: invalid format", __FUNCTION__);
502 return -EINVAL;
503 }
504
505 if(handle == NULL) {
506 ALOGE("%s: invalid handle", __func__);
507 return -EINVAL;
508 }
509
510 if (handle->gpuaddr == 0) {
511 gpuaddr = c2d_get_gpuaddr(ctx, handle, mapped_idx);
512 if(!gpuaddr) {
513 ALOGE("%s: c2d_get_gpuaddr failed", __FUNCTION__);
514 return COPYBIT_FAILURE;
515 }
516 } else {
517 gpuaddr = handle->gpuaddr;
518 }
519
520 /* create C2D surface */
521 if(is_supported_rgb_format(rhs->format) == COPYBIT_SUCCESS) {
522 /* RGB */
523 C2D_RGB_SURFACE_DEF surfaceDef;
524
525 surfaceType = (C2D_SURFACE_TYPE) (C2D_SURFACE_RGB_HOST | C2D_SURFACE_WITH_PHYS);
526
527 surfaceDef.phys = (void*) gpuaddr;
528 surfaceDef.buffer = (void*) (handle->base);
529
530 surfaceDef.format = c2d_format |
531 ((flags & FLAGS_PREMULTIPLIED_ALPHA) ? C2D_FORMAT_PREMULTIPLIED : 0);
532 surfaceDef.width = rhs->w;
533 surfaceDef.height = rhs->h;
534 int aligned_width = ALIGN(surfaceDef.width,32);
535 surfaceDef.stride = (aligned_width * c2diGetBpp(surfaceDef.format))>>3;
536
537 if(LINK_c2dUpdateSurface( surfaceId,C2D_TARGET | C2D_SOURCE, surfaceType,
538 &surfaceDef)) {
539 ALOGE("%s: RGB Surface c2dUpdateSurface ERROR", __FUNCTION__);
540 unmap_gpuaddr(ctx, mapped_idx);
541 status = COPYBIT_FAILURE;
542 }
543 } else if (is_supported_yuv_format(rhs->format) == COPYBIT_SUCCESS) {
544 C2D_YUV_SURFACE_DEF surfaceDef;
545 memset(&surfaceDef, 0, sizeof(surfaceDef));
546 surfaceType = (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST | C2D_SURFACE_WITH_PHYS);
547 surfaceDef.format = c2d_format;
548
549 bufferInfo info;
550 info.width = rhs->w;
551 info.height = rhs->h;
552 info.format = rhs->format;
553
554 yuvPlaneInfo yuvInfo = {0};
555 status = calculate_yuv_offset_and_stride(info, yuvInfo);
556 if(status != COPYBIT_SUCCESS) {
557 ALOGE("%s: calculate_yuv_offset_and_stride error", __FUNCTION__);
558 unmap_gpuaddr(ctx, mapped_idx);
559 }
560
561 surfaceDef.width = rhs->w;
562 surfaceDef.height = rhs->h;
563 surfaceDef.plane0 = (void*) (handle->base);
564 surfaceDef.phys0 = (void*) (gpuaddr);
565 surfaceDef.stride0 = yuvInfo.yStride;
566
567 surfaceDef.plane1 = (void*) (handle->base + yuvInfo.plane1_offset);
568 surfaceDef.phys1 = (void*) (gpuaddr + yuvInfo.plane1_offset);
569 surfaceDef.stride1 = yuvInfo.plane1_stride;
570 if (3 == get_num_planes(rhs->format)) {
571 surfaceDef.plane2 = (void*) (handle->base + yuvInfo.plane2_offset);
572 surfaceDef.phys2 = (void*) (gpuaddr + yuvInfo.plane2_offset);
573 surfaceDef.stride2 = yuvInfo.plane2_stride;
574 }
575
576 if(LINK_c2dUpdateSurface( surfaceId,C2D_TARGET | C2D_SOURCE, surfaceType,
577 &surfaceDef)) {
578 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__);
579 unmap_gpuaddr(ctx, mapped_idx);
580 status = COPYBIT_FAILURE;
581 }
582 } else {
583 ALOGE("%s: invalid format 0x%x", __FUNCTION__, rhs->format);
584 unmap_gpuaddr(ctx, mapped_idx);
585 status = COPYBIT_FAILURE;
586 }
587
588 return status;
589 }
590
591 /** copy the bits */
592 static int msm_copybit(struct copybit_context_t *ctx, unsigned int target)
593 {
594 if (ctx->blit_count == 0) {
595 return COPYBIT_SUCCESS;
596 }
597
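// Chain the blit objects into a singly-linked list, as expected by c2dDraw.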
598 for (int i = 0; i < ctx->blit_count; i++)
599 {
600 ctx->blit_list[i].next = &(ctx->blit_list[i+1]);
601 }
602 ctx->blit_list[ctx->blit_count-1].next = NULL;
603 uint32_t target_transform = ctx->trg_transform;
604 if (ctx->c2d_driver_info.capabilities_mask &
605 C2D_DRIVER_SUPPORTS_OVERRIDE_TARGET_ROTATE_OP) {
606 // For A3xx - set 0x0 as the transform is set in the config_mask
607 target_transform = 0x0;
608 }
609 if(LINK_c2dDraw(target, target_transform, 0x0, 0, 0, ctx->blit_list,
610 ctx->blit_count)) {
611 ALOGE("%s: LINK_c2dDraw ERROR", __FUNCTION__);
612 return COPYBIT_FAILURE;
613 }
614 return COPYBIT_SUCCESS;
615 }
616
617
618
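/* Issue the queued blits, flush them to the GPU and return a fence fd for the
 * resulting timestamp. On success the wait thread is signalled; it blocks on
 * the timestamp, then unmaps buffers and resets the per-draw counters.
 */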
619 static int flush_get_fence_copybit (struct copybit_device_t *dev, int* fd)
620 {
621 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
622 int status = COPYBIT_FAILURE;
623 if (!ctx)
624 return COPYBIT_FAILURE;
625 pthread_mutex_lock(&ctx->wait_cleanup_lock);
626 status = msm_copybit(ctx, ctx->dst[ctx->dst_surface_type]);
627
628 if(LINK_c2dFlush(ctx->dst[ctx->dst_surface_type], &ctx->time_stamp)) {
629 ALOGE("%s: LINK_c2dFlush ERROR", __FUNCTION__);
630 // unlock the mutex and return failure
631 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
632 return COPYBIT_FAILURE;
633 }
634 if(LINK_c2dCreateFenceFD(ctx->dst[ctx->dst_surface_type], ctx->time_stamp,
635 fd)) {
636 ALOGE("%s: LINK_c2dCreateFenceFD ERROR", __FUNCTION__);
637 status = COPYBIT_FAILURE;
638 }
639 if(status == COPYBIT_SUCCESS) {
640 //signal the wait_thread
641 ctx->wait_timestamp = true;
642 pthread_cond_signal(&ctx->wait_cleanup_cond);
643 }
644 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
645 return status;
646 }
647
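/* Issue the queued blits and wait synchronously for them to complete, then
 * unmap any GPU addresses mapped for this draw and reset the per-draw counters.
 */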
648 static int finish_copybit(struct copybit_device_t *dev)
649 {
650 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
651 if (!ctx)
652 return COPYBIT_FAILURE;
653
654 int status = msm_copybit(ctx, ctx->dst[ctx->dst_surface_type]);
655
656 if(LINK_c2dFinish(ctx->dst[ctx->dst_surface_type])) {
657 ALOGE("%s: LINK_c2dFinish ERROR", __FUNCTION__);
658 return COPYBIT_FAILURE;
659 }
660
661 // Unmap any mapped addresses.
662 for (int i = 0; i < MAX_SURFACES; i++) {
663 if (ctx->mapped_gpu_addr[i]) {
664 LINK_c2dUnMapAddr( (void*)ctx->mapped_gpu_addr[i]);
665 ctx->mapped_gpu_addr[i] = 0;
666 }
667 }
668
669 // Reset the counts after the draw.
670 ctx->blit_rgb_count = 0;
671 ctx->blit_yuv_2_plane_count = 0;
672 ctx->blit_yuv_3_plane_count = 0;
673 ctx->blit_count = 0;
674 return status;
675 }
676
677 static int clear_copybit(struct copybit_device_t *dev,
678 struct copybit_image_t const *buf,
679 struct copybit_rect_t *rect)
680 {
681 int ret = COPYBIT_SUCCESS;
682 int flags = FLAGS_PREMULTIPLIED_ALPHA;
683 int mapped_dst_idx = -1;
684 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
685 C2D_RECT c2drect = {rect->l, rect->t, rect->r - rect->l, rect->b - rect->t};
686 pthread_mutex_lock(&ctx->wait_cleanup_lock);
687 ret = set_image(ctx, ctx->dst[RGB_SURFACE], buf,
688 (eC2DFlags)flags, mapped_dst_idx);
689 if(ret) {
690 ALOGE("%s: set_image error", __FUNCTION__);
691 unmap_gpuaddr(ctx, mapped_dst_idx);
692 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
693 return COPYBIT_FAILURE;
694 }
695
696 ret = LINK_c2dFillSurface(ctx->dst[RGB_SURFACE], 0x0, &c2drect);
697 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
698 return ret;
699 }
700
701
702 /** set up the source, target and scissor rectangles for a blit object */
703 static void set_rects(struct copybit_context_t *ctx,
704 C2D_OBJECT *c2dObject,
705 const struct copybit_rect_t *dst,
706 const struct copybit_rect_t *src,
707 const struct copybit_rect_t *scissor)
708 {
709 // Set the target rect.
710 if((ctx->trg_transform & C2D_TARGET_ROTATE_90) &&
711 (ctx->trg_transform & C2D_TARGET_ROTATE_180)) {
712 /* target rotation is 270 */
713 c2dObject->target_rect.x = (dst->t)<<16;
714 c2dObject->target_rect.y = ctx->fb_width?(ALIGN(ctx->fb_width,32)- dst->r):dst->r;
715 c2dObject->target_rect.y = c2dObject->target_rect.y<<16;
716 c2dObject->target_rect.height = ((dst->r) - (dst->l))<<16;
717 c2dObject->target_rect.width = ((dst->b) - (dst->t))<<16;
718 } else if(ctx->trg_transform & C2D_TARGET_ROTATE_90) {
719 c2dObject->target_rect.x = ctx->fb_height?(ctx->fb_height - dst->b):dst->b;
720 c2dObject->target_rect.x = c2dObject->target_rect.x<<16;
721 c2dObject->target_rect.y = (dst->l)<<16;
722 c2dObject->target_rect.height = ((dst->r) - (dst->l))<<16;
723 c2dObject->target_rect.width = ((dst->b) - (dst->t))<<16;
724 } else if(ctx->trg_transform & C2D_TARGET_ROTATE_180) {
725 c2dObject->target_rect.y = ctx->fb_height?(ctx->fb_height - dst->b):dst->b;
726 c2dObject->target_rect.y = c2dObject->target_rect.y<<16;
727 c2dObject->target_rect.x = ctx->fb_width?(ALIGN(ctx->fb_width,32) - dst->r):dst->r;
728 c2dObject->target_rect.x = c2dObject->target_rect.x<<16;
729 c2dObject->target_rect.height = ((dst->b) - (dst->t))<<16;
730 c2dObject->target_rect.width = ((dst->r) - (dst->l))<<16;
731 } else {
732 c2dObject->target_rect.x = (dst->l)<<16;
733 c2dObject->target_rect.y = (dst->t)<<16;
734 c2dObject->target_rect.height = ((dst->b) - (dst->t))<<16;
735 c2dObject->target_rect.width = ((dst->r) - (dst->l))<<16;
736 }
737 c2dObject->config_mask |= C2D_TARGET_RECT_BIT;
738
739 // Set the source rect
740 c2dObject->source_rect.x = (src->l)<<16;
741 c2dObject->source_rect.y = (src->t)<<16;
742 c2dObject->source_rect.height = ((src->b) - (src->t))<<16;
743 c2dObject->source_rect.width = ((src->r) - (src->l))<<16;
744 c2dObject->config_mask |= C2D_SOURCE_RECT_BIT;
745
746 // Set the scissor rect
747 c2dObject->scissor_rect.x = scissor->l;
748 c2dObject->scissor_rect.y = scissor->t;
749 c2dObject->scissor_rect.height = (scissor->b) - (scissor->t);
750 c2dObject->scissor_rect.width = (scissor->r) - (scissor->l);
751 c2dObject->config_mask |= C2D_SCISSOR_RECT_BIT;
752 }
753
754 /*****************************************************************************/
755
756 /** Set a parameter to value */
757 static int set_parameter_copybit(
758 struct copybit_device_t *dev,
759 int name,
760 int value)
761 {
762 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
763 int status = COPYBIT_SUCCESS;
764 if (!ctx) {
765 ALOGE("%s: null context", __FUNCTION__);
766 return -EINVAL;
767 }
768
769 pthread_mutex_lock(&ctx->wait_cleanup_lock);
770 switch(name) {
771 case COPYBIT_PLANE_ALPHA:
772 {
773 if (value < 0) value = 0;
774 if (value >= 256) value = 255;
775
776 ctx->src_global_alpha = value;
777 if (value < 255)
778 ctx->config_mask |= C2D_GLOBAL_ALPHA_BIT;
779 else
780 ctx->config_mask &= ~C2D_GLOBAL_ALPHA_BIT;
781 }
782 break;
783 case COPYBIT_BLEND_MODE:
784 {
785 if (value == COPYBIT_BLENDING_NONE) {
786 ctx->config_mask |= C2D_ALPHA_BLEND_NONE;
787 ctx->is_premultiplied_alpha = true;
788 } else if (value == COPYBIT_BLENDING_PREMULT) {
789 ctx->is_premultiplied_alpha = true;
790 } else {
791 ctx->config_mask &= ~C2D_ALPHA_BLEND_NONE;
792 }
793 }
794 break;
795 case COPYBIT_TRANSFORM:
796 {
797 unsigned int transform = 0;
798 uint32 config_mask = 0;
799 config_mask |= C2D_OVERRIDE_GLOBAL_TARGET_ROTATE_CONFIG;
800 if((value & 0x7) == COPYBIT_TRANSFORM_ROT_180) {
801 transform = C2D_TARGET_ROTATE_180;
802 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_180;
803 } else if((value & 0x7) == COPYBIT_TRANSFORM_ROT_270) {
804 transform = C2D_TARGET_ROTATE_90;
805 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_90;
806 } else if(value == COPYBIT_TRANSFORM_ROT_90) {
807 transform = C2D_TARGET_ROTATE_270;
808 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_270;
809 } else {
810 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_0;
811 if(value & COPYBIT_TRANSFORM_FLIP_H) {
812 config_mask |= C2D_MIRROR_H_BIT;
813 } else if(value & COPYBIT_TRANSFORM_FLIP_V) {
814 config_mask |= C2D_MIRROR_V_BIT;
815 }
816 }
817
818 if (ctx->c2d_driver_info.capabilities_mask &
819 C2D_DRIVER_SUPPORTS_OVERRIDE_TARGET_ROTATE_OP) {
820 ctx->config_mask |= config_mask;
821 } else {
822 // The transform for this surface does not match the current
823 // target transform. Draw all previous surfaces. This will be
824 // changed once we have a new mechanism to send different
825 // target rotations to c2d.
826 finish_copybit(dev);
827 }
828 ctx->trg_transform = transform;
829 }
830 break;
831 case COPYBIT_FRAMEBUFFER_WIDTH:
832 ctx->fb_width = value;
833 break;
834 case COPYBIT_FRAMEBUFFER_HEIGHT:
835 ctx->fb_height = value;
836 break;
837 case COPYBIT_ROTATION_DEG:
838 case COPYBIT_DITHER:
839 case COPYBIT_BLUR:
840 case COPYBIT_BLIT_TO_FRAMEBUFFER:
841 // Do nothing
842 break;
843 default:
844 ALOGE("%s: default case param=0x%x", __FUNCTION__, name);
845 status = -EINVAL;
846 break;
847 }
848 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
849 return status;
850 }
851
852 /** Get a static info value */
853 static int get(struct copybit_device_t *dev, int name)
854 {
855 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
856 int value;
857
858 if (!ctx) {
859 ALOGE("%s: null context error", __FUNCTION__);
860 return -EINVAL;
861 }
862
863 switch(name) {
864 case COPYBIT_MINIFICATION_LIMIT:
865 value = MAX_SCALE_FACTOR;
866 break;
867 case COPYBIT_MAGNIFICATION_LIMIT:
868 value = MAX_SCALE_FACTOR;
869 break;
870 case COPYBIT_SCALING_FRAC_BITS:
871 value = 32;
872 break;
873 case COPYBIT_ROTATION_STEP_DEG:
874 value = 1;
875 break;
876 default:
877 ALOGE("%s: default case param=0x%x", __FUNCTION__, name);
878 value = -EINVAL;
879 }
880 return value;
881 }
882
883 static int is_alpha(int cformat)
884 {
885 int alpha = 0;
886 switch (cformat & 0xFF) {
887 case C2D_COLOR_FORMAT_8888_ARGB:
888 case C2D_COLOR_FORMAT_8888_RGBA:
889 case C2D_COLOR_FORMAT_5551_RGBA:
890 case C2D_COLOR_FORMAT_4444_ARGB:
891 alpha = 1;
892 break;
893 default:
894 alpha = 0;
895 break;
896 }
897
898 if(alpha && (cformat&C2D_FORMAT_DISABLE_ALPHA))
899 alpha = 0;
900
901 return alpha;
902 }
903
904 /* Function to check if we need a temporary buffer for the blit.
905 * This would happen if the requested destination stride and the
906 * C2D stride do not match. We ignore RGB buffers, since their
907 * stride is always aligned to 32.
908 */
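/* For example (illustrative): a YUV image whose handle->width is 100 is not a
 * multiple of 32, so it is first copied into a temporary, 32-aligned buffer.
 */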
909 static bool need_temp_buffer(struct copybit_image_t const *img)
910 {
911 if (COPYBIT_SUCCESS == is_supported_rgb_format(img->format))
912 return false;
913
914 struct private_handle_t* handle = (struct private_handle_t*)img->handle;
915
916 // The width parameter in the handle contains the aligned_w. We check if we
917 // need to convert based on this param. YUV formats have bpp=1, so checking
918 // if the requested stride is aligned should suffice.
919 if (0 == (handle->width)%32) {
920 return false;
921 }
922
923 return true;
924 }
925
926 /* Function to extract the information from the copybit image and set the corresponding
927 * values in the bufferInfo struct.
928 */
929 static void populate_buffer_info(struct copybit_image_t const *img, bufferInfo& info)
930 {
931 info.width = img->w;
932 info.height = img->h;
933 info.format = img->format;
934 }
935
936 /* Function to get the required size for a particular format, in order for C2D to perform
937 * the blit operation.
938 */
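/* Illustrative example for HAL_PIXEL_FORMAT_NV12_ENCODEABLE with w=100, h=100:
 * aligned_w = 128, luma = ALIGN(128*100, 2048) = 14336, chroma =
 * ALIGN(64, 32) * 50 * 2 = 6400, total = ALIGN(20736, 4096) = 24576 bytes.
 */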
939 static size_t get_size(const bufferInfo& info)
940 {
941 size_t size = 0;
942 int w = info.width;
943 int h = info.height;
944 int aligned_w = ALIGN(w, 32);
945 switch(info.format) {
946 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
947 {
948 // Chroma for this format is aligned to 2K.
949 size = ALIGN((aligned_w*h), 2048) +
950 ALIGN(aligned_w/2, 32) * (h/2) *2;
951 size = ALIGN(size, 4096);
952 } break;
953 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
954 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
955 {
956 size = aligned_w * h +
957 ALIGN(aligned_w/2, 32) * (h/2) * 2;
958 size = ALIGN(size, 4096);
959 } break;
960 default: break;
961 }
962 return size;
963 }
964
965 /* Function to allocate memory for the temporary buffer. This memory is
966 * allocated from the system heap. It is the caller's responsibility to free
967 * this memory.
968 */
969 static int get_temp_buffer(const bufferInfo& info, alloc_data& data)
970 {
971 ALOGD("%s E", __FUNCTION__);
972 // Alloc memory from system heap
973 data.base = 0;
974 data.fd = -1;
975 data.offset = 0;
976 data.size = get_size(info);
977 data.align = getpagesize();
978 data.uncached = true;
979 int allocFlags = GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP;
980
981 if (sAlloc == 0) {
982 sAlloc = gralloc::IAllocController::getInstance();
983 }
984
985 if (sAlloc == 0) {
986 ALOGE("%s: sAlloc is still NULL", __FUNCTION__);
987 return COPYBIT_FAILURE;
988 }
989
990 int err = sAlloc->allocate(data, allocFlags);
991 if (0 != err) {
992 ALOGE("%s: allocate failed", __FUNCTION__);
993 return COPYBIT_FAILURE;
994 }
995
996 ALOGD("%s X", __FUNCTION__);
997 return err;
998 }
999
1000 /* Function to free the temporary allocated memory.*/
1001 static void free_temp_buffer(alloc_data &data)
1002 {
1003 if (-1 != data.fd) {
1004 IMemAlloc* memalloc = sAlloc->getAllocator(data.allocType);
1005 memalloc->free_buffer(data.base, data.size, 0, data.fd);
1006 }
1007 }
1008
1009 /* Function to perform the software color conversion between the C2D
1010 * compatible format and the Android compatible format, as selected by conversionType
1011 */
1012 static int copy_image(private_handle_t *src_handle,
1013 struct copybit_image_t const *rhs,
1014 eConversionType conversionType)
1015 {
1016 if (src_handle->fd == -1) {
1017 ALOGE("%s: src_handle fd is invalid", __FUNCTION__);
1018 return COPYBIT_FAILURE;
1019 }
1020
1021 // Copy the info.
1022 int ret = COPYBIT_SUCCESS;
1023 switch(rhs->format) {
1024 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
1025 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
1026 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
1027 {
1028 if (CONVERT_TO_ANDROID_FORMAT == conversionType) {
1029 return convert_yuv_c2d_to_yuv_android(src_handle, rhs);
1030 } else {
1031 return convert_yuv_android_to_yuv_c2d(src_handle, rhs);
1032 }
1033
1034 } break;
1035 default: {
1036 ALOGE("%s: invalid format 0x%x", __FUNCTION__, rhs->format);
1037 ret = COPYBIT_FAILURE;
1038 } break;
1039 }
1040 return ret;
1041 }
1042
1043 static void delete_handle(private_handle_t *handle)
1044 {
1045 if (handle) {
1046 delete handle;
1047 handle = 0;
1048 }
1049 }
1050
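/* Decide whether the draw must be executed immediately rather than batched:
 * blits that go through a temporary src/dst buffer must finish before the temp
 * buffer is copied back or reused, and YUV destinations are not batched.
 */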
1051 static bool need_to_execute_draw(struct copybit_context_t* ctx,
1052 eC2DFlags flags)
1053 {
1054 if (flags & FLAGS_TEMP_SRC_DST) {
1055 return true;
1056 }
1057 if (flags & FLAGS_YUV_DESTINATION) {
1058 return true;
1059 }
1060 return false;
1061 }
1062
1063 /** do a stretch blit type operation */
1064 static int stretch_copybit_internal(
1065 struct copybit_device_t *dev,
1066 struct copybit_image_t const *dst,
1067 struct copybit_image_t const *src,
1068 struct copybit_rect_t const *dst_rect,
1069 struct copybit_rect_t const *src_rect,
1070 struct copybit_region_t const *region,
1071 bool enableBlend)
1072 {
1073 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1074 int status = COPYBIT_SUCCESS;
1075 int flags = 0;
1076 int src_surface_type;
1077 int mapped_src_idx = -1, mapped_dst_idx = -1;
1078 C2D_OBJECT_STR src_surface;
1079
1080 if (!ctx) {
1081 ALOGE("%s: null context error", __FUNCTION__);
1082 return -EINVAL;
1083 }
1084
1085 if (src->w > MAX_DIMENSION || src->h > MAX_DIMENSION) {
1086 ALOGE("%s: src dimension error", __FUNCTION__);
1087 return -EINVAL;
1088 }
1089
1090 if (dst->w > MAX_DIMENSION || dst->h > MAX_DIMENSION) {
1091 ALOGE("%s : dst dimension error dst w %d h %d", __FUNCTION__, dst->w,
1092 dst->h);
1093 return -EINVAL;
1094 }
1095
1096 if (is_valid_destination_format(dst->format) == COPYBIT_FAILURE) {
1097 ALOGE("%s: Invalid destination format format = 0x%x", __FUNCTION__,
1098 dst->format);
1099 return COPYBIT_FAILURE;
1100 }
1101
1102 int dst_surface_type;
1103 if (is_supported_rgb_format(dst->format) == COPYBIT_SUCCESS) {
1104 dst_surface_type = RGB_SURFACE;
1105 flags |= FLAGS_PREMULTIPLIED_ALPHA;
1106 } else if (is_supported_yuv_format(dst->format) == COPYBIT_SUCCESS) {
1107 int num_planes = get_num_planes(dst->format);
1108 flags |= FLAGS_YUV_DESTINATION;
1109 if (num_planes == 2) {
1110 dst_surface_type = YUV_SURFACE_2_PLANES;
1111 } else if (num_planes == 3) {
1112 dst_surface_type = YUV_SURFACE_3_PLANES;
1113 } else {
1114 ALOGE("%s: dst number of YUV planes is invalid dst format = 0x%x",
1115 __FUNCTION__, dst->format);
1116 return COPYBIT_FAILURE;
1117 }
1118 } else {
1119 ALOGE("%s: Invalid dst surface format 0x%x", __FUNCTION__,
1120 dst->format);
1121 return COPYBIT_FAILURE;
1122 }
1123
1124 if (ctx->blit_rgb_count == MAX_RGB_SURFACES ||
1125 ctx->blit_yuv_2_plane_count == MAX_YUV_2_PLANE_SURFACES ||
1126 ctx->blit_yuv_3_plane_count == MAX_YUV_3_PLANE_SURFACES ||
1127 ctx->blit_count == MAX_BLIT_OBJECT_COUNT ||
1128 ctx->dst_surface_type != dst_surface_type) {
1129 // we have reached the max. limits of our internal structures or
1130 // changed the target.
1131 // Draw the remaining surfaces. We need to do the finish here since
1132 // we need to free up the surface templates.
1133 finish_copybit(dev);
1134 }
1135
1136 ctx->dst_surface_type = dst_surface_type;
1137
1138 // Update the destination
1139 copybit_image_t dst_image;
1140 dst_image.w = dst->w;
1141 dst_image.h = dst->h;
1142 dst_image.format = dst->format;
1143 dst_image.handle = dst->handle;
1144 // Check if we need a temp. copy for the destination. We'd need this if the destination
1145 // width is not aligned to 32. This case occurs for YUV formats. RGB formats are
1146 // aligned to 32.
1147 bool need_temp_dst = need_temp_buffer(dst);
1148 bufferInfo dst_info;
1149 populate_buffer_info(dst, dst_info);
1150 private_handle_t* dst_hnd = new private_handle_t(-1, 0, 0, 0, dst_info.format,
1151 dst_info.width, dst_info.height);
1152 if (dst_hnd == NULL) {
1153 ALOGE("%s: dst_hnd is null", __FUNCTION__);
1154 return COPYBIT_FAILURE;
1155 }
1156 if (need_temp_dst) {
1157 if (get_size(dst_info) != ctx->temp_dst_buffer.size) {
1158 free_temp_buffer(ctx->temp_dst_buffer);
1159 // Create a temp buffer and set that as the destination.
1160 if (COPYBIT_FAILURE == get_temp_buffer(dst_info, ctx->temp_dst_buffer)) {
1161 ALOGE("%s: get_temp_buffer(dst) failed", __FUNCTION__);
1162 delete_handle(dst_hnd);
1163 return COPYBIT_FAILURE;
1164 }
1165 }
1166 dst_hnd->fd = ctx->temp_dst_buffer.fd;
1167 dst_hnd->size = ctx->temp_dst_buffer.size;
1168 dst_hnd->flags = ctx->temp_dst_buffer.allocType;
1169 dst_hnd->base = (int)(ctx->temp_dst_buffer.base);
1170 dst_hnd->offset = ctx->temp_dst_buffer.offset;
1171 dst_hnd->gpuaddr = 0;
1172 dst_image.handle = dst_hnd;
1173 }
1174
1175 status = set_image(ctx, ctx->dst[ctx->dst_surface_type], &dst_image,
1176 (eC2DFlags)flags, mapped_dst_idx);
1177 if(status) {
1178 ALOGE("%s: dst: set_image error", __FUNCTION__);
1179 delete_handle(dst_hnd);
1180 unmap_gpuaddr(ctx, mapped_dst_idx);
1181 return COPYBIT_FAILURE;
1182 }
1183
1184 // Update the source
1185 flags = 0;
1186 if(is_supported_rgb_format(src->format) == COPYBIT_SUCCESS) {
1187 src_surface_type = RGB_SURFACE;
1188 src_surface = ctx->blit_rgb_object[ctx->blit_rgb_count];
1189 } else if (is_supported_yuv_format(src->format) == COPYBIT_SUCCESS) {
1190 int num_planes = get_num_planes(src->format);
1191 if (num_planes == 2) {
1192 src_surface_type = YUV_SURFACE_2_PLANES;
1193 src_surface = ctx->blit_yuv_2_plane_object[ctx->blit_yuv_2_plane_count];
1194 } else if (num_planes == 3) {
1195 src_surface_type = YUV_SURFACE_3_PLANES;
1196 src_surface = ctx->blit_yuv_3_plane_object[ctx->blit_yuv_3_plane_count];
1197 } else {
1198 ALOGE("%s: src number of YUV planes is invalid src format = 0x%x",
1199 __FUNCTION__, src->format);
1200 delete_handle(dst_hnd);
1201 unmap_gpuaddr(ctx, mapped_dst_idx);
1202 return -EINVAL;
1203 }
1204 } else {
1205 ALOGE("%s: Invalid source surface format 0x%x", __FUNCTION__,
1206 src->format);
1207 delete_handle(dst_hnd);
1208 unmap_gpuaddr(ctx, mapped_dst_idx);
1209 return -EINVAL;
1210 }
1211
1212 copybit_image_t src_image;
1213 src_image.w = src->w;
1214 src_image.h = src->h;
1215 src_image.format = src->format;
1216 src_image.handle = src->handle;
1217
1218 bool need_temp_src = need_temp_buffer(src);
1219 bufferInfo src_info;
1220 populate_buffer_info(src, src_info);
1221 private_handle_t* src_hnd = new private_handle_t(-1, 0, 0, 0, src_info.format,
1222 src_info.width, src_info.height);
1223 if (NULL == src_hnd) {
1224 ALOGE("%s: src_hnd is null", __FUNCTION__);
1225 delete_handle(dst_hnd);
1226 unmap_gpuaddr(ctx, mapped_dst_idx);
1227 return COPYBIT_FAILURE;
1228 }
1229 if (need_temp_src) {
1230 if (get_size(src_info) != ctx->temp_src_buffer.size) {
1231 free_temp_buffer(ctx->temp_src_buffer);
1232 // Create a temp buffer and set that as the source.
1233 if (COPYBIT_SUCCESS != get_temp_buffer(src_info,
1234 ctx->temp_src_buffer)) {
1235 ALOGE("%s: get_temp_buffer(src) failed", __FUNCTION__);
1236 delete_handle(dst_hnd);
1237 delete_handle(src_hnd);
1238 unmap_gpuaddr(ctx, mapped_dst_idx);
1239 return COPYBIT_FAILURE;
1240 }
1241 }
1242 src_hnd->fd = ctx->temp_src_buffer.fd;
1243 src_hnd->size = ctx->temp_src_buffer.size;
1244 src_hnd->flags = ctx->temp_src_buffer.allocType;
1245 src_hnd->base = (int)(ctx->temp_src_buffer.base);
1246 src_hnd->offset = ctx->temp_src_buffer.offset;
1247 src_hnd->gpuaddr = 0;
1248 src_image.handle = src_hnd;
1249
1250 // Copy the source.
1251 status = copy_image((private_handle_t *)src->handle, &src_image,
1252 CONVERT_TO_C2D_FORMAT);
1253 if (status == COPYBIT_FAILURE) {
1254 ALOGE("%s:copy_image failed in temp source",__FUNCTION__);
1255 delete_handle(dst_hnd);
1256 delete_handle(src_hnd);
1257 unmap_gpuaddr(ctx, mapped_dst_idx);
1258 return status;
1259 }
1260
1261 // Clean the cache
1262 IMemAlloc* memalloc = sAlloc->getAllocator(src_hnd->flags);
1263 if (memalloc->clean_buffer((void *)(src_hnd->base), src_hnd->size,
1264 src_hnd->offset, src_hnd->fd,
1265 gralloc::CACHE_CLEAN)) {
1266 ALOGE("%s: clean_buffer failed", __FUNCTION__);
1267 delete_handle(dst_hnd);
1268 delete_handle(src_hnd);
1269 unmap_gpuaddr(ctx, mapped_dst_idx);
1270 return COPYBIT_FAILURE;
1271 }
1272 }
1273
1274 flags |= (ctx->is_premultiplied_alpha) ? FLAGS_PREMULTIPLIED_ALPHA : 0;
1275 flags |= (ctx->dst_surface_type != RGB_SURFACE) ? FLAGS_YUV_DESTINATION : 0;
1276 status = set_image(ctx, src_surface.surface_id, &src_image,
1277 (eC2DFlags)flags, mapped_src_idx);
1278 if(status) {
1279 ALOGE("%s: set_image (src) error", __FUNCTION__);
1280 delete_handle(dst_hnd);
1281 delete_handle(src_hnd);
1282 unmap_gpuaddr(ctx, mapped_dst_idx);
1283 unmap_gpuaddr(ctx, mapped_src_idx);
1284 return COPYBIT_FAILURE;
1285 }
1286
1287 src_surface.config_mask = C2D_NO_ANTIALIASING_BIT | ctx->config_mask;
1288 src_surface.global_alpha = ctx->src_global_alpha;
1289 if (enableBlend) {
1290 if(src_surface.config_mask & C2D_GLOBAL_ALPHA_BIT) {
1291 src_surface.config_mask &= ~C2D_ALPHA_BLEND_NONE;
1292 if(!(src_surface.global_alpha)) {
1293 // src alpha is zero
1294 delete_handle(dst_hnd);
1295 delete_handle(src_hnd);
1296 unmap_gpuaddr(ctx, mapped_dst_idx);
1297 unmap_gpuaddr(ctx, mapped_src_idx);
1298 return COPYBIT_FAILURE;
1299 }
1300 }
1301 } else {
1302 src_surface.config_mask |= C2D_ALPHA_BLEND_NONE;
1303 }
1304
1305 if (src_surface_type == RGB_SURFACE) {
1306 ctx->blit_rgb_object[ctx->blit_rgb_count] = src_surface;
1307 ctx->blit_rgb_count++;
1308 } else if (src_surface_type == YUV_SURFACE_2_PLANES) {
1309 ctx->blit_yuv_2_plane_object[ctx->blit_yuv_2_plane_count] = src_surface;
1310 ctx->blit_yuv_2_plane_count++;
1311 } else {
1312 ctx->blit_yuv_3_plane_object[ctx->blit_yuv_3_plane_count] = src_surface;
1313 ctx->blit_yuv_3_plane_count++;
1314 }
1315
1316 struct copybit_rect_t clip;
1317 while ((status == 0) && region->next(region, &clip)) {
1318 set_rects(ctx, &(src_surface), dst_rect, src_rect, &clip);
1319 if (ctx->blit_count == MAX_BLIT_OBJECT_COUNT) {
1320 ALOGW("Reached end of blit count");
1321 finish_copybit(dev);
1322 }
1323 ctx->blit_list[ctx->blit_count] = src_surface;
1324 ctx->blit_count++;
1325 }
1326
1327 // Check if we need to perform an early draw-finish.
1328 flags |= (need_temp_dst || need_temp_src) ? FLAGS_TEMP_SRC_DST : 0;
1329 if (need_to_execute_draw(ctx, (eC2DFlags)flags))
1330 {
1331 finish_copybit(dev);
1332 }
1333
1334 if (need_temp_dst) {
1335 // copy the temp. destination without the alignment to the actual
1336 // destination.
1337 status = copy_image(dst_hnd, dst, CONVERT_TO_ANDROID_FORMAT);
1338 if (status == COPYBIT_FAILURE) {
1339 ALOGE("%s:copy_image failed in temp Dest",__FUNCTION__);
1340 delete_handle(dst_hnd);
1341 delete_handle(src_hnd);
1342 unmap_gpuaddr(ctx, mapped_dst_idx);
1343 unmap_gpuaddr(ctx, mapped_src_idx);
1344 return status;
1345 }
1346 // Clean the cache.
1347 IMemAlloc* memalloc = sAlloc->getAllocator(dst_hnd->flags);
1348 memalloc->clean_buffer((void *)(dst_hnd->base), dst_hnd->size,
1349 dst_hnd->offset, dst_hnd->fd,
1350 gralloc::CACHE_CLEAN);
1351 }
1352 delete_handle(dst_hnd);
1353 delete_handle(src_hnd);
1354
1355 ctx->is_premultiplied_alpha = false;
1356 ctx->fb_width = 0;
1357 ctx->fb_height = 0;
1358 ctx->config_mask = 0;
1359 return status;
1360 }
1361
1362
1363 static int stretch_copybit(
1364 struct copybit_device_t *dev,
1365 struct copybit_image_t const *dst,
1366 struct copybit_image_t const *src,
1367 struct copybit_rect_t const *dst_rect,
1368 struct copybit_rect_t const *src_rect,
1369 struct copybit_region_t const *region)
1370 {
1371 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1372 int status = COPYBIT_SUCCESS;
1373 bool needsBlending = (ctx->src_global_alpha != 0);
1374 pthread_mutex_lock(&ctx->wait_cleanup_lock);
1375 status = stretch_copybit_internal(dev, dst, src, dst_rect, src_rect,
1376 region, needsBlending);
1377 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
1378 return status;
1379 }
1380
1381 /** Perform a blit type operation */
1382 static int blit_copybit(
1383 struct copybit_device_t *dev,
1384 struct copybit_image_t const *dst,
1385 struct copybit_image_t const *src,
1386 struct copybit_region_t const *region)
1387 {
1388 int status = COPYBIT_SUCCESS;
1389 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1390 struct copybit_rect_t dr = { 0, 0, dst->w, dst->h };
1391 struct copybit_rect_t sr = { 0, 0, src->w, src->h };
1392 pthread_mutex_lock(&ctx->wait_cleanup_lock);
1393 status = stretch_copybit_internal(dev, dst, src, &dr, &sr, region, false);
1394 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
1395 return status;
1396 }
1397
1398 /*****************************************************************************/
1399
1400 static void clean_up(copybit_context_t* ctx)
1401 {
1402 void* ret;
1403 if (!ctx)
1404 return;
1405
1406 // stop the wait_cleanup_thread
1407 pthread_mutex_lock(&ctx->wait_cleanup_lock);
1408 ctx->stop_thread = true;
1409 // Signal waiting thread
1410 pthread_cond_signal(&ctx->wait_cleanup_cond);
1411 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
1412 // waits for the cleanup thread to exit
1413 pthread_join(ctx->wait_thread_id, &ret);
1414 pthread_mutex_destroy(&ctx->wait_cleanup_lock);
1415 pthread_cond_destroy (&ctx->wait_cleanup_cond);
1416
1417 for (int i = 0; i < NUM_SURFACE_TYPES; i++) {
1418 if (ctx->dst[i])
1419 LINK_c2dDestroySurface(ctx->dst[i]);
1420 }
1421
1422 for (int i = 0; i < MAX_RGB_SURFACES; i++) {
1423 if (ctx->blit_rgb_object[i].surface_id)
1424 LINK_c2dDestroySurface(ctx->blit_rgb_object[i].surface_id);
1425 }
1426
1427 for (int i = 0; i < MAX_YUV_2_PLANE_SURFACES; i++) {
1428 if (ctx->blit_yuv_2_plane_object[i].surface_id)
1429 LINK_c2dDestroySurface(ctx->blit_yuv_2_plane_object[i].surface_id);
1430 }
1431
1432 for (int i = 0; i < MAX_YUV_3_PLANE_SURFACES; i++) {
1433 if (ctx->blit_yuv_3_plane_object[i].surface_id)
1434 LINK_c2dDestroySurface(ctx->blit_yuv_3_plane_object[i].surface_id);
1435 }
1436
1437 if (ctx->libc2d2) {
1438 ::dlclose(ctx->libc2d2);
1439 ALOGV("dlclose(libc2d2)");
1440 }
1441
1442 free(ctx);
1443 }
1444
1445 /** Close the copybit device */
1446 static int close_copybit(struct hw_device_t *dev)
1447 {
1448 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1449 if (ctx) {
1450 free_temp_buffer(ctx->temp_src_buffer);
1451 free_temp_buffer(ctx->temp_dst_buffer);
1452 }
1453 clean_up(ctx);
1454 return 0;
1455 }
1456
1457 /** Open a new instance of a copybit device using name */
1458 static int open_copybit(const struct hw_module_t* module, const char* name,
1459 struct hw_device_t** device)
1460 {
1461 int status = COPYBIT_SUCCESS;
1462 C2D_RGB_SURFACE_DEF surfDefinition = {0};
1463 C2D_YUV_SURFACE_DEF yuvSurfaceDef = {0} ;
1464 struct copybit_context_t *ctx;
1465 char fbName[64];
1466
1467 ctx = (struct copybit_context_t *)malloc(sizeof(struct copybit_context_t));
1468 if(!ctx) {
1469 ALOGE("%s: malloc failed", __FUNCTION__);
1470 return COPYBIT_FAILURE;
1471 }
1472
1473 /* initialize drawstate */
1474 memset(ctx, 0, sizeof(*ctx));
1475 ctx->libc2d2 = ::dlopen("libC2D2.so", RTLD_NOW);
1476 if (!ctx->libc2d2) {
1477 ALOGE("FATAL ERROR: could not dlopen libc2d2.so: %s", dlerror());
1478 clean_up(ctx);
1479 status = COPYBIT_FAILURE;
1480 *device = NULL;
1481 return status;
1482 }
1483 *(void **)&LINK_c2dCreateSurface = ::dlsym(ctx->libc2d2,
1484 "c2dCreateSurface");
1485 *(void **)&LINK_c2dUpdateSurface = ::dlsym(ctx->libc2d2,
1486 "c2dUpdateSurface");
1487 *(void **)&LINK_c2dReadSurface = ::dlsym(ctx->libc2d2,
1488 "c2dReadSurface");
1489 *(void **)&LINK_c2dDraw = ::dlsym(ctx->libc2d2, "c2dDraw");
1490 *(void **)&LINK_c2dFlush = ::dlsym(ctx->libc2d2, "c2dFlush");
1491 *(void **)&LINK_c2dFinish = ::dlsym(ctx->libc2d2, "c2dFinish");
1492 *(void **)&LINK_c2dWaitTimestamp = ::dlsym(ctx->libc2d2,
1493 "c2dWaitTimestamp");
1494 *(void **)&LINK_c2dDestroySurface = ::dlsym(ctx->libc2d2,
1495 "c2dDestroySurface");
1496 *(void **)&LINK_c2dMapAddr = ::dlsym(ctx->libc2d2,
1497 "c2dMapAddr");
1498 *(void **)&LINK_c2dUnMapAddr = ::dlsym(ctx->libc2d2,
1499 "c2dUnMapAddr");
1500 *(void **)&LINK_c2dGetDriverCapabilities = ::dlsym(ctx->libc2d2,
1501 "c2dGetDriverCapabilities");
1502 *(void **)&LINK_c2dCreateFenceFD = ::dlsym(ctx->libc2d2,
1503 "c2dCreateFenceFD");
1504 *(void **)&LINK_c2dFillSurface = ::dlsym(ctx->libc2d2,
1505 "c2dFillSurface");
1506
1507 if (!LINK_c2dCreateSurface || !LINK_c2dUpdateSurface || !LINK_c2dReadSurface
1508 || !LINK_c2dDraw || !LINK_c2dFlush || !LINK_c2dWaitTimestamp ||
1509 !LINK_c2dFinish || !LINK_c2dDestroySurface ||
1510 !LINK_c2dGetDriverCapabilities || !LINK_c2dCreateFenceFD ||
1511 !LINK_c2dFillSurface) {
1512 ALOGE("%s: dlsym ERROR", __FUNCTION__);
1513 clean_up(ctx);
1514 status = COPYBIT_FAILURE;
1515 *device = NULL;
1516 return status;
1517 }
1518
1519 ctx->device.common.tag = HARDWARE_DEVICE_TAG;
1520 ctx->device.common.version = 1;
1521 ctx->device.common.module = (hw_module_t*)(module);
1522 ctx->device.common.close = close_copybit;
1523 ctx->device.set_parameter = set_parameter_copybit;
1524 ctx->device.get = get;
1525 ctx->device.blit = blit_copybit;
1526 ctx->device.stretch = stretch_copybit;
1527 ctx->device.finish = finish_copybit;
1528 ctx->device.flush_get_fence = flush_get_fence_copybit;
1529 ctx->device.clear = clear_copybit;
1530
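/* The destination and source surface templates below are created once with
 * dummy 1x1 (RGB) / 4x4 (YUV) definitions; set_image() later updates them per
 * blit via c2dUpdateSurface().
 */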
1531 /* Create RGB Surface */
1532 surfDefinition.buffer = (void*)0xdddddddd;
1533 surfDefinition.phys = (void*)0xdddddddd;
1534 surfDefinition.stride = 1 * 4;
1535 surfDefinition.width = 1;
1536 surfDefinition.height = 1;
1537 surfDefinition.format = C2D_COLOR_FORMAT_8888_ARGB;
1538 if (LINK_c2dCreateSurface(&(ctx->dst[RGB_SURFACE]), C2D_TARGET | C2D_SOURCE,
1539 (C2D_SURFACE_TYPE)(C2D_SURFACE_RGB_HOST |
1540 C2D_SURFACE_WITH_PHYS |
1541 C2D_SURFACE_WITH_PHYS_DUMMY ),
1542 &surfDefinition)) {
1543 ALOGE("%s: create ctx->dst_surface[RGB_SURFACE] failed", __FUNCTION__);
1544 ctx->dst[RGB_SURFACE] = 0;
1545 clean_up(ctx);
1546 status = COPYBIT_FAILURE;
1547 *device = NULL;
1548 return status;
1549 }
1550
1551 unsigned int surface_id = 0;
1552 for (int i = 0; i < MAX_RGB_SURFACES; i++)
1553 {
1554 if (LINK_c2dCreateSurface(&surface_id, C2D_TARGET | C2D_SOURCE,
1555 (C2D_SURFACE_TYPE)(C2D_SURFACE_RGB_HOST |
1556 C2D_SURFACE_WITH_PHYS |
1557 C2D_SURFACE_WITH_PHYS_DUMMY ),
1558 &surfDefinition)) {
1559 ALOGE("%s: create RGB source surface %d failed", __FUNCTION__, i);
1560 ctx->blit_rgb_object[i].surface_id = 0;
1561 status = COPYBIT_FAILURE;
1562 break;
1563 } else {
1564 ctx->blit_rgb_object[i].surface_id = surface_id;
1565 ALOGW("%s i = %d surface_id=%d", __FUNCTION__, i,
1566 ctx->blit_rgb_object[i].surface_id);
1567 }
1568 }
1569
1570 if (status == COPYBIT_FAILURE) {
1571 clean_up(ctx);
1572 status = COPYBIT_FAILURE;
1573 *device = NULL;
1574 return status;
1575 }
1576
1577 // Create 2 plane YUV surfaces
1578 yuvSurfaceDef.format = C2D_COLOR_FORMAT_420_NV12;
1579 yuvSurfaceDef.width = 4;
1580 yuvSurfaceDef.height = 4;
1581 yuvSurfaceDef.plane0 = (void*)0xaaaaaaaa;
1582 yuvSurfaceDef.phys0 = (void*) 0xaaaaaaaa;
1583 yuvSurfaceDef.stride0 = 4;
1584
1585 yuvSurfaceDef.plane1 = (void*)0xaaaaaaaa;
1586 yuvSurfaceDef.phys1 = (void*) 0xaaaaaaaa;
1587 yuvSurfaceDef.stride1 = 4;
1588 if (LINK_c2dCreateSurface(&(ctx->dst[YUV_SURFACE_2_PLANES]),
1589 C2D_TARGET | C2D_SOURCE,
1590 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1591 C2D_SURFACE_WITH_PHYS |
1592 C2D_SURFACE_WITH_PHYS_DUMMY),
1593 &yuvSurfaceDef)) {
1594 ALOGE("%s: create ctx->dst[YUV_SURFACE_2_PLANES] failed", __FUNCTION__);
1595 ctx->dst[YUV_SURFACE_2_PLANES] = 0;
1596 clean_up(ctx);
1597 status = COPYBIT_FAILURE;
1598 *device = NULL;
1599 return status;
1600 }
1601
1602 for (int i=0; i < MAX_YUV_2_PLANE_SURFACES; i++)
1603 {
1604 if (LINK_c2dCreateSurface(&surface_id, C2D_TARGET | C2D_SOURCE,
1605 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1606 C2D_SURFACE_WITH_PHYS |
1607 C2D_SURFACE_WITH_PHYS_DUMMY ),
1608 &yuvSurfaceDef)) {
1609 ALOGE("%s: create YUV source %d failed", __FUNCTION__, i);
1610 ctx->blit_yuv_2_plane_object[i].surface_id = 0;
1611 status = COPYBIT_FAILURE;
1612 break;
1613 } else {
1614 ctx->blit_yuv_2_plane_object[i].surface_id = surface_id;
1615 ALOGW("%s: 2 Plane YUV i=%d surface_id=%d", __FUNCTION__, i,
1616 ctx->blit_yuv_2_plane_object[i].surface_id);
1617 }
1618 }
1619
1620 if (status == COPYBIT_FAILURE) {
1621 clean_up(ctx);
1622 status = COPYBIT_FAILURE;
1623 *device = NULL;
1624 return status;
1625 }
1626
1627 // Create YUV 3 plane surfaces
1628 yuvSurfaceDef.format = C2D_COLOR_FORMAT_420_YV12;
1629 yuvSurfaceDef.plane2 = (void*)0xaaaaaaaa;
1630 yuvSurfaceDef.phys2 = (void*) 0xaaaaaaaa;
1631 yuvSurfaceDef.stride2 = 4;
1632
1633 if (LINK_c2dCreateSurface(&(ctx->dst[YUV_SURFACE_3_PLANES]),
1634 C2D_TARGET | C2D_SOURCE,
1635 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1636 C2D_SURFACE_WITH_PHYS |
1637 C2D_SURFACE_WITH_PHYS_DUMMY),
1638 &yuvSurfaceDef)) {
1639 ALOGE("%s: create ctx->dst[YUV_SURFACE_3_PLANES] failed", __FUNCTION__);
1640 ctx->dst[YUV_SURFACE_3_PLANES] = 0;
1641 clean_up(ctx);
1642 status = COPYBIT_FAILURE;
1643 *device = NULL;
1644 return status;
1645 }
1646
1647 for (int i=0; i < MAX_YUV_3_PLANE_SURFACES; i++)
1648 {
1649 if (LINK_c2dCreateSurface(&(surface_id),
1650 C2D_TARGET | C2D_SOURCE,
1651 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1652 C2D_SURFACE_WITH_PHYS |
1653 C2D_SURFACE_WITH_PHYS_DUMMY),
1654 &yuvSurfaceDef)) {
1655 ALOGE("%s: create 3 plane YUV surface %d failed", __FUNCTION__, i);
1656 ctx->blit_yuv_3_plane_object[i].surface_id = 0;
1657 status = COPYBIT_FAILURE;
1658 break;
1659 } else {
1660 ctx->blit_yuv_3_plane_object[i].surface_id = surface_id;
1661 ALOGW("%s: 3 Plane YUV i=%d surface_id=%d", __FUNCTION__, i,
1662 ctx->blit_yuv_3_plane_object[i].surface_id);
1663 }
1664 }
1665
1666 if (status == COPYBIT_FAILURE) {
1667 clean_up(ctx);
1668 status = COPYBIT_FAILURE;
1669 *device = NULL;
1670 return status;
1671 }
1672
1673 if (LINK_c2dGetDriverCapabilities(&(ctx->c2d_driver_info))) {
1674 ALOGE("%s: LINK_c2dGetDriverCapabilities failed", __FUNCTION__);
1675 clean_up(ctx);
1676 status = COPYBIT_FAILURE;
1677 *device = NULL;
1678 return status;
1679 }
1680 // Initialize context variables.
1681 ctx->trg_transform = C2D_TARGET_ROTATE_0;
1682
1683 ctx->temp_src_buffer.fd = -1;
1684 ctx->temp_src_buffer.base = 0;
1685 ctx->temp_src_buffer.size = 0;
1686
1687 ctx->temp_dst_buffer.fd = -1;
1688 ctx->temp_dst_buffer.base = 0;
1689 ctx->temp_dst_buffer.size = 0;
1690
1691 ctx->fb_width = 0;
1692 ctx->fb_height = 0;
1693
1694 ctx->blit_rgb_count = 0;
1695 ctx->blit_yuv_2_plane_count = 0;
1696 ctx->blit_yuv_3_plane_count = 0;
1697 ctx->blit_count = 0;
1698
1699 ctx->wait_timestamp = false;
1700 ctx->stop_thread = false;
1701 pthread_mutex_init(&(ctx->wait_cleanup_lock), NULL);
1702 pthread_cond_init(&(ctx->wait_cleanup_cond), NULL);
1703 /* Start the wait thread */
1704 pthread_attr_t attr;
1705 pthread_attr_init(&attr);
1706 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
1707
1708 pthread_create(&ctx->wait_thread_id, &attr, &c2d_wait_loop,
1709 (void *)ctx);
1710 pthread_attr_destroy(&attr);
1711
1712 *device = &ctx->device.common;
1713 return status;
1714 }
1715