/*
 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "virtual.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>
#include "hwc_copybit.h"

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sHandleTimeout = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sSimulationFlags = 0;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
bool MDPComp::sEnable4k2kYUVSplit = false;
bool MDPComp::sSrcSplitEnabled = false;
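/* Factory: pick the composition strategy for this display. Source-split
 * targets take precedence over dual-mixer (display-split) targets; everything
 * else gets the non-split implementation. */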
MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}

MDPComp::MDPComp(int dpy):mDpy(dpy){}

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
            (mDpy == 0) ? "\"PRIMARY\"" :
            (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
            "fbCount:%2d \n", mCurrentFrame.layerCount,
            mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
            (mCurrentFrame.needsRedraw? "YES" : "NO"),
            mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf," --------------------------------------------- \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype  |  Z  \n");
    dumpsys_log(buf," --------------------------------------------- \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                    mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                     (mCurrentFrame.drop[index] ? "DROP" :
                      (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
    mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        //Create the idle invalidator only when not disabled through property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx,
                                  (unsigned int)idle_timeout);
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
             !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) {
        sEnable4k2kYUVSplit = true;
    }

    if ((property_get("persist.hwc.ptor.enable", property, NULL) > 0) &&
            ((!strncasecmp(property, "true", PROPERTY_VALUE_MAX )) ||
             (!strncmp(property, "1", PROPERTY_VALUE_MAX )))) {
        ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit(ctx,
                                                         HWC_DISPLAY_PRIMARY);
    }

    return true;
}

void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
}

void MDPComp::reset() {
    sHandleTimeout = false;
    mModeOn = false;
}

void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }
    Locker::Autolock _l(ctx->mDrawLock);
    // Handle timeout event only if the previous composition is MDP or MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

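/* Translate the decisions recorded in mCurrentFrame into HWC flags: layers
 * composed by MDP are marked HWC_OVERLAY, and cached/dropped FB layers are
 * also marked HWC_OVERLAY so SurfaceFlinger does not redraw them with GLES. */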
void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in the FB OR when it
             * lies outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
            (list->flags & HWC_GEOMETRY_CHANGED) ||
            isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_PIPES_PER_MIXER; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We don't own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // populate layer and MDP maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}

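/* LayerCache snapshots the previous frame's buffer handles and composition
 * decisions, letting setRedraw() detect frames identical to the last one and
 * skip the FB redraw. */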
MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&hnd, 0, sizeof(hnd));
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
    const int numAppLayers = (int)list->numHwLayers - 1;
    for(int i = 0; i < numAppLayers; i++) {
        hnd[i] = list->hwLayers[i].handle;
    }
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
        hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
                (curFrame.drop[i] != drop[i])) {
            return false;
        }
        if(curFrame.isFBComposed[i] &&
                (hnd[i] != list->hwLayers[i].handle)){
            return false;
        }
    }
    return true;
}

bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((has90Transform(layer) and (not isRotationDoable(ctx, hnd))) ||
        (not isValidDimension(ctx,layer))
        //More conditions here, SKIP, sRGB+Blend etc
        ) {
        return false;
    }
    return true;
}

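/* Validates a layer's dimensions against MDP pipe limits: minimum source
 * size, maximum downscale (with or without decimation) and maximum upscale. */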
bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    bool rotated90 = (bool)(layer->transform & HAL_TRANSFORM_ROT_90);
    int crop_w = rotated90 ? crop.bottom - crop.top : crop.right - crop.left;
    int crop_h = rotated90 ? crop.right - crop.left : crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);

    /* Workaround for an MDP HW limitation in DSI command mode panels where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or
     * height less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2.
     * Fall back to GPU if the crop is below these limits.
     */
    if((crop_w < 5)||(crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
                /* On targets that don't support decimation (e.g. 8x26),
                 * the maximum downscale supported is the overlay pipe
                 * downscale.
                 */
                if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale ||
                        h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* MDP can read a maximum of MAX_DISPLAY_DIM width.
                     * Bail out if
                     *      1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp
                     *      2. exceeds maximum downscale limit
                     */
                    if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) ||
                            w_dscale > maxMDPDownscale ||
                            h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale =
            qdutils::MDPVersion::getInstance().getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}

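/* Frame-level gates that rule out MDP comp outright, independent of layer
 * content: feature disabled, a video-transition or explicit padding round,
 * or a secondary display still configuring. */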
bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if((qdutils::MDPVersion::getInstance().is8x26() ||
               qdutils::MDPVersion::getInstance().is8x16() ||
               qdutils::MDPVersion::getInstance().is8x39()) &&
            ctx->mVideoTransFlag &&
            isSecondaryConnected(ctx)) {
        //1 padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                 __FUNCTION__);
        ret = false;
    } else if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__,mDpy);
        ret = false;
    }
    return ret;
}

void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    fbRect = getIntersection(fbRect, roi);
}

/* 1) Identify layers that are not visible or lying outside the updating ROI
 *    and drop them from composition.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset the frame ROI when any layer which needs scaling also
             * needs ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* Deduct any opaque region from visibleRect */
            if (layer->blending == HWC_BLENDING_NONE)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting for all the updating layers'
 * displayFrames. If DirtyRegion is applicable, calculate it by accounting for
 * all the changing layers' dirtyRegions. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if ((mCachedFrame.hnd[index] != layer->handle) ||
                isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dst = layer->displayFrame;
            hwc_rect_t updatingRect = dst;

#ifdef QCOM_BSP
            if(!needsScaling(layer) && !layer->transform)
            {
                hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
                int x_off = dst.left - src.left;
                int y_off = dst.top - src.top;
                updatingRect = moveRect(layer->dirtyRect, x_off, y_off);
            }
#endif

            roi = getUnion(roi, updatingRect);
        }
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
             ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
             ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

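/* On split displays the FB rect is trimmed against each half's ROI
 * separately and the pieces re-unioned, so a rect spanning the seam keeps
 * both contributing halves. */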
void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect r_roi = ctx->listStats[mDpy].rRoi;

    hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi);
    hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi);
    fbRect = getUnion(l_fbRect, r_fbRect);
}

/* 1) Identify layers that are not visible or lying outside BOTH the updating
 *    ROI's and drop them from composition. If a layer spans both halves of
 *    the screen but is needed by only one ROI, the non-contributing half
 *    will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset the frame ROI when any layer which needs scaling also
             * needs ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if (layer->blending == HWC_BLENDING_NONE) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting for all the updating layers'
 * displayFrames. If DirtyRegion is applicable, calculate it by accounting for
 * all the changing layers' dirtyRegions. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if ((mCachedFrame.hnd[index] != layer->handle) ||
                isYuvBuffer(hnd)) {
            hwc_rect_t dst = layer->displayFrame;
            hwc_rect_t updatingRect = dst;

#ifdef QCOM_BSP
            if(!needsScaling(layer) && !layer->transform)
            {
                hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
                int x_off = dst.left - src.left;
                int y_off = dst.top - src.top;
                updatingRect = moveRect(layer->dirtyRect, x_off, y_off);
            }
#endif

            hwc_rect_t l_dst = getIntersection(l_frame, updatingRect);
            if(isValidRect(l_dst))
                l_roi = getUnion(l_roi, l_dst);

            hwc_rect_t r_dst = getIntersection(r_frame, updatingRect);
            if(isValidRect(r_dst))
                r_roi = getUnion(r_roi, r_dst);
        }
    }

    /* For panels that cannot accept commands on both interfaces, we cannot
     * send two ROI's (one for each half). We merge them into a single ROI
     * and split it across lSplit for MDP mixer use. The ROI's will be merged
     * again finally before updating the panel in the driver. */
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]"
             "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
             ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
             ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
             ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
             ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                 __FUNCTION__,
                 isSkipPresent(ctx, mDpy));
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) &&
            (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) {
        // Disable MDP comp on the secondary when the primary is a high-res
        // panel and the secondary is a normal 1080p: in such a usecase,
        // decimation gets used for downscale on the secondary, so there will
        // be a quality mismatch whenever we fall back to GPU comp.
        ALOGD_IF(isDebug(), "%s: Disable MDP Composition for Secondary Disp",
                 __FUNCTION__);
        return false;
    }

    // Check for the action safe flag and downscale mode, which require scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
            || ctx->dpyAttr[mDpy].mDownScaleMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                         __FUNCTION__, mDpy);
                return false;
            }
        }

        // For 8x26 with panel width > 1k, fail MDP comp if an RGB layer
        // needs HFLIP; may not be needed if GFX pre-rotation can handle
        // all flips & rotations.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                (ctx->dpyAttr[mDpy].xres > 1024) &&
                (layer->transform & HWC_TRANSFORM_FLIP_H) &&
                (!isYuvBuffer(hnd)))
            return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all the above hard conditions are met we can do full or partial
    //MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(fullMDPCompWithPTOR(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}

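/* Full MDP comp: every app layer (minus any ROI-dropped ones) gets an MDP
 * pipe and the FB target is not used at all. */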
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    //Will benefit presentation / secondary-only layers.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
            (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
           not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Full MDP Composition with Peripheral Tiny Overlap Removal.
 * MDP bandwidth limitations can be avoided, if the overlap region
 * covered by the smallest layer at a higher z-order, gets composed
 * by Copybit on a render buffer, which can be queued to MDP.
 */
bool MDPComp::fullMDPCompWithPTOR(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    // Hard checks where we cannot use this mode
    if (mDpy || !ctx->mCopyBit[mDpy] || isDisplaySplit(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: Feature not supported!", __FUNCTION__);
        return false;
    }

    // Frame level checks
    if ((numAppLayers > stagesForMDP) || isSkipPresent(ctx, mDpy) ||
            isYuvPresent(ctx, mDpy) || mCurrentFrame.dropCount ||
            isSecurePresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: Frame not supported!", __FUNCTION__);
        return false;
    }
    // MDP comp checks
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    /* We cannot use this composition mode, if:
       1. A below layer needs scaling.
       2. Overlap is not peripheral to display.
       3. Overlap or a below layer has 90 degree transform.
       4. Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
     */

    int minLayerIndex[MAX_PTOR_LAYERS] = { -1, -1};
    hwc_rect_t overlapRect[MAX_PTOR_LAYERS];
    memset(overlapRect, 0, sizeof(overlapRect));
    int layerPixelCount, minPixelCount = 0;
    int numPTORLayersFound = 0;
    for (int i = numAppLayers-1; (i >= 0 &&
                                  numPTORLayersFound < MAX_PTOR_LAYERS); i--) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
        hwc_rect_t dispFrame = layer->displayFrame;
        layerPixelCount = (crop.right - crop.left) * (crop.bottom - crop.top);
        // A PTOR layer should be peripheral and cannot have a transform
        if (!isPeripheral(dispFrame, ctx->mViewFrame[mDpy]) ||
                has90Transform(layer)) {
            continue;
        }
        if((3 * (layerPixelCount + minPixelCount)) >
                ((int)ctx->dpyAttr[mDpy].xres * (int)ctx->dpyAttr[mDpy].yres)) {
            // Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
            continue;
        }
        // Found the PTOR layer
        bool found = true;
        for (int j = i-1; j >= 0; j--) {
            // Check if the layers below this layer qualify for PTOR comp
            hwc_layer_1_t* layer = &list->hwLayers[j];
            hwc_rect_t disFrame = layer->displayFrame;
            // A layer below the PTOR layer that intersects it and has a
            // 90 degree transform or needs scaling cannot be supported.
            if ((isValidRect(getIntersection(dispFrame, disFrame)))
                    && (has90Transform(layer) || needsScaling(layer))) {
                found = false;
                break;
            }
        }
        // Store the minLayer index
        if(found) {
            minLayerIndex[numPTORLayersFound] = i;
            overlapRect[numPTORLayersFound] = list->hwLayers[i].displayFrame;
            minPixelCount += layerPixelCount;
            numPTORLayersFound++;
        }
    }

    if(isValidRect(getIntersection(overlapRect[0], overlapRect[1]))) {
        ALOGD_IF(isDebug(), "%s: Ignore Rect2 as it intersects with Rect1",
                 __FUNCTION__);
        // reset the second minLayerIndex[1];
        minLayerIndex[1] = -1;
        numPTORLayersFound--;
    }

    // No overlap layers
    if (!numPTORLayersFound)
        return false;

    ctx->mPtorInfo.count = numPTORLayersFound;
    for(int i = 0; i < MAX_PTOR_LAYERS; i++) {
        ctx->mPtorInfo.layerIndex[i] = minLayerIndex[i];
    }

    if (!ctx->mCopyBit[mDpy]->prepareOverlap(ctx, list)) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        return false;
    }
    // Store the displayFrame and the sourceCrops of the layers
    hwc_rect_t displayFrame[numAppLayers];
    hwc_rect_t sourceCrop[numAppLayers];
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        displayFrame[i] = layer->displayFrame;
        sourceCrop[i] = integerizeSourceCrop(layer->sourceCropf);
    }

    for(int j = 0; j < numPTORLayersFound; j++) {
        int index =  ctx->mPtorInfo.layerIndex[j];
        // Remove the overlap from the crop & displayFrame of the below layers
        for (int i = 0; i < index && index !=-1; i++) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(!isValidRect(getIntersection(layer->displayFrame,
                                            overlapRect[j]))) {
                continue;
            }
            // Update layer attributes
            hwc_rect_t srcCrop = integerizeSourceCrop(layer->sourceCropf);
            hwc_rect_t destRect = deductRect(layer->displayFrame,
                                             overlapRect[j]);
            qhwc::calculate_crop_rects(srcCrop, layer->displayFrame, destRect,
                                       layer->transform);
            layer->sourceCropf.left = (float)srcCrop.left;
            layer->sourceCropf.top = (float)srcCrop.top;
            layer->sourceCropf.right = (float)srcCrop.right;
            layer->sourceCropf.bottom = (float)srcCrop.bottom;
        }
    }

    mCurrentFrame.mdpCount = numAppLayers;
    mCurrentFrame.fbCount = 0;
    mCurrentFrame.fbZ = -1;

    for (int j = 0; j < numAppLayers; j++)
        mCurrentFrame.isFBComposed[j] = false;

    bool result = postHeuristicsHandling(ctx, list);

    // Restore layer attributes
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        layer->displayFrame = displayFrame[i];
        layer->sourceCropf.left = (float)sourceCrop[i].left;
        layer->sourceCropf.top = (float)sourceCrop[i].top;
        layer->sourceCropf.right = (float)sourceCrop[i].right;
        layer->sourceCropf.bottom = (float)sourceCrop[i].bottom;
    }

    if (!result) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        reset(ctx);
    } else {
        ALOGD_IF(isDebug(), "%s: PTOR Indexes: %d and %d", __FUNCTION__,
                 ctx->mPtorInfo.layerIndex[0], ctx->mPtorInfo.layerIndex[1]);
    }

    ALOGD_IF(isDebug(), "%s: Postheuristics %s!", __FUNCTION__,
             (result ? "successful" : "failed"));
    return result;
}

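/* Mixed mode: compose part of the list on MDP pipes and batch the rest into
 * the FB target. Cache-based batching is preferred when geometry is stable;
 * on a geometry change, load-based batching is attempted first. */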
bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
        ret = loadBasedComp(ctx, list) or
                cacheBasedComp(ctx, list);
    } else {
        ret = cacheBasedComp(ctx, list) or
                loadBasedComp(ctx, list);
    }

    return ret;
}

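/* Cache-based comp: layers whose buffers are unchanged since the previous
 * frame stay in the already-rendered FB, while updating layers are pushed to
 * MDP pipes. */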
bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);

    //If an MDP-marked layer is unsupported, we cannot do partial MDP comp.
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                         __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    //Will benefit cases where a video has a non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
            (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

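/* Load-based comp: used when caching cannot help (e.g. on geometry changes).
 * The bottom-most layers are assigned to MDP pipes and the topmost batch is
 * left to the FB, shrinking the MDP batch until the configuration validates. */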
bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    for(int i = 0; i < numAppLayers;i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
             "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
             mCurrentFrame.dropCount);

    //Start at a point where the fb batch has at least 2 layers, for this
    //mode to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }

    //If there are no layers for MDP, this mode doesn't make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                 __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try with successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                 __FUNCTION__, mdpBatchSize, fbBatchSize,
                 mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                     __FUNCTION__);
            ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED",
                     __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}

bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
    if(mDpy or isSecurePresent(ctx, mDpy) or
            isYuvPresent(ctx, mDpy)) {
        return false;
    }
    return true;
}

bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
            isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
            mDpy ) {
        return false;
    }
    if(ctx->listStats[mDpy].secureUI)
        return false;
    return true;
}

bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    const bool secureOnly = true;
    return videoOnlyComp(ctx, list, not secureOnly) or
            videoOnlyComp(ctx, list, secureOnly);
}

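/* Video-only comp: the frame starts fully FB-composed, then updateYUV()
 * pulls eligible YUV layers (optionally only secure ones) onto MDP pipes. */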
bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list, bool secureOnly) {
    if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
        return false;
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    mCurrentFrame.reset(numAppLayers);
    mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
    updateYUV(ctx, list, secureOnly);
    int mdpCount = mCurrentFrame.mdpCount;

    if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
        reset(ctx);
        return false;
    }

    /* Bail out if we are processing only secured video layers
     * and we don't have any */
    if(!isSecurePresent(ctx, mDpy) && secureOnly){
        reset(ctx);
        return false;
    }

    if(mCurrentFrame.fbCount)
        mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
        return false;
    }

    if(has90Transform(layer) && !canUseRotator(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
        return false;
    }

    if(isSecuring(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
        return false;
    }

    if(!isValidDimension(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
                 __FUNCTION__);
        return false;
    }

    if(layer->planeAlpha < 0xFF) {
        ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha "
                 "in video only mode",
                 __FUNCTION__);
        return false;
    }

    return true;
}

/* Starts at fromIndex and checks each layer to find whether it overlaps any
 * updating layer above it in z-order, till the end of the batch. Returns
 * false if it finds any intersection. */
bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex) {
    for(int i = fromIndex; i < toIndex; i++) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
                return false;
            }
        }
    }
    return true;
}

/* Checks if the given layer at targetLayerIndex has any intersection with the
 * updating layers between fromIndex and toIndex. Returns true if it finds an
 * intersection. */
bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex, int targetLayerIndex) {
    for(int i = fromIndex; i <= toIndex; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            if(areLayersIntersecting(&list->hwLayers[i],
                                     &list->hwLayers[targetLayerIndex])) {
                return true;
            }
        }
    }
    return false;
}

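/* Scans the list for the largest contiguous batch of cached (FB) layers,
 * optionally pulling non-overlapping cached layers into the batch across
 * updating layers. Returns the z-order at which the FB target must be
 * stacked for the chosen batch. */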
int MDPComp::getBatch(hwc_display_contents_1_t* list,
        int& maxBatchStart, int& maxBatchEnd,
        int& maxBatchCount) {
    int i = 0;
    int fbZOrder = -1;
    int droppedLayerCt = 0;
    while (i < mCurrentFrame.layerCount) {
        int batchCount = 0;
        int batchStart = i;
        int batchEnd = i;
        /* Adjust the batch Z order by the layers dropped so far */
        int fbZ = batchStart - droppedLayerCt;
        int firstZReverseIndex = -1;
        int updatingLayersAbove = 0; //Updating layer count in middle of batch
        while(i < mCurrentFrame.layerCount) {
            if(!mCurrentFrame.isFBComposed[i]) {
                if(!batchCount) {
                    i++;
                    break;
                }
                updatingLayersAbove++;
                i++;
                continue;
            } else {
                if(mCurrentFrame.drop[i]) {
                    i++;
                    droppedLayerCt++;
                    continue;
                } else if(updatingLayersAbove <= 0) {
                    batchCount++;
                    batchEnd = i;
                    i++;
                    continue;
                } else { //FBComposed, not a drop & updatingLayersAbove > 0

                    // We already have a valid updating layer. If layer-i does
                    // not overlap any updating layer between batch-start and
                    // i, then we can add layer i to the batch.
                    if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
                        batchCount++;
                        batchEnd = i;
                        i++;
                        continue;
                    } else if(canPushBatchToTop(list, batchStart, i)) {
                        //If all the non-updating layers within this batch
                        //have no intersection with the updating layers above
                        //in z-order, then we can safely move the batch to a
                        //higher z-order. Increment fbZ as it moves up.
                        if( firstZReverseIndex < 0) {
                            firstZReverseIndex = i;
                        }
                        batchCount++;
                        batchEnd = i;
                        fbZ += updatingLayersAbove;
                        i++;
                        updatingLayersAbove = 0;
                        continue;
                    } else {
                        //Both checks failed. Start the loop again from here.
                        if(firstZReverseIndex >= 0) {
                            i = firstZReverseIndex;
                        }
                        break;
                    }
                }
            }
        }
        if(batchCount > maxBatchCount) {
            maxBatchCount = batchCount;
            maxBatchStart = batchStart;
            maxBatchEnd = batchEnd;
            fbZOrder = fbZ;
        }
    }
    return fbZOrder;
}

bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    /* The idea is to keep as many non-updating (cached) layers in the FB and
     * send the rest of them through MDP. This is done in 2 steps.
     *   1. Find the maximum contiguous batch of non-updating layers.
     *   2. See if we can improve this batch size for caching by adding
     *      opaque layers around the batch, if they don't have
     *      any overlap with the updating layers in between.
     * NEVER mark an updating layer for caching.
     * But cached ones can be marked for MDP */

    int maxBatchStart = -1;
    int maxBatchEnd = -1;
    int maxBatchCount = 0;
    int fbZ = -1;

    /* Nothing is cached. No batching needed */
    if(mCurrentFrame.fbCount == 0) {
        return true;
    }

    /* No MDP comp layers, try to use other comp modes */
    if(mCurrentFrame.mdpCount == 0) {
        return false;
    }

    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);

    /* Reset the rest of the layers lying inside the ROI for MDP comp */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if((i < maxBatchStart || i > maxBatchEnd) &&
                mCurrentFrame.isFBComposed[i]){
            if(!mCurrentFrame.drop[i]){
                //If an unsupported layer is being attempted to
                //be pulled out we should fail
                if(not isSupportedForMDPComp(ctx, layer)) {
                    return false;
                }
                mCurrentFrame.isFBComposed[i] = false;
            }
        }
    }

    // update the frame data
    mCurrentFrame.fbZ = fbZ;
    mCurrentFrame.fbCount = maxBatchCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
        mCurrentFrame.fbCount - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);

    return true;
}

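/* Compares each layer's buffer handle against the previous frame: unchanged
 * layers are treated as cached and left FB-composed, changed ones become MDP
 * candidates. */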
void MDPComp::updateLayerCache(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int fbCount = 0;

    for(int i = 0; i < numAppLayers; i++) {
        if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
            if(!mCurrentFrame.drop[i])
                fbCount++;
            mCurrentFrame.isFBComposed[i] = true;
        } else {
            mCurrentFrame.isFBComposed[i] = false;
        }
    }

    mCurrentFrame.fbCount = fbCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
        - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d"
             ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
             mCurrentFrame.dropCount);
}

void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
        bool secureOnly) {
    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0;index < nYuvCount; index++){
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];

        if(!isYUVDoable(ctx, layer)) {
            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = true;
                mCurrentFrame.fbCount++;
            }
        } else {
            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(!secureOnly || isSecureBuffer(hnd)) {
                    mCurrentFrame.isFBComposed[nYuvIndex] = false;
                    mCurrentFrame.fbCount--;
                }
            }
        }
    }

    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
        mCurrentFrame.fbCount - mCurrentFrame.dropCount;
    ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);
}

hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};

    /* Update only the region of the FB needed for composition */
    for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            fbRect = getUnion(fbRect, dst);
        }
    }
    trimAgainstROI(ctx, fbRect);
    return fbRect;
}

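/* Common tail for all strategies: run resource and HW-limitation checks,
 * configure the FB target (if used), allocate MDP pipes, assign z-orders,
 * and validate the complete configuration with the driver before accepting
 * it. */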
bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    //Capability checks
    if(!resourceCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
        return false;
    }

    //Limitations checks
    if(!hwLimitationsCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
        return false;
    }

    //Configure the framebuffer first if applicable
    if(mCurrentFrame.fbZ >= 0) {
        hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
        if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
        {
            ALOGD_IF(isDebug(), "%s configure framebuffer failed",
                     __FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.map();

    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }

    for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
            index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            //Leave fbZ for the framebuffer. CACHE/GLES layers go here.
            if(mdpNextZOrder == mCurrentFrame.fbZ) {
                mdpNextZOrder++;
            }
            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
                if(configure4k2kYuv(ctx, layer,
                        mCurrentFrame.mdpToLayer[mdpIndex])
                        != 0 ){
                    ALOGD_IF(isDebug(), "%s: Failed to configure split pipes "
                             "for layer %d",__FUNCTION__, index);
                    return false;
                }
                else{
                    mdpNextZOrder++;
                }
                continue;
            }
            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for "
                         "layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }

    if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
        ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
                 ,__FUNCTION__, mDpy);
        return false;
    }

    setRedraw(ctx, list);
    return true;
}

bool MDPComp::resourceCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    const bool fbUsed = mCurrentFrame.fbCount;
    if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }
    // Init rotCount to the number of rotator sessions used by other displays
    int rotCount = ctx->mRotMgr->getNumActiveSessions();
    // Count the number of rotator sessions required for the current display
    for (int index = 0; index < mCurrentFrame.layerCount; index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            hwc_layer_1_t* layer = &list->hwLayers[index];
            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
                rotCount++;
            }
        }
    }
    // If the number of layers to rotate exceeds max rotator sessions, bail out.
    if(rotCount > RotMgr::MAX_ROT_SESS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max rotator sessions %d",
                 __FUNCTION__, mDpy);
        return false;
    }
    return true;
}

bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {

    //A-family hw limitation:
    //If a layer needs alpha scaling, MDP cannot support it.
    if(ctx->mMDP.version < qdutils::MDSS_V5) {
        for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
            if(!mCurrentFrame.isFBComposed[i] &&
                    isAlphaScaled( &list->hwLayers[i])) {
                ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
                return false;
            }
        }
    }

    // On 8x26 & 8974 hw, we have a limitation of downscaling+blending.
    // If multiple layers require downscaling and they overlap, fall back
    // to GPU since MDSS cannot handle it.
    if(qdutils::MDPVersion::getInstance().is8x74v2() ||
            qdutils::MDPVersion::getInstance().is8x26()) {
        for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
            hwc_layer_1_t* botLayer = &list->hwLayers[i];
            if(!mCurrentFrame.isFBComposed[i] &&
                    isDownscaleRequired(botLayer)) {
                //If layer-i is marked for MDP and needs downscaling,
                //check if any MDP layer on top of i overlaps with layer-i
                for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
                    hwc_layer_1_t* topLayer = &list->hwLayers[j];
                    if(!mCurrentFrame.isFBComposed[j] &&
                            isDownscaleRequired(topLayer)) {
                        hwc_rect_t r = getIntersection(botLayer->displayFrame,
                                                       topLayer->displayFrame);
                        if(isValidRect(r))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}

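/* Per-frame entry point: resets state, handles the animation fallback, and
 * tries full-frame MDP comp followed by video-only comp, falling back to GPU
 * composition when neither applies. */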
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int ret = 0;
    char property[PROPERTY_VALUE_MAX];

    if(!ctx || !list) {
        ALOGE("%s: Invalid context or list",__FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    const int numLayers = ctx->listStats[mDpy].numAppLayers;

    if(property_get("debug.hwc.simulate", property, NULL) > 0) {
        int currentFlags = atoi(property);
        if(currentFlags != sSimulationFlags) {
            sSimulationFlags = currentFlags;
            ALOGE("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
                    sSimulationFlags, sSimulationFlags);
        }
    }
    // Reset PTOR on every prepare cycle of the primary display
    if(!mDpy)
        memset(&(ctx->mPtorInfo), 0, sizeof(ctx->mPtorInfo));

    //Do not cache the information for the next draw cycle.
    if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
        ALOGI("%s: Unsupported layer count for mdp composition",
                __FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    //Reset old data
    mCurrentFrame.reset(numLayers);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;

    // Detect the start of animation and fall back to GPU only once to cache
    // all the layers in FB and display FB content until animation completes.
    if(ctx->listStats[mDpy].isDisplayAnimating) {
        mCurrentFrame.needsRedraw = false;
        if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
            mCurrentFrame.needsRedraw = true;
            ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
        }
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);
        ret = -1;
        return ret;
    } else {
        ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
    }

    //Hard conditions, if not met, cannot do MDP comp
    if(isFrameDoable(ctx)) {
        generateROI(ctx, list);

        mModeOn = tryFullFrame(ctx, list) || tryVideoOnly(ctx, list);
        if(mModeOn) {
            setMDPCompLayerFlags(ctx, list);
        } else {
            resetROI(ctx, mDpy);
            reset(ctx);
            memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
            mCurrentFrame.dropCount = 0;
            ret = -1;
        }
    } else {
        ALOGD_IF(isDebug(), "%s: MDP Comp not possible for this frame",
                __FUNCTION__);
        ret = -1;
    }

    if(isDebug()) {
        ALOGD("GEOMETRY change: %d",
                (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump, ctx);
        ALOGD("%s",sDump.string());
    }

    mCachedFrame.cacheAll(list);
    mCachedFrame.updateCounts(mCurrentFrame);
    return ret;
}

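/*
 * Allocates a pair of VG pipes for a 4k2k YUV layer that is split into
 * left and right halves. Returns false if either pipe cannot be acquired.
 */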
bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {

    bool bRet = true;
    int mdpIndex = mCurrentFrame.layerToMDP[index];
    PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
    info.pipeInfo = new MdpYUVPipeInfo;
    info.rot = NULL;
    MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;

    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = Overlay::FORMAT_YUV;
    pipeSpecs.needsScaling = true;
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
                __FUNCTION__);
    }
    pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.rIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
                __FUNCTION__);
    }
    return bRet;
}

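/*
 * When PTOR is active, composes the overlap region via CopyBit and
 * returns the fd reported by CopyBit::drawOverlap; returns -1 when PTOR
 * is inactive or the draw fails.
 */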
int MDPComp::drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int fd = -1;
    if (ctx->mPtorInfo.isActive()) {
        fd = ctx->mCopyBit[mDpy]->drawOverlap(ctx, list);
        if (fd < 0) {
            ALOGD_IF(isDebug(),"%s: failed", __FUNCTION__);
        }
    }
    return fd;
}
//=============MDPCompNonSplit==================================================

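/*
 * Adjusts z-orders when a 4k2k YUV layer is split across two VG pipes:
 * the right half consumes an extra z-order slot, so the FB z-order and
 * the MDP pipe count are bumped accordingly.
 */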
void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    //If a 4k2k YUV layer split is possible, and the FB z-order is above
    //the 4k2k layer, increment the FB z-order by 1, since we split the
    //4k2k layer and use an extra z-order for its right half.
    if(!ctx)
        return;
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                    //As we split the 4kx2k YUV layer and program it to 2 VG
                    //pipes (if available), increase mdpCount by 1.
                    mCurrentFrame.mdpCount++;
                }
            }
        }
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoNonSplit& mdp_info =
        *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest dest = mdp_info.index;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
            __FUNCTION__, layer, zOrder, dest);

    return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
            &PipeLayerPair.rot);
}

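/*
 * Acquires an MDP pipe for every layer marked for MDP composition.
 * 4k2k YUV layers get a pair of VG pipes when YUV split is enabled.
 * Returns false as soon as any pipe request fails.
 */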
bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if(allocSplitVGPipesfor4k2k(ctx, index)){
                continue;
            }
        }

        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoNonSplit;
        info.rot = NULL;
        MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;

        Overlay::PipeSpecs pipeSpecs;
        pipeSpecs.formatClass = isYuvBuffer(hnd) ?
                Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
        pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
                (qdutils::MDPVersion::getInstance().is8x26() and
                ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
        pipeSpecs.dpy = mDpy;
        pipeSpecs.fb = false;
        pipeSpecs.numActiveDisplays = ctx->numActiveDisplays;

        pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);

        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
            return false;
        }
    }
    return true;
}

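/*
 * Configures the pair of VG pipes holding the left and right halves of a
 * split 4k2k YUV layer.
 */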
int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpYUVPipeInfo& mdp_info =
        *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
            lDest, rDest, &PipeLayerPair.rot);
}

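/*
 * Queues buffers to the configured MDP pipes for all MDP-composed layers.
 * Routes a layer through its rotator session (if any) before queueing,
 * and substitutes the PTOR render buffer on the primary display when the
 * layer was pre-composed by CopyBit. Returns false on any queue failure.
 */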
bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled() or !mModeOn) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++)
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            if (!(layer->flags & HWC_COLOR_FILL)) {
                ALOGE("%s handle null", __FUNCTION__);
                return false;
            }
            // No PLAY for color-fill layers
            layerProp[i].mFlags &= ~HWC_MDPCOMP;
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else {
            MdpPipeInfoNonSplit& pipe_info =
                *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            ovutils::eDest dest = pipe_info.index;
            if(dest == ovutils::OV_INVALID) {
                ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
                return false;
            }

            if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
                continue;
            }

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            int index = ctx->mPtorInfo.getPTORArrayIndex(i);
            if (!mDpy && (index != -1)) {
                hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
                fd = hnd->fd;
                // Use the offset of the render buffer
                offset = ctx->mPtorInfo.mRenderBuffOffset[index];
            }

            ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                    "using pipe: %d", __FUNCTION__, layer, hnd, dest);

            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            if(rot) {
                if(!rot->queueBuffer(fd, offset))
                    return false;
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            if (!ov.queueBuffer(fd, offset, dest)) {
                ALOGE("%s: queueBuffer failed for display:%d",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}

//=============MDPCompSplit===================================================

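/*
 * Adjusts z-orders and the MDP pipe count when a 4kx2k YUV layer on a
 * split display can itself be split across two VG pipes.
 */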
void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    //If a 4kx2k YUV layer lies entirely within either the left half or
    //the right half, try splitting the YUV layer to avoid decimation.
    const int lSplit = getLeftSplit(ctx, mDpy);
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    hwc_rect_t dst = layer->displayFrame;
                    if((dst.left > lSplit) || (dst.right < lSplit)) {
                        mCurrentFrame.mdpCount += 1;
                    }
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                }
            }
        }
    }
}

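/*
 * Acquires left- and/or right-mixer pipes for a layer on a split display,
 * requesting a pipe only for the half (or halves) whose ROI the layer's
 * destination rect actually intersects.
 */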
bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {

    const int lSplit = getLeftSplit(ctx, mDpy);
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.mixer = Overlay::MIXER_LEFT;
    pipeSpecs.fb = false;

    // Acquire a pipe only for the updating half
    hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;

    if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
        pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
        pipeSpecs.mixer = Overlay::MIXER_RIGHT;
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}

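/*
 * Acquires split-mixer pipes for every MDP-composed layer. A 4k2k YUV
 * layer confined to one half of the display gets a pair of VG pipes via
 * allocSplitVGPipesfor4k2k() instead.
 */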
bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        hwc_rect_t dst = layer->displayFrame;
        const int lSplit = getLeftSplit(ctx, mDpy);
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if((dst.left > lSplit) || (dst.right < lSplit)){
                if(allocSplitVGPipesfor4k2k(ctx, index)){
                    continue;
                }
            }
        }
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoSplit;
        info.rot = NULL;
        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;

        if(!acquireMDPPipes(ctx, layer, pipe_info)) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe",
                    __FUNCTION__);
            return false;
        }
    }
    return true;
}

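/*
 * Configures a 4k2k YUV layer on a split display: if the layer is
 * confined to one half it is source-split across two VG pipes, otherwise
 * it is configured like any other split-display layer.
 */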
int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    const int lSplit = getLeftSplit(ctx, mDpy);
    hwc_rect_t dst = layer->displayFrame;
    if((dst.left > lSplit) || (dst.right < lSplit)) {
        MdpYUVPipeInfo& mdp_info =
            *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
        eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
        eIsFg isFg = IS_FG_OFF;
        eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
        eDest lDest = mdp_info.lIndex;
        eDest rDest = mdp_info.rIndex;

        return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
                lDest, rDest, &PipeLayerPair.rot);
    } else {
        return configure(ctx, layer, PipeLayerPair);
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
            "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);

    return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest,
            rDest, &PipeLayerPair.rot);
}

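/*
 * Queues buffers to the left and/or right mixer pipes for all
 * MDP-composed layers on a split display, routing through the rotator
 * and the AD (assertive display) stage when active. Returns false on any
 * queue failure.
 */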
bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled() or !mModeOn) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++)
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else {
            MdpPipeInfoSplit& pipe_info =
                *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;

            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            int index = ctx->mPtorInfo.getPTORArrayIndex(i);
            if (!mDpy && (index != -1)) {
                hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
                fd = hnd->fd;
                // Use the offset of the render buffer
                offset = ctx->mPtorInfo.mRenderBuffOffset[index];
            }

            if(ctx->mAD->draw(ctx, fd, offset)) {
                fd = ctx->mAD->getDstFd();
                offset = ctx->mAD->getDstOffset();
            }

            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            //************* play left mixer **********
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for left mixer",
                            __FUNCTION__);
                    return false;
                }
            }

            //************* play right mixer **********
            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for right mixer",
                            __FUNCTION__);
                    return false;
                }
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }

    return true;
}

//================MDPCompSrcSplit==============================================
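/*
 * Acquires one pipe per layer by default, and a second pipe when the
 * layer's crop or destination width exceeds what a single pipe can
 * handle, swapping the pair if needed so the left pipe holds the higher
 * priority.
 */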
bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    //If 2 pipes are staged on a single stage of a mixer, the left pipe
    //must have a higher priority than the right one. Pipe priorities
    //start with VG0, then VG1 ..., RGB0 ..., DMA1.

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScaling(layer);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    //1 pipe by default for a layer
    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        return false;
    }

    /* Use 2 pipes IF
        a) the layer's crop width is > 2048, or
        b) the layer's dest width is > 2048, or
        c) on primary, the driver has indicated via caps to split always.
           This is based on an empirically derived panel-height value, and
           is applied only if the layer's width exceeds the mixer's width.
    */

    bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
            qdutils::MDPVersion::getInstance().isSrcSplitAlways();
    int lSplit = getLeftSplit(ctx, mDpy);
    int dstWidth = dst.right - dst.left;
    int cropWidth = crop.right - crop.left;

    if(dstWidth > qdutils::MAX_DISPLAY_DIM or
            cropWidth > qdutils::MAX_DISPLAY_DIM or
            (primarySplitAlways and (cropWidth > lSplit))) {
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID) {
            return false;
        }

        // comparePipePriority() return values:
        //  1 Left pipe is higher priority, do nothing.
        //  0 Pipes are of the same priority.
        // -1 Right pipe is of higher priority, needs swap.
        if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex,
                pipe_info.rIndex) == -1) {
            qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
        }
    }

    return true;
}

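/*
 * Configures one or two pipes for a source-split layer: sets up R/B swap
 * and rotator pre-rotation as needed, and, when two pipes are in use,
 * halves both the crop and destination rects (swapping crops on an
 * unrotated H-flip) before committing each pipe's config.
 */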
int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    Rotator **rot = &PipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    const int downscale = 0;
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
            "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }

    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(ctx, layer, mdpFlags, 0, transform);

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(crop, dst, transform, mdpFlags);
        }
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        updateSource(orient, whf, crop, *rot);
        rotFlags |= ROT_PREROTATED;
    }

    //If 2 pipes are being used, divide the layer into halves, crop and dst
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
        }

        dstL.right = (dst.right + dst.left) / 2;
        dstR.left = dstL.right;
    }

    //The MDP gets orientation 0, since we are either pre-rotating via
    //the rotator or letting the MDP handle the flips
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //Configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //Configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

}; //namespace