/*
 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "virtual.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sHandleTimeout = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sSimulationFlags = 0;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
bool MDPComp::sEnable4k2kYUVSplit = false;
bool MDPComp::sSrcSplitEnabled = false;
MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}
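
// Note: sSrcSplitEnabled is latched as a side effect of the factory above;
// source-split capable targets always take the MDPCompSrcSplit path,
// dual-mixer (split) panels take MDPCompSplit, and everything else gets
// MDPCompNonSplit.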

MDPComp::MDPComp(int dpy):mDpy(dpy){};

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf," --------------------------------------------- \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n");
    dumpsys_log(buf," --------------------------------------------- \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                    mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                     (mCurrentFrame.drop[index] ? "DROP" :
                     (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
        mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
        if(atoi(property) != 0)
            sDebugLogs = true;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        //create Idle Invalidator only when not disabled through property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx,
                                  (unsigned int)idle_timeout);
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
             !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) {
        sEnable4k2kYUVSplit = true;
    }
    return true;
}
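
/* Quick reference for the debug/persist properties consumed by init() above.
 * Values are illustrative; set them from an adb shell, then restart
 * SurfaceFlinger (or reboot) so init() re-reads them:
 *
 *   adb shell setprop persist.hwc.mdpcomp.enable 1      # master MDP comp switch
 *   adb shell setprop debug.mdpcomp.mixedmode.disable 1 # disable mixed mode
 *   adb shell setprop debug.mdpcomp.logs 1              # verbose ALOGD logging
 *   adb shell setprop debug.mdpcomp.maxpermixer 3       # clamp pipes per mixer
 *   adb shell setprop debug.mdpcomp.idletime -1         # disable idle fallback
 */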

void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
}

void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }
    Locker::Autolock _l(ctx->mDrawLock);
    // Handle the timeout event only if the previous composition is MDP or MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
                                   hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in the FB, or when it
             * lies outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
                        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
       (list->flags & HWC_GEOMETRY_CHANGED) ||
       isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_PIPES_PER_MIXER; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We don't own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));
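    // The byte-wise memsets above are intentional: filling every byte of the
    // int array layerToMDP with 0xFF makes each entry read back as -1
    // ("unmapped"), and filling each byte of the bool array isFBComposed
    // with 1 reads back as true. This trick only works for these particular
    // fill values.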

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // populate layer and MDP maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}
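
// Worked example for map(): with isFBComposed = [true, false, false], the
// two MDP-composed layers get consecutive pipe slots, i.e.
// layerToMDP = [-1, 0, 1] and mdpToLayer[0..1].listIndex = 1, 2.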

MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
                                      hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
           (curFrame.drop[i] != drop[i])) {
            return false;
        }
        hwc_layer_1_t const* layer = &list->hwLayers[i];
        if(curFrame.isFBComposed[i] && layerUpdating(layer)){
            return false;
        }
    }
    return true;
}

bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((not isYuvBuffer(hnd) and has90Transform(layer)) or
       (not isValidDimension(ctx,layer))
       //More conditions here, SKIP, sRGB+Blend etc
      ) {
        return false;
    }
    return true;
}

bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int crop_w = crop.right - crop.left;
    int crop_h = crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);
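    // Scale convention: w_scale/h_scale > 1.0f means the source crop is
    // larger than the destination, i.e. a downscale (e.g. a 1920-wide crop
    // into a 960-wide dst gives w_scale = 2.0f); values < 1.0f mean upscale.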

    /* Workaround for an MDP HW limitation in DSI command mode panels, where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or
     * height less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2.
     * Fall back to GPU if the height is less than 2.
     */
    if(qdutils::MDPVersion::getInstance().hasMinCropWidthLimitation() and
       (crop_w < 5 or crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
                /* On targets that don't support decimation (e.g., 8x26),
                 * the maximum downscale supported is the overlay pipe
                 * downscale.
                 */
                if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale ||
                   h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* MDP can read a maximum of MAX_DISPLAY_DIM width.
                     * Bail out if
                     * 1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp
                     * 2. it exceeds the maximum downscale limit
                     */
                    if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) ||
                       w_dscale > maxMDPDownscale ||
                       h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale =
            qdutils::MDPVersion::getInstance().getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}

bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().is8x26() &&
              ctx->mVideoTransFlag &&
              isSecondaryConnected(ctx)) {
        //1 Padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                 __FUNCTION__);
        ret = false;
    } else if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__,mDpy);
        ret = false;
    }
    return ret;
}

hwc_rect_t MDPComp::calculateDirtyRect(const hwc_layer_1_t* layer,
                                       hwc_rect_t& scissor) {
    hwc_region_t surfDamage = layer->surfaceDamage;
    hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int x_off = dst.left - src.left;
    int y_off = dst.top - src.top;
    hwc_rect dirtyRect = (hwc_rect){0, 0, 0, 0};
    hwc_rect_t updatingRect = dst;

    if (surfDamage.numRects == 0) {
        // full layer updating, dirty rect is full frame
        dirtyRect = getIntersection(layer->displayFrame, scissor);
    } else {
        for(uint32_t i = 0; i < surfDamage.numRects; i++) {
            updatingRect = moveRect(surfDamage.rects[i], x_off, y_off);
            hwc_rect_t intersect = getIntersection(updatingRect, scissor);
            if(isValidRect(intersect)) {
                dirtyRect = getUnion(intersect, dirtyRect);
            }
        }
    }

    return dirtyRect;
}
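
/* Worked example for calculateDirtyRect(): a layer whose sourceCrop and
 * displayFrame are both {0, 0, 100, 100} and which reports one surfaceDamage
 * rect {10, 10, 20, 20} yields x_off = y_off = 0, so the dirty rect is
 * {10, 10, 20, 20} clipped against the scissor; with no damage rects, the
 * whole displayFrame (clipped) is treated as dirty. */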

void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    fbRect = getIntersection(fbRect, roi);
}

/* 1) Identify layers that are not visible or are lying outside the updating
 *    ROI and drop them from composition.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
                                          hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset the frame ROI when any layer which needs scaling also
             * needs ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling (layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* deduct any opaque region from visibleRect */
            if (layer->blending == HWC_BLENDING_NONE)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting for every updating layer's
 * displayFrame. If DirtyRegion is applicable, calculate it by accounting
 * for each changing layer's dirtyRegion. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
                                  hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];

        if (layerUpdating(layer) ||
            isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dirtyRect = getIntersection(layer->displayFrame,
                                                   fullFrame);
            if(!needsScaling(layer) && !layer->transform) {
                dirtyRect = calculateDirtyRect(layer, fullFrame);
            }

            roi = getUnion(roi, dirtyRect);
        }
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
             ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
             ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect r_roi = ctx->listStats[mDpy].rRoi;

    hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi);
    hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi);
    fbRect = getUnion(l_fbRect, r_fbRect);
}
/* 1) Identify layers that are not visible or are lying outside BOTH of the
 *    updating ROIs and drop them from composition. If a layer spans both
 *    halves of the screen but is needed by only one ROI, the
 *    non-contributing half will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
                                       hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset the frame ROI when any layer which needs scaling also
             * needs ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling (layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if (layer->blending == HWC_BLENDING_NONE) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting for every updating layer's
 * displayFrame. If DirtyRegion is applicable, calculate it by accounting
 * for each changing layer's dirtyRegion. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
                               hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if (layerUpdating(layer) || isYuvBuffer(hnd)) {
            hwc_rect_t l_dirtyRect = getIntersection(layer->displayFrame,
                                                     l_frame);
            hwc_rect_t r_dirtyRect = getIntersection(layer->displayFrame,
                                                     r_frame);

            if(!needsScaling(layer) && !layer->transform) {
                l_dirtyRect = calculateDirtyRect(layer, l_frame);
                r_dirtyRect = calculateDirtyRect(layer, r_frame);
            }
            if(isValidRect(l_dirtyRect))
                l_roi = getUnion(l_roi, l_dirtyRect);

            if(isValidRect(r_dirtyRect))
                r_roi = getUnion(r_roi, r_dirtyRect);
        }
    }

    /* For panels that cannot accept commands on both interfaces, we cannot
     * send two ROIs (one for each half). We merge them into a single ROI
     * and split it across lSplit for MDP mixer use. The ROIs are merged
     * again at the end, before updating the panel in the driver. */
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }
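
    /* Merge example (illustrative numbers): on a 1080-wide panel split at
     * lSplit = 540, l_roi = [100, 100, 500, 200] and
     * r_roi = [600, 300, 900, 400] union to [100, 100, 900, 400], which is
     * then re-split into l_roi = [100, 100, 540, 400] and
     * r_roi = [540, 100, 900, 400]. */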

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]"
             "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
             ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
             ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
             ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
             ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
                           hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                 __FUNCTION__,
                 isSkipPresent(ctx, mDpy));
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) &&
       (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) {
        // Disable MDP comp on the secondary when the primary is a high-res
        // panel and the secondary is a normal 1080p: in such a usecase MDP
        // comp on the secondary uses decimation for the downscale, and there
        // would be a quality mismatch whenever it falls back to GPU comp.
        ALOGD_IF(isDebug(), "%s: Disable MDP Composition for Secondary Disp",
                 __FUNCTION__);
        return false;
    }

    // check for action safe flag and downscale mode which requires scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
       || ctx->dpyAttr[mDpy].mDownScaleMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(isYuvBuffer(hnd) && has90Transform(layer)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                         __FUNCTION__, mDpy);
                return false;
            }
        }

        // For 8x26 with panel width > 1k, fail MDP comp if an RGB layer
        // needs HFLIP. This may not be needed if Gfx pre-rotation can
        // handle all flips & rotations.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
           (ctx->dpyAttr[mDpy].xres > 1024) &&
           (layer->transform & HWC_TRANSFORM_FLIP_H) &&
           (!isYuvBuffer(hnd)))
            return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}

bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    //Will benefit presentation / secondary-only layer.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
       (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
           not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }

        // For 8x26, if there is only one layer which needs scaling for the
        // secondary while there is no scaling for the primary display, the
        // DMA pipe is occupied by the primary. If we then need to fall back
        // to GLES composition, the virtual display lacks a DMA pipe and an
        // error is reported.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
           mDpy >= HWC_DISPLAY_EXTERNAL &&
           qhwc::needsScaling(layer))
            return false;
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
        ret = loadBasedComp(ctx, list) or
            cacheBasedComp(ctx, list);
    } else {
        ret = cacheBasedComp(ctx, list) or
            loadBasedComp(ctx, list);
    }

    return ret;
}

bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
                             hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);

    //If an MDP-marked layer is unsupported, we cannot do partial MDP comp.
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                         __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    //Will benefit cases where a video has a non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
       (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

bool MDPComp::loadBasedComp(hwc_context_t *ctx,
                            hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    for(int i = 0; i < numAppLayers;i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
             "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
             mCurrentFrame.dropCount);

    //Start at a point where the fb batch has at least 2 layers, for this
    //mode to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }
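
    // Example (illustrative): with 3 non-dropped layers and 4 available
    // stages, the initial split is mdpBatchSize = 3, fbBatchSize = 0; the
    // loop above then rebalances this to fbBatchSize = 2, mdpBatchSize = 1.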

    //If there are no layers for MDP, this mode doesn't make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                 __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try with successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                 __FUNCTION__, mdpBatchSize, fbBatchSize,
                 mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                     __FUNCTION__);
            ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED",
                     __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}

bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
    if(mDpy or isSecurePresent(ctx, mDpy) or
       isYuvPresent(ctx, mDpy)) {
        return false;
    }
    return true;
}

bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
                               hwc_display_contents_1_t* list){
    if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
       isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
       mDpy ) {
        return false;
    }
    return true;
}

bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
                           hwc_display_contents_1_t* list) {
    const bool secureOnly = true;
    return videoOnlyComp(ctx, list, not secureOnly) or
        videoOnlyComp(ctx, list, secureOnly);
}

bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
                            hwc_display_contents_1_t* list, bool secureOnly) {
    if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
        return false;
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    mCurrentFrame.reset(numAppLayers);
    mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
    updateYUV(ctx, list, secureOnly);
    int mdpCount = mCurrentFrame.mdpCount;

    if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
        reset(ctx);
        return false;
    }

    /* Bail out if we are processing only secured video layers
     * and we don't have any */
    if(!isSecurePresent(ctx, mDpy) && secureOnly){
        reset(ctx);
        return false;
    }

    if(mCurrentFrame.fbCount)
        mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
        return false;
    }

    if(layer->transform & HWC_TRANSFORM_ROT_90 && !canUseRotator(ctx,mDpy)) {
        ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
        return false;
    }

    if(isSecuring(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
        return false;
    }

    if(!isValidDimension(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
                 __FUNCTION__);
        return false;
    }

    if(layer->planeAlpha < 0xFF) {
        ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\
                 in video only mode",
                 __FUNCTION__);
        return false;
    }

    return true;
}

/* Starts at fromIndex and checks each layer in the batch to see whether it
 * overlaps any updating layer above it in z-order, up to the end of the
 * batch. Returns false if it finds any intersection. */
bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
                                int fromIndex, int toIndex) {
    for(int i = fromIndex; i < toIndex; i++) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
                return false;
            }
        }
    }
    return true;
}

/* Checks if the given layer at targetLayerIndex intersects any of the
 * updating layers between fromIndex and toIndex. Returns true if it finds
 * an intersection. */
bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex, int targetLayerIndex) {
    for(int i = fromIndex; i <= toIndex; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            if(areLayersIntersecting(&list->hwLayers[i],
                                     &list->hwLayers[targetLayerIndex])) {
                return true;
            }
        }
    }
    return false;
}

int MDPComp::getBatch(hwc_display_contents_1_t* list,
                      int& maxBatchStart, int& maxBatchEnd,
                      int& maxBatchCount) {
    int i = 0;
    int fbZOrder = -1;
    int droppedLayerCt = 0;
    while (i < mCurrentFrame.layerCount) {
        int batchCount = 0;
        int batchStart = i;
        int batchEnd = i;
        /* Adjust batch Z order with the dropped layers so far */
        int fbZ = batchStart - droppedLayerCt;
        int firstZReverseIndex = -1;
        int updatingLayersAbove = 0;//Updating layer count in middle of batch
        while(i < mCurrentFrame.layerCount) {
            if(!mCurrentFrame.isFBComposed[i]) {
                if(!batchCount) {
                    i++;
                    break;
                }
                updatingLayersAbove++;
                i++;
                continue;
            } else {
                if(mCurrentFrame.drop[i]) {
                    i++;
                    droppedLayerCt++;
                    continue;
                } else if(updatingLayersAbove <= 0) {
                    batchCount++;
                    batchEnd = i;
                    i++;
                    continue;
                } else { //Layer is FBComposed, not a drop & updatingLayer > 0

                    // We already have a valid updating layer. If layer-i does
                    // not overlap any of the updating layers between
                    // batch-start and i, then we can add layer-i to the batch.
                    if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
                        batchCount++;
                        batchEnd = i;
                        i++;
                        continue;
                    } else if(canPushBatchToTop(list, batchStart, i)) {
                        //If none of the non-updating layers in this batch
                        //intersect the updating layers above in z-order,
                        //then we can safely move the batch to a higher
                        //z-order. Increment fbZ as it is moving up.
                        if( firstZReverseIndex < 0) {
                            firstZReverseIndex = i;
                        }
                        batchCount++;
                        batchEnd = i;
                        fbZ += updatingLayersAbove;
                        i++;
                        updatingLayersAbove = 0;
                        continue;
                    } else {
                        //both failed. Start the loop again from here.
                        if(firstZReverseIndex >= 0) {
                            i = firstZReverseIndex;
                        }
                        break;
                    }
                }
            }
        }
        if(batchCount > maxBatchCount) {
            maxBatchCount = batchCount;
            maxBatchStart = batchStart;
            maxBatchEnd = batchEnd;
            fbZOrder = fbZ;
        }
    }
    return fbZOrder;
}
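
/* Batching example (illustrative): with layers [C0, C1, U2, C3], where C =
 * cached/FB-composed and U = updating/MDP, the scan starts a batch at C0-C1;
 * when it reaches C3 with U2 in between, C3 joins the batch only if it does
 * not overlap U2, or if the whole batch can instead be pushed above U2
 * (which bumps fbZ). The largest batch found wins, and its z-position is
 * returned as fbZ. */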

bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
                                   hwc_display_contents_1_t* list) {
    /* The idea is to keep as many non-updating (cached) layers in the FB as
     * possible and send the rest of them through MDP. This is done in 2 steps.
     *   1. Find the maximum contiguous batch of non-updating layers.
     *   2. See if we can improve this batch size for caching by adding
     *      opaque layers around the batch, if they don't overlap the
     *      updating layers in between.
     * NEVER mark an updating layer for caching.
     * But cached ones can be marked for MDP. */

    int maxBatchStart = -1;
    int maxBatchEnd = -1;
    int maxBatchCount = 0;
    int fbZ = -1;

    /* Nothing is cached. No batching needed */
    if(mCurrentFrame.fbCount == 0) {
        return true;
    }

    /* No MDP comp layers, try to use other comp modes */
    if(mCurrentFrame.mdpCount == 0) {
        return false;
    }

    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);

    /* reset the rest of the layers lying inside the ROI for MDP comp */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if((i < maxBatchStart || i > maxBatchEnd) &&
           mCurrentFrame.isFBComposed[i]){
            if(!mCurrentFrame.drop[i]){
                //If an unsupported layer is being pulled out of the FB
                //batch, we should fail
                if(not isSupportedForMDPComp(ctx, layer)) {
                    return false;
                }
                mCurrentFrame.isFBComposed[i] = false;
            }
        }
    }

    // update the frame data
    mCurrentFrame.fbZ = fbZ;
    mCurrentFrame.fbCount = maxBatchCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
        mCurrentFrame.fbCount - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);

    return true;
}

void MDPComp::updateLayerCache(hwc_context_t* ctx,
                               hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int fbCount = 0;

    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t * layer = &list->hwLayers[i];
        if (!layerUpdating(layer)) {
            if(!mCurrentFrame.drop[i])
                fbCount++;
            mCurrentFrame.isFBComposed[i] = true;
        } else {
            mCurrentFrame.isFBComposed[i] = false;
        }
    }

    mCurrentFrame.fbCount = fbCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
        - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d"
             ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
             mCurrentFrame.dropCount);
}

void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
                        bool secureOnly) {
    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0;index < nYuvCount; index++){
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];

        if(!isYUVDoable(ctx, layer)) {
            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = true;
                mCurrentFrame.fbCount++;
            }
        } else {
            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(!secureOnly || isSecureBuffer(hnd)) {
                    mCurrentFrame.isFBComposed[nYuvIndex] = false;
                    mCurrentFrame.fbCount--;
                }
            }
        }
    }

    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
        mCurrentFrame.fbCount - mCurrentFrame.dropCount;
    ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);
}

hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
                                      hwc_display_contents_1_t* list){
    hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};

    /* Update only the region of the FB needed for composition */
    for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            fbRect = getUnion(fbRect, dst);
        }
    }
    trimAgainstROI(ctx, fbRect);
    return fbRect;
}

bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
                                     hwc_display_contents_1_t* list) {

    //Capability checks
    if(!resourceCheck()) {
        ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
        return false;
    }

    //Limitations checks
    if(!hwLimitationsCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
        return false;
    }

    //Configure framebuffer first if applicable
    if(mCurrentFrame.fbZ >= 0) {
        hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
        if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
        {
            ALOGD_IF(isDebug(), "%s configure framebuffer failed",
                     __FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.map();

    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }

    for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
         index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            //Leave fbZ for framebuffer. CACHE/GLES layers go here.
            if(mdpNextZOrder == mCurrentFrame.fbZ) {
                mdpNextZOrder++;
            }
            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
                if(configure4k2kYuv(ctx, layer,
                                    mCurrentFrame.mdpToLayer[mdpIndex])
                   != 0 ){
                    ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
                             for layer %d",__FUNCTION__, index);
                    return false;
                }
                else{
                    mdpNextZOrder++;
                }
                continue;
            }
            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                         layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }

    if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
        ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
                 ,__FUNCTION__, mDpy);
        return false;
    }

    setRedraw(ctx, list);
    return true;
}

bool MDPComp::resourceCheck() {
    const bool fbUsed = mCurrentFrame.fbCount;
    if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }
    return true;
}
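
// Budget example for resourceCheck(): if sMaxPipesPerMixer is 4 and the FB
// target is in use (fbUsed converts to 1), at most 3 layers can go to MDP;
// with no FB-composed layers, all 4 mixer stages are available for MDP.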

bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
                                 hwc_display_contents_1_t* list) {

    //A-family hw limitation:
    //MDP cannot support a layer that needs alpha scaling.
    if(ctx->mMDP.version < qdutils::MDSS_V5) {
        for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
            if(!mCurrentFrame.isFBComposed[i] &&
               isAlphaScaled( &list->hwLayers[i])) {
                ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
                return false;
            }
        }
    }

    // On 8x26 & 8974 hw, we have a downscaling+blending limitation.
    // If multiple layers require downscaling and they overlap, fall back
    // to GPU, since MDSS cannot handle it.
    if(qdutils::MDPVersion::getInstance().is8x74v2() ||
       qdutils::MDPVersion::getInstance().is8x26()) {
        for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
            hwc_layer_1_t* botLayer = &list->hwLayers[i];
            if(!mCurrentFrame.isFBComposed[i] &&
               isDownscaleRequired(botLayer)) {
                //if layer-i is marked for MDP and needs downscaling,
                //check if any MDP layer on top of i overlaps with layer-i
                for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
                    hwc_layer_1_t* topLayer = &list->hwLayers[j];
                    if(!mCurrentFrame.isFBComposed[j] &&
                       isDownscaleRequired(topLayer)) {
                        hwc_rect_t r = getIntersection(botLayer->displayFrame,
                                                       topLayer->displayFrame);
                        if(isValidRect(r))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}

int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int ret = 0;
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    char property[PROPERTY_VALUE_MAX];

    if(property_get("debug.hwc.simulate", property, NULL) > 0) {
        int currentFlags = atoi(property);
        if(currentFlags != sSimulationFlags) {
            sSimulationFlags = currentFlags;
            ALOGE("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
                  sSimulationFlags, sSimulationFlags);
        }
    }

    //Do not cache the information for the next draw cycle.
    if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
        ALOGI("%s: Unsupported layer count for mdp composition",
              __FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    //reset old data
    mCurrentFrame.reset(numLayers);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;

    // Detect the start of an animation and fall back to GPU only once, to
    // cache all the layers in the FB and display the FB content until the
    // animation completes.
    if(ctx->listStats[mDpy].isDisplayAnimating) {
        mCurrentFrame.needsRedraw = false;
        if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
            mCurrentFrame.needsRedraw = true;
            ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
        }
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);
        ret = -1;
        return ret;
    } else {
        ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
    }

    //Hard conditions, if not met, cannot do MDP comp
    if(isFrameDoable(ctx)) {
        generateROI(ctx, list);

        if(tryFullFrame(ctx, list) || tryVideoOnly(ctx, list)) {
            setMDPCompLayerFlags(ctx, list);
        } else {
            resetROI(ctx, mDpy);
            reset(ctx);
            memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
            mCurrentFrame.dropCount = 0;
            ret = -1;
        }
    } else {
        ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
                  __FUNCTION__);
        ret = -1;
    }

    if(isDebug()) {
        ALOGD("GEOMETRY change: %d",
              (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump, ctx);
        ALOGD("%s",sDump.string());
    }

    mCachedFrame.updateCounts(mCurrentFrame);
    return ret;
}

bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {

    bool bRet = true;
    int mdpIndex = mCurrentFrame.layerToMDP[index];
    PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
    info.pipeInfo = new MdpYUVPipeInfo;
    info.rot = NULL;
    MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;

    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = Overlay::FORMAT_YUV;
    pipeSpecs.needsScaling = true;
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
                 __FUNCTION__);
    }
    pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.rIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
                 __FUNCTION__);
    }
    return bRet;
}
//=============MDPCompNonSplit==================================================

void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
                                           hwc_display_contents_1_t* list) {
    //If a 4k2k YUV layer split is possible, and fbZ is above the 4k2k
    //layer, increment the fb z-order by 1, since we split the 4k2k layer
    //and increment the z-order for the right half of the layer.
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
             index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                    //As we split the 4kx2k yuv layer and program it to 2 VG
                    //pipes (if available), increase mdpCount by 1.
                    mCurrentFrame.mdpCount++;
                }
            }
        }
    }
}
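
/* Z-order example for adjustForSourceSplit() (illustrative): with layers
 * [4k2k YUV (MDP), cached (FB), RGB (MDP)] and fbZ initially 1, the YUV
 * layer ends up taking stages 0 (left half) and 1 (right half), pushing
 * fbZ to 2 and the RGB layer to stage 3. */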
1554
1555 /*
1556 * Configures pipe(s) for MDP composition
1557 */
configure(hwc_context_t * ctx,hwc_layer_1_t * layer,PipeLayerPair & PipeLayerPair)1558 int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
1559 PipeLayerPair& PipeLayerPair) {
1560 MdpPipeInfoNonSplit& mdp_info =
1561 *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
1562 eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
1563 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
1564 eIsFg isFg = IS_FG_OFF;
1565 eDest dest = mdp_info.index;
1566
1567 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
1568 __FUNCTION__, layer, zOrder, dest);
1569
1570 return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
1571 &PipeLayerPair.rot);
1572 }
1573
allocLayerPipes(hwc_context_t * ctx,hwc_display_contents_1_t * list)1574 bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
1575 hwc_display_contents_1_t* list) {
1576 for(int index = 0; index < mCurrentFrame.layerCount; index++) {
1577
1578 if(mCurrentFrame.isFBComposed[index]) continue;
1579
1580 hwc_layer_1_t* layer = &list->hwLayers[index];
1581 private_handle_t *hnd = (private_handle_t *)layer->handle;
1582 if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
1583 if(allocSplitVGPipesfor4k2k(ctx, index)){
1584 continue;
1585 }
1586 }
1587
1588 int mdpIndex = mCurrentFrame.layerToMDP[index];
1589 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
1590 info.pipeInfo = new MdpPipeInfoNonSplit;
1591 info.rot = NULL;
1592 MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;
1593
1594 Overlay::PipeSpecs pipeSpecs;
1595 pipeSpecs.formatClass = isYuvBuffer(hnd) ?
1596 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
1597 pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
1598 (qdutils::MDPVersion::getInstance().is8x26() and
1599 ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
1600 pipeSpecs.dpy = mDpy;
1601 pipeSpecs.fb = false;
1602
1603 pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);
1604
1605 if(pipe_info.index == ovutils::OV_INVALID) {
1606 ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
1607 return false;
1608 }
1609 }
1610 return true;
1611 }
1612
configure4k2kYuv(hwc_context_t * ctx,hwc_layer_1_t * layer,PipeLayerPair & PipeLayerPair)1613 int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
1614 PipeLayerPair& PipeLayerPair) {
1615 MdpYUVPipeInfo& mdp_info =
1616 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
1617 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
1618 eIsFg isFg = IS_FG_OFF;
1619 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
1620 eDest lDest = mdp_info.lIndex;
1621 eDest rDest = mdp_info.rIndex;
1622
1623 return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
1624 lDest, rDest, &PipeLayerPair.rot);
1625 }
1626
bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid context or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++)
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            if (!(layer->flags & HWC_COLOR_FILL)) {
                ALOGE("%s handle null", __FUNCTION__);
                return false;
            }
            // No PLAY for color-fill layers
            layerProp[i].mFlags &= ~HWC_MDPCOMP;
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                         "using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                          __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                         "using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                          __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else {
            MdpPipeInfoNonSplit& pipe_info =
                *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            ovutils::eDest dest = pipe_info.index;
            if(dest == ovutils::OV_INVALID) {
                ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
                return false;
            }

            if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
                continue;
            }

            ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                     "using pipe: %d", __FUNCTION__, layer, hnd, dest);

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;

            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            if(rot) {
                if(!rot->queueBuffer(fd, offset))
                    return false;
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            if (!ov.queueBuffer(fd, offset, dest)) {
                ALOGE("%s: queueBuffer failed for display:%d",
                      __FUNCTION__, mDpy);
                return false;
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}

//=============MDPCompSplit===================================================

void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    //If a 4kx2k YUV layer lies entirely within the left half or entirely
    //within the right half, try splitting the YUV layer to avoid decimation
    const int lSplit = getLeftSplit(ctx, mDpy);
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    hwc_rect_t dst = layer->displayFrame;
                    if((dst.left > lSplit) || (dst.right < lSplit)) {
                        mCurrentFrame.mdpCount += 1;
                    }
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                }
            }
        }
    }
}

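/*
 * Acquires pipes for a layer on a split display. A pipe is requested only
 * for the half (left/right mixer) whose programmed ROI the layer's
 * displayFrame actually intersects.
 */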
bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {

    const int lSplit = getLeftSplit(ctx, mDpy);
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.mixer = Overlay::MIXER_LEFT;
    pipeSpecs.fb = false;

    // Acquire a pipe only for the updating half
    hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;

    if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
        pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
        pipeSpecs.mixer = Overlay::MIXER_RIGHT;
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}

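/*
 * Allocates pipes for each MDP-composed layer on a split display. A 4kx2k
 * YUV layer confined to one half gets the split-VG-pipe treatment; all
 * other layers go through acquireMDPPipes() above.
 */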
bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        hwc_rect_t dst = layer->displayFrame;
        const int lSplit = getLeftSplit(ctx, mDpy);
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if((dst.left > lSplit) || (dst.right < lSplit)){
                if(allocSplitVGPipesfor4k2k(ctx, index)){
                    continue;
                }
            }
        }
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoSplit;
        info.rot = NULL;
        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;

        if(!acquireMDPPipes(ctx, layer, pipe_info)) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe for layer %d",
                     __FUNCTION__, index);
            return false;
        }
    }
    return true;
}

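/*
 * For a 4kx2k YUV layer confined to one half of a split display, configure
 * it across two VG pipes; otherwise fall back to the regular split
 * configure() path below.
 */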
int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    const int lSplit = getLeftSplit(ctx, mDpy);
    hwc_rect_t dst = layer->displayFrame;
    if((dst.left > lSplit) || (dst.right < lSplit)){
        MdpYUVPipeInfo& mdp_info =
            *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
        eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
        eIsFg isFg = IS_FG_OFF;
        eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
        eDest lDest = mdp_info.lIndex;
        eDest rDest = mdp_info.rIndex;

        return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
                lDest, rDest, &PipeLayerPair.rot);
    }
    else {
        return configure(ctx, layer, PipeLayerPair);
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
             "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);

    return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest,
                          rDest, &PipeLayerPair.rot);
}

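/*
 * Queues buffers for a split display: each MDP-composed layer may play on
 * the left mixer, the right mixer, or both. When AD (assertive display)
 * mode is on, the AD output buffer is queued in place of the layer's own.
 */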
bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid context or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++)
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                         "using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                          __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                         "using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                          __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else {
            MdpPipeInfoSplit& pipe_info =
                *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;

            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;

            if(ctx->mAD->isModeOn()) {
                if(ctx->mAD->draw(ctx, fd, offset)) {
                    fd = ctx->mAD->getDstFd();
                    offset = ctx->mAD->getDstOffset();
                }
            }

            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            //************* play left mixer **********
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                         "using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for left mixer",
                          __FUNCTION__);
                    return false;
                }
            }

            //************* play right mixer **********
            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                         "using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for right mixer",
                          __FUNCTION__);
                    return false;
                }
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }

    return true;
}

//================MDPCompSrcSplit==============================================
bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    //If 2 pipes are staged at the same mixer stage, the left pipe must have
    //higher priority than the right one. Pipe priority decreases in the
    //order VG0, VG1 ..., RGB0 ..., DMA1.

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScaling(layer);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    //A layer gets 1 pipe by default
    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        return false;
    }

    /* Use 2 pipes if:
     * a) the layer's crop width is > 2048, or
     * b) the layer's dest width is > 2048, or
     * c) on primary, the driver's caps indicate "split always". This is
     *    based on an empirically derived panel-height value, and applies
     *    only when the layer's width exceeds the mixer's width.
     */
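    /* Worked example (hypothetical numbers): a 3840-wide layer with no
     * 90-degree transform has cropWidth = 3840 > MAX_DISPLAY_DIM (2048),
     * so a second pipe is acquired and each pipe drives one half. */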

    bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
            qdutils::MDPVersion::getInstance().isSrcSplitAlways();
    int lSplit = getLeftSplit(ctx, mDpy);
    int dstWidth = dst.right - dst.left;
    int cropWidth = has90Transform(layer) ? crop.bottom - crop.top :
            crop.right - crop.left;

    if(dstWidth > qdutils::MAX_DISPLAY_DIM or
            cropWidth > qdutils::MAX_DISPLAY_DIM or
            (primarySplitAlways and (cropWidth > lSplit))) {
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID) {
            return false;
        }

        // comparePipePriority() return values:
        //  1: left pipe has higher priority, do nothing.
        //  0: pipes have the same priority.
        // -1: right pipe has higher priority, swap needed.
        if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex,
                pipe_info.rIndex) == -1) {
            qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
        }
    }

    return true;
}

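/*
 * Configures one layer on one or two source-split pipes: handles R/B swap,
 * optional pre-rotation (with BWC when only a single pipe is in use),
 * halves the crop and dest rects when two pipes are in play, then commits
 * each pipe's configuration.
 */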
int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    Rotator **rot = &PipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    const int downscale = 0;
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
             "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }

    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(layer, mdpFlags, 0, transform);

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if(isYuvBuffer(hnd) && (transform & HWC_TRANSFORM_ROT_90)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(crop, dst, transform, mdpFlags);
        }
        //Configure the rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        whf.format = (*rot)->getDstFormat();
        updateSource(orient, whf, crop);
        rotFlags |= ROT_PREROTATED;
    }

    //If 2 pipes are being used, split the layer's crop and dst rects in half
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        //Swap the crops on an H flip, since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
        }

        dstL.right = (dst.right + dst.left) / 2;
        dstR.left = dstL.right;
    }
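    /* Example (hypothetical rects): crop = dst = [0, 0, 3840, 2160] yields
     * cropL/dstL = [0, 0, 1920, 2160] and cropR/dstR = [1920, 0, 3840, 2160],
     * i.e. each pipe handles one half of the source and destination. */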

    //No transform for the MDP: either the rotator has already pre-rotated
    //the buffer, or the MDP performs the flips itself
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //configure the left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z, isFg,
                       static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                       (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //configure the right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z, isFg,
                       static_cast<eRotFlags>(rotFlags),
                       layer->planeAlpha,
                       (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

}; //namespace