/*
 * Copyright (C) 2012-2016, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "hdmi.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>
#include "hwc_copybit.h"
#include "qd_utils.h"

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::sIdleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sSimulationFlags = 0;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
bool MDPComp::sEnableYUVsplit = false;
bool MDPComp::sSrcSplitEnabled = false;
bool MDPComp::enablePartialUpdateForMDP3 = false;
bool MDPComp::sIsPartialUpdateActive = true;
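// Factory: picks the MDPComp variant matching the target's pipe topology --
// source-split hardware, dual-mixer (split) panels, or a single mixer -- so
// the rest of HWC can stay variant-agnostic.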
MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}

MDPComp::MDPComp(int dpy) : mDpy(dpy), mModeOn(false), mPrevModeOn(false) {
}

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s  pipesUsed:%2d  MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf," ---------------------------------------------  \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype  |  Z  \n");
    dumpsys_log(buf," ---------------------------------------------  \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                     mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                    (mCurrentFrame.drop[index] ? "DROP" :
                    (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
    mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX] = {0};

    sEnabled = false;
    if((ctx->mMDP.version >= qdutils::MDP_V4_0) &&
       (property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL &&
            (ctx->mMDP.version >= qdutils::MDP_V4_0)) {
        sIdleInvalidator = IdleInvalidator::getInstance();
        if(sIdleInvalidator->init(timeout_handler, ctx) < 0) {
            delete sIdleInvalidator;
            sIdleInvalidator = NULL;
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
        !qdutils::MDPVersion::getInstance().isRotDownscaleEnabled() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
            !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) {
        sEnableYUVsplit = true;
    }

    bool defaultPTOR = false;
    //Enable PTOR by default on 8x16 and 8x39 targets when
    //"persist.hwc.ptor.enable" is not defined
    if((property_get("persist.hwc.ptor.enable", property, NULL) <= 0) &&
            (qdutils::MDPVersion::getInstance().is8x16() ||
                qdutils::MDPVersion::getInstance().is8x39())) {
        defaultPTOR = true;
    }

    if (defaultPTOR || (!strncasecmp(property, "true", PROPERTY_VALUE_MAX)) ||
                (!strncmp(property, "1", PROPERTY_VALUE_MAX ))) {
        ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit(ctx,
                                                    HWC_DISPLAY_PRIMARY);
    }

    if((property_get("persist.mdp3.partialUpdate", property, NULL) <= 0) &&
          (ctx->mMDP.version == qdutils::MDP_V3_0_5)) {
       enablePartialUpdateForMDP3 = true;
    }

    if(!enablePartialUpdateForMDP3 &&
          (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
           (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
       enablePartialUpdateForMDP3 = true;
    }

    int retPartialUpdatePref = getPartialUpdatePref(ctx);
    if(retPartialUpdatePref >= 0)
       sIsPartialUpdateActive = (retPartialUpdatePref != 0);

    return true;
}

void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
    resetROI(ctx, mDpy);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;
}

void MDPComp::reset() {
    mPrevModeOn = mModeOn;
    mModeOn = false;
}

void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);
    bool handleTimeout = false;

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }

    ctx->mDrawLock.lock();

    /* Handle timeout event only if the previous composition
       on any display is MDP or MIXED */
    for(int i = 0; i < HWC_NUM_DISPLAY_TYPES; i++) {
        if(ctx->mMDPComp[i])
            handleTimeout =
                    ctx->mMDPComp[i]->isMDPComp() || handleTimeout;
    }

    if(!handleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        ctx->mDrawLock.unlock();
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        ctx->mDrawLock.unlock();
        return;
    }
    sIdleFallBack = true;
    ctx->mDrawLock.unlock();
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

void MDPComp::setIdleTimeout(const uint32_t& timeout) {
    enum { ONE_REFRESH_PERIOD_MS = 17, ONE_BILLION_MS = 1000000000 };
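    // 17 ms is one vsync period at 60 Hz (1000/60 ~= 16.7, rounded up), so
    // any timeout at or below one refresh period is treated as "disable".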

    if(sIdleInvalidator) {
        if(timeout <= ONE_REFRESH_PERIOD_MS) {
            //If the specified timeout is < 1 draw cycle worth, "virtually"
            //disable idle timeout. The ideal way for clients to disable
            //timeout is to set it to 0
            sIdleInvalidator->setIdleTimeout(ONE_BILLION_MS);
            ALOGI("Disabled idle timeout");
            return;
        }
        sIdleInvalidator->setIdleTimeout(timeout);
        ALOGI("Idle timeout set to %u", timeout);
    } else {
        ALOGW("Cannot set idle timeout, IdleInvalidator not enabled");
    }
}

void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
                                   hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in FB OR when it lies
             * outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
            (list->flags & HWC_GEOMETRY_CHANGED) ||
            isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0; i < MAX_PIPES_PER_MIXER; i++) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We don't own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
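    // Note: memset fills bytes, so -1 below writes 0xFF into every byte of
    // layerToMDP (reading back as -1 per int entry), and 1 marks every entry
    // of isFBComposed true.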
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // populate layer and MDP maps
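    // e.g. for isFBComposed = {1, 0, 1, 0} this yields
    // layerToMDP = {-1, 0, -1, 1} and mdpToLayer[0..1].listIndex = {1, 3}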
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}

MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
                                      hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
                (curFrame.drop[i] != drop[i])) {
            return false;
        }
        hwc_layer_1_t const* layer = &list->hwLayers[i];
        if(curFrame.isFBComposed[i] && layerUpdating(layer)) {
            return false;
        }
    }
    return true;
}

bool MDPComp::LayerCache::isSameFrame(hwc_context_t *ctx, int dpy,
                                      hwc_display_contents_1_t* list) {

    if(layerCount != ctx->listStats[dpy].numAppLayers)
        return false;

    if((list->flags & HWC_GEOMETRY_CHANGED) ||
       isSkipPresent(ctx, dpy)) {
        return false;
    }

    for(int i = 0; i < layerCount; i++) {
        hwc_layer_1_t const* layer = &list->hwLayers[i];
        if(layerUpdating(layer))
            return false;
    }

    return true;
}

bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((has90Transform(layer) and (not isRotationDoable(ctx, hnd))) ||
        (not isValidDimension(ctx,layer)) ||
        isSkipLayer(layer)) {
        //More conditions here, sRGB+Blend etc
        return false;
    }
    return true;
}

bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGD_IF(isDebug(), "%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    bool rotated90 = (bool)(layer->transform & HAL_TRANSFORM_ROT_90);
    int crop_w = rotated90 ? crop.bottom - crop.top : crop.right - crop.left;
    int crop_h = rotated90 ? crop.right - crop.left : crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);
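    // With a 90-degree transform the source is sampled rotated, hence the
    // crop width/height swap above. scale > 1.0 means MDP must downscale;
    // e.g. a 1920x1080 crop into a 960x540 destination gives
    // w_scale = h_scale = 2.0, a 2x downscale on both axes.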
    MDPVersion& mdpHw = MDPVersion::getInstance();

    /* Workaround for MDP HW limitation in DSI command mode panels where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or height
     * less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2.
     * Fall back to GPU if height is less than 2.
     */
    if(mdpHw.hasMinCropWidthLimitation() and (crop_w < 5 or crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale = mdpHw.getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!mdpHw.supportsDecimation()) {
                /* On targets that don't support decimation (e.g., 8x26),
                 * the maximum downscale supported is the overlay pipe
                 * downscale.
                 */
                if(crop_w > (int) mdpHw.getMaxMixerWidth() ||
                        w_dscale > maxMDPDownscale ||
                        h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* Bail out if
                     *      1. Src crop > Mixer limit on nonsplit MDPComp
                     *      2. exceeds maximum downscale limit
                     */
                    if(((crop_w > (int) mdpHw.getMaxMixerWidth()) &&
                                !sSrcSplitEnabled) ||
                            w_dscale > maxMDPDownscale ||
                            h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale = mdpHw.getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;
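        // Upscale factors are the reciprocals of the scale ratios; e.g. a
        // 640-wide crop stretched to a 1920-wide destination gives
        // w_uscale = 3.0, which must stay within the HW's max upscale.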

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}

bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if((qdutils::MDPVersion::getInstance().is8x26() ||
               qdutils::MDPVersion::getInstance().is8x16() ||
               qdutils::MDPVersion::getInstance().is8x39()) &&
            ctx->mVideoTransFlag &&
            isSecondaryConnected(ctx)) {
        //1 Padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                __FUNCTION__);
        ret = false;
    } else if((qdutils::MDPVersion::getInstance().is8x26() ||
               qdutils::MDPVersion::getInstance().is8x16() ||
               qdutils::MDPVersion::getInstance().is8x39()) &&
              !mDpy && isSecondaryAnimating(ctx) &&
              isYuvPresent(ctx,HWC_DISPLAY_VIRTUAL)) {
        ALOGD_IF(isDebug(),"%s: Display animation in progress",
                 __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().getTotalPipes() < 8) {
       /* TODO: We free up all resources only for targets with fewer than 8
                total pipes. Need to analyze the number of VIG pipes used
                for primary in the previous draw cycle and accordingly decide
                whether to fall back to full GPU comp or video only comp
        */
        if(isSecondaryConfiguring(ctx)) {
            ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                      __FUNCTION__);
            ret = false;
        } else if(ctx->isPaddingRound) {
            ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                     __FUNCTION__,mDpy);
            ret = false;
        }
    }
    return ret;
}

void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    fbRect = getIntersection(fbRect, roi);
}

/* 1) Identify layers that are not visible or lying outside the updating ROI and
 *    drop them from composition.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer =  &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res  = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs ROI
             * cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* deduct any opaque region from visibleRect */
            if (layer->blending == HWC_BLENDING_NONE &&
                    layer->planeAlpha == 0xFF)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting for all the updating layers'
 * displayFrames. If DirtyRegion is applicable, calculate it by accounting for
 * all the changing layers' dirtyRegions. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if (layerUpdating(layer) ||
                isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dirtyRect = (struct hwc_rect){0, 0, 0, 0};
            if(!needsScaling(layer) && !layer->transform &&
                   (!isYuvBuffer((private_handle_t *)layer->handle)))
            {
                dirtyRect = calculateDirtyRect(layer, fullFrame);
            }

            roi = getUnion(roi, dirtyRect);
        }
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect r_roi = ctx->listStats[mDpy].rRoi;

    hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi);
    hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi);
    fbRect = getUnion(l_fbRect, r_fbRect);
}
/* 1) Identify layers that are not visible or lying outside BOTH the updating
 *    ROI's and drop them from composition. If a layer is spanning across both
 *    the halves of the screen but needed by only one ROI, the non-contributing
 *    half will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer =  &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res  = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res  = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs ROI
             * cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if (layer->blending == HWC_BLENDING_NONE &&
                    layer->planeAlpha == 0xFF) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting for all the updating layers'
 * displayFrames. If DirtyRegion is applicable, calculate it by accounting for
 * all the changing layers' dirtyRegions. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if (layerUpdating(layer) || isYuvBuffer(hnd)) {
            hwc_rect_t l_dirtyRect = (struct hwc_rect){0, 0, 0, 0};
            hwc_rect_t r_dirtyRect = (struct hwc_rect){0, 0, 0, 0};
            if(!needsScaling(layer) && !layer->transform)
            {
                l_dirtyRect = calculateDirtyRect(layer, l_frame);
                r_dirtyRect = calculateDirtyRect(layer, r_frame);
            }
            if(isValidRect(l_dirtyRect))
                l_roi = getUnion(l_roi, l_dirtyRect);

            if(isValidRect(r_dirtyRect))
                r_roi = getUnion(r_roi, r_dirtyRect);
        }
    }

    /* For panels that cannot accept commands on both interfaces, we cannot
     * send two ROI's (one for each half). We merge them into a single ROI and
     * split it across lSplit for MDP mixer use. The ROI's will be merged again
     * finally before updating the panel in the driver. */
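    /* e.g. with lSplit = 540, l_roi [0,0,100,100] and r_roi [600,0,640,100]
     * merge to [0,0,640,100], then re-split into [0,0,540,100] and
     * [540,0,640,100]. */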
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]"
            "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
            ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
            ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
                                hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    // Fall back to video only composition, if AIV video mode is enabled
    if(ctx->listStats[mDpy].mAIVVideoMode) {
        ALOGD_IF(isDebug(), "%s: AIV Video Mode enabled dpy %d",
            __FUNCTION__, mDpy);
        return false;
    }

    // No idle fallback if secure display or secure RGB layers are present,
    // or if there's only a single layer being composed
    if(sIdleFallBack && (!ctx->listStats[mDpy].secureUI &&
                    !ctx->listStats[mDpy].secureRGBCount) &&
                    (ctx->listStats[mDpy].numAppLayers != 1)) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(!mDpy && isSecondaryAnimating(ctx) &&
       (isYuvPresent(ctx,HWC_DISPLAY_EXTERNAL) ||
       isYuvPresent(ctx,HWC_DISPLAY_VIRTUAL)) ) {
        ALOGD_IF(isDebug(),"%s: Display animation in progress",
                 __FUNCTION__);
        return false;
    }

    // If secondary is configuring or in a padding round, fall back to video
    // only composition and release all assigned non-VIG pipes from primary.
    if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        return false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__,mDpy);
        return false;
    }

    MDPVersion& mdpHw = MDPVersion::getInstance();
    if(mDpy > HWC_DISPLAY_PRIMARY &&
            (priDispW >  (int) mdpHw.getMaxMixerWidth()) &&
            (ctx->dpyAttr[mDpy].xres <  mdpHw.getMaxMixerWidth())) {
        // Disable MDP comp on the secondary when the primary is a highres
        // panel and the secondary is a normal 1080p: in such a use case
        // decimation gets used for the downscale on the secondary, and there
        // will be a quality mismatch whenever composition falls back to GPU
        ALOGD_IF(isDebug(), "%s: Disable MDP Composition for Secondary Disp",
              __FUNCTION__);
        return false;
    }

    // check for action safe flag and MDP scaling mode which requires scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
            || ctx->dpyAttr[mDpy].mMDPScalingMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        //For 8x26 with panel width > 1k, fail MDP comp if an RGB layer needs
        //HFLIP. This may not be needed if GFX pre-rotation can handle all
        //flips & rotations
        int transform = (layer->flags & HWC_COLOR_FILL) ? 0 : layer->transform;
        if( mdpHw.is8x26() && (ctx->dpyAttr[mDpy].xres > 1024) &&
                (transform & HWC_TRANSFORM_FLIP_H) && (!isYuvBuffer(hnd)))
            return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(fullMDPCompWithPTOR(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}

bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    //Will benefit presentation / secondary-only layers.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
            (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
           not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    if(!mDpy && isSecondaryConnected(ctx) &&
           (qdutils::MDPVersion::getInstance().is8x16() ||
            qdutils::MDPVersion::getInstance().is8x26() ||
            qdutils::MDPVersion::getInstance().is8x39()) &&
           isYuvPresent(ctx, HWC_DISPLAY_VIRTUAL)) {
        ALOGD_IF(isDebug(), "%s: YUV layer present on secondary", __FUNCTION__);
        return false;
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnableYUVsplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Full MDP Composition with Peripheral Tiny Overlap Removal.
 * MDP bandwidth limitations can be avoided, if the overlap region
 * covered by the smallest layer at a higher z-order, gets composed
 * by Copybit on a render buffer, which can be queued to MDP.
 */
bool MDPComp::fullMDPCompWithPTOR(hwc_context_t *ctx,
    hwc_display_contents_1_t* list) {

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));
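    // stagesForMDP bounds how many layers MDP can take this frame: the
    // per-mixer stage limit, capped by the pipes currently free.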

    // Hard checks where we cannot use this mode
    if (mDpy || !ctx->mCopyBit[mDpy]) {
        ALOGD_IF(isDebug(), "%s: Feature not supported!", __FUNCTION__);
        return false;
    }

    // Frame level checks
    if ((numAppLayers > stagesForMDP) || isSkipPresent(ctx, mDpy) ||
        isYuvPresent(ctx, mDpy) || mCurrentFrame.dropCount ||
        isSecurePresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: Frame not supported!", __FUNCTION__);
        return false;
    }
    // MDP comp checks
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    if(!mDpy && isSecondaryConnected(ctx) &&
           (qdutils::MDPVersion::getInstance().is8x16() ||
            qdutils::MDPVersion::getInstance().is8x26() ||
            qdutils::MDPVersion::getInstance().is8x39()) &&
           isYuvPresent(ctx, HWC_DISPLAY_VIRTUAL)) {
        ALOGD_IF(isDebug(), "%s: YUV layer present on secondary", __FUNCTION__);
        return false;
    }

    /* We cannot use this composition mode, if:
     1. A below layer needs scaling.
     2. Overlap is not peripheral to display.
     3. Overlap or a below layer has 90 degree transform.
     4. Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
     */

    int minLayerIndex[MAX_PTOR_LAYERS] = { -1, -1};
    hwc_rect_t overlapRect[MAX_PTOR_LAYERS];
    memset(overlapRect, 0, sizeof(overlapRect));
    int layerPixelCount, minPixelCount = 0;
    int numPTORLayersFound = 0;
    for (int i = numAppLayers-1; (i >= 0 &&
                                  numPTORLayersFound < MAX_PTOR_LAYERS); i--) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
        hwc_rect_t dispFrame = layer->displayFrame;
        layerPixelCount = (crop.right - crop.left) * (crop.bottom - crop.top);
        // PTOR layer should be peripheral and cannot have transform
        if (!isPeripheral(dispFrame, ctx->mViewFrame[mDpy]) ||
                                has90Transform(layer)) {
            continue;
        }
        if((3 * (layerPixelCount + minPixelCount)) >
                ((int)ctx->dpyAttr[mDpy].xres * (int)ctx->dpyAttr[mDpy].yres)) {
            // Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
            continue;
        }
        bool found = false;
        for (int j = i-1; j >= 0; j--) {
            // Check if the layers below this layer qualify for PTOR comp
            hwc_layer_1_t* layer = &list->hwLayers[j];
            hwc_rect_t disFrame = layer->displayFrame;
            // A layer below the PTOR that intersects it cannot be supported
            // if it has a 90 degree transform or needs scaling.
            if (isValidRect(getIntersection(dispFrame, disFrame))) {
                if (has90Transform(layer) || needsScaling(layer)) {
                    found = false;
                    break;
                }
                found = true;
            }
        }
        // Store the minLayer index
        if(found) {
            minLayerIndex[numPTORLayersFound] = i;
            overlapRect[numPTORLayersFound] = list->hwLayers[i].displayFrame;
            minPixelCount += layerPixelCount;
            numPTORLayersFound++;
        }
    }

    // No overlap layers
    if (!numPTORLayersFound)
        return false;

    // Store the displayFrame and the sourceCrops of the layers
    hwc_rect_t displayFrame[numAppLayers];
    hwc_rect_t sourceCrop[numAppLayers];
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        displayFrame[i] = layer->displayFrame;
        sourceCrop[i] = integerizeSourceCrop(layer->sourceCropf);
    }

    /**
     * It's possible that the 2 PTOR layers overlap. In that case, remove the
     * intersection (again, if peripheral) from the lower PTOR layer to avoid
     * the overlap. If the intersection is not peripheral, compromise by
     * reducing the number of PTOR layers.
     **/
    hwc_rect_t commonRect = getIntersection(overlapRect[0], overlapRect[1]);
    if(isValidRect(commonRect)) {
        overlapRect[1] = deductRect(overlapRect[1], commonRect);
        list->hwLayers[minLayerIndex[1]].displayFrame = overlapRect[1];
    }

    ctx->mPtorInfo.count = numPTORLayersFound;
    for(int i = 0; i < MAX_PTOR_LAYERS; i++) {
        ctx->mPtorInfo.layerIndex[i] = minLayerIndex[i];
    }

    if (!ctx->mCopyBit[mDpy]->prepareOverlap(ctx, list)) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        if(isValidRect(commonRect)) {
            // If PTORs are intersecting restore displayframe of PTOR[1]
            // before returning, as we have modified it above.
            list->hwLayers[minLayerIndex[1]].displayFrame =
                    displayFrame[minLayerIndex[1]];
        }
        return false;
    }
    private_handle_t *renderBuf = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
    Whf layerWhf[MAX_PTOR_LAYERS]; // To store w,h,f of PTOR layers

    // Store the blending mode, planeAlpha, and transform of PTOR layers
    int32_t blending[numPTORLayersFound];
    uint8_t planeAlpha[numPTORLayersFound];
    uint32_t transform[numPTORLayersFound];

    for(int j = 0; j < numPTORLayersFound; j++) {
        int index =  ctx->mPtorInfo.layerIndex[j];

        // Update src crop of PTOR layer
        hwc_layer_1_t* layer = &list->hwLayers[index];
        layer->sourceCropf.left = (float)ctx->mPtorInfo.displayFrame[j].left;
        layer->sourceCropf.top = (float)ctx->mPtorInfo.displayFrame[j].top;
        layer->sourceCropf.right = (float)ctx->mPtorInfo.displayFrame[j].right;
        layer->sourceCropf.bottom =(float)ctx->mPtorInfo.displayFrame[j].bottom;

        // Store & update w, h, format of PTOR layer
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        Whf whf(hnd->width, hnd->height, hnd->format, hnd->size);
        layerWhf[j] = whf;
        hnd->width = renderBuf->width;
        hnd->height = renderBuf->height;
        hnd->format = renderBuf->format;

        // Store & update blending mode, planeAlpha and transform of PTOR layer
        blending[j] = layer->blending;
        planeAlpha[j] = layer->planeAlpha;
        transform[j] = layer->transform;
        layer->blending = HWC_BLENDING_NONE;
        layer->planeAlpha = 0xFF;
        layer->transform = 0;

        // Remove overlap from crop & displayFrame of below layers
        for (int i = 0; i < index && index !=-1; i++) {
            layer = &list->hwLayers[i];
            if(!isValidRect(getIntersection(layer->displayFrame,
                                            overlapRect[j])))  {
                continue;
            }
            // Update layer attributes
            hwc_rect_t srcCrop = integerizeSourceCrop(layer->sourceCropf);
            hwc_rect_t destRect = deductRect(layer->displayFrame,
                        getIntersection(layer->displayFrame, overlapRect[j]));
            qhwc::calculate_crop_rects(srcCrop, layer->displayFrame, destRect,
                                       layer->transform);
            layer->sourceCropf.left = (float)srcCrop.left;
            layer->sourceCropf.top = (float)srcCrop.top;
            layer->sourceCropf.right = (float)srcCrop.right;
            layer->sourceCropf.bottom = (float)srcCrop.bottom;
        }
    }

    mCurrentFrame.mdpCount = numAppLayers;
    mCurrentFrame.fbCount = 0;
    mCurrentFrame.fbZ = -1;

    for (int j = 0; j < numAppLayers; j++) {
        if(isValidRect(list->hwLayers[j].displayFrame)) {
            mCurrentFrame.isFBComposed[j] = false;
        } else {
            mCurrentFrame.mdpCount--;
            mCurrentFrame.drop[j] = true;
        }
    }

    bool result = postHeuristicsHandling(ctx, list);

    // Restore layer attributes
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        layer->displayFrame = displayFrame[i];
        layer->sourceCropf.left = (float)sourceCrop[i].left;
        layer->sourceCropf.top = (float)sourceCrop[i].top;
        layer->sourceCropf.right = (float)sourceCrop[i].right;
        layer->sourceCropf.bottom = (float)sourceCrop[i].bottom;
    }

    // Restore w,h,f, blending attributes, and transform of PTOR layers
    for (int i = 0; i < numPTORLayersFound; i++) {
        int idx = ctx->mPtorInfo.layerIndex[i];
        hwc_layer_1_t* layer = &list->hwLayers[idx];
        private_handle_t *hnd = (private_handle_t *)list->hwLayers[idx].handle;
        hnd->width = layerWhf[i].w;
        hnd->height = layerWhf[i].h;
        hnd->format = layerWhf[i].format;
        layer->blending = blending[i];
        layer->planeAlpha = planeAlpha[i];
        layer->transform = transform[i];
    }

    if (!result) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        reset(ctx);
    } else {
        ALOGD_IF(isDebug(), "%s: PTOR Indexes: %d and %d", __FUNCTION__,
                 ctx->mPtorInfo.layerIndex[0],  ctx->mPtorInfo.layerIndex[1]);
    }

    ALOGD_IF(isDebug(), "%s: Postheuristics %s!", __FUNCTION__,
             (result ? "successful" : "failed"));
    return result;
}

bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode || !isAlphaPresentinFB(ctx, mDpy)) {
        //Mixed mode is disabled/can't be used. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(isSkipPresent(ctx, mDpy) or list->flags & HWC_GEOMETRY_CHANGED) {
        //Try load based first
        ret =   loadBasedComp(ctx, list) or
                cacheBasedComp(ctx, list);
    } else {
        ret =   cacheBasedComp(ctx, list) or
                loadBasedComp(ctx, list);
    }

    return ret;
}

bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list, mCurrentFrame);

    //If an MDP-marked layer is unsupported, we cannot do partial MDP comp
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                        __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/, mCurrentFrame);
    /* mark secure RGB layers for MDP comp */
    updateSecureRGB(ctx, list);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnableYUVsplit){
        adjustForSourceSplit(ctx, list);
    }

    //Will benefit cases where a video has a non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
            (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
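    // e.g. with 8 available stages and 10 non-dropped layers: mdpBatchSize
    // starts at 7 (one stage is reserved for the FB target) and fbBatchSize
    // at 3; the search below then shrinks the MDP batch until a split sticks.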
1224     int lastMDPSupportedIndex = numAppLayers;
1225     int dropCount = 0;
1226 
1227     //Find the minimum MDP batch size
1228     for(int i = 0; i < numAppLayers;i++) {
1229         if(mCurrentFrame.drop[i]) {
1230             dropCount++;
1231             continue;
1232         }
1233         hwc_layer_1_t* layer = &list->hwLayers[i];
1234         if(not isSupportedForMDPComp(ctx, layer)) {
1235             lastMDPSupportedIndex = i;
1236             mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
1237             fbBatchSize = numNonDroppedLayers - mdpBatchSize;
1238             break;
1239         }
1240     }
1241 
1242     ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
1243             "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
1244             mCurrentFrame.dropCount);
1245 
1246     //Start at a point where the fb batch should at least have 2 layers, for
1247     //this mode to be justified.
1248     while(fbBatchSize < 2) {
1249         ++fbBatchSize;
1250         --mdpBatchSize;
1251     }
1252 
1253     //If there are no layers for MDP, this mode doesnt make sense.
1254     if(mdpBatchSize < 1) {
1255         ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
1256                 __FUNCTION__);
1257         return false;
1258     }
1259 
1260     mCurrentFrame.reset(numAppLayers);
1261 
1262     //Try with successively smaller mdp batch sizes until we succeed or reach 1
1263     while(mdpBatchSize > 0) {
1264         //Mark layers for MDP comp
1265         int mdpBatchLeft = mdpBatchSize;
1266         for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
1267             if(mCurrentFrame.drop[i]) {
1268                 continue;
1269             }
1270             mCurrentFrame.isFBComposed[i] = false;
1271             --mdpBatchLeft;
1272         }
1273 
1274         mCurrentFrame.fbZ = mdpBatchSize;
1275         mCurrentFrame.fbCount = fbBatchSize;
1276         mCurrentFrame.mdpCount = mdpBatchSize;
1277 
1278         ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
1279                 __FUNCTION__, mdpBatchSize, fbBatchSize,
1280                 mCurrentFrame.dropCount);
1281 
1282         if(postHeuristicsHandling(ctx, list)) {
1283             ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
1284                      __FUNCTION__);
1285             ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED",
1286                      __FUNCTION__);
1287             return true;
1288         }
1289 
1290         reset(ctx);
1291         --mdpBatchSize;
1292         ++fbBatchSize;
1293     }
1294 
1295     return false;
1296 }
1297 
isLoadBasedCompDoable(hwc_context_t * ctx)1298 bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
1299     if(mDpy or isSecurePresent(ctx, mDpy) or
1300             isYuvPresent(ctx, mDpy)) {
1301         return false;
1302     }
1303     return true;
1304 }
1305 
canPartialUpdate(hwc_context_t * ctx,hwc_display_contents_1_t * list)1306 bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
1307         hwc_display_contents_1_t* list){
1308     if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
1309             isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
1310             !sIsPartialUpdateActive || mDpy ) {
1311         return false;
1312     }
1313     if(ctx->listStats[mDpy].secureUI)
1314         return false;
1315     return true;
1316 }
1317 
tryVideoOnly(hwc_context_t * ctx,hwc_display_contents_1_t * list)1318 bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
1319         hwc_display_contents_1_t* list) {
1320     const bool secureOnly = true;
1321     return videoOnlyComp(ctx, list, not secureOnly) or
1322             videoOnlyComp(ctx, list, secureOnly);
1323 }
1324 
videoOnlyComp(hwc_context_t * ctx,hwc_display_contents_1_t * list,bool secureOnly)1325 bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
1326         hwc_display_contents_1_t* list, bool secureOnly) {
1327     if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
1328         return false;
1329     int numAppLayers = ctx->listStats[mDpy].numAppLayers;
1330 
1331     mCurrentFrame.reset(numAppLayers);
1332     mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
1333     updateYUV(ctx, list, secureOnly, mCurrentFrame);
1334     int mdpCount = mCurrentFrame.mdpCount;
1335 
1336     if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
1337         reset(ctx);
1338         return false;
1339     }
1340 
1341     /* Bail out if we are processing only secure video layers
1342      * and we don't have any */
1343     if(!isSecurePresent(ctx, mDpy) && secureOnly){
1344         reset(ctx);
1345         return false;
1346     }
1347 
1348     if(mCurrentFrame.fbCount)
1349         mCurrentFrame.fbZ = mCurrentFrame.mdpCount;
1350 
1351     if(sEnableYUVsplit){
1352         adjustForSourceSplit(ctx, list);
1353     }
1354 
1355     if(!postHeuristicsHandling(ctx, list)) {
1356         ALOGD_IF(isDebug(), "post heuristic handling failed");
1357         reset(ctx);
1358         return false;
1359     }
1360 
1361     ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED",
1362              __FUNCTION__);
1363     return true;
1364 }
1365 
1366 /* if tryFullFrame fails, try to push all video and secure RGB layers to MDP */
1367 bool MDPComp::tryMDPOnlyLayers(hwc_context_t *ctx,
1368         hwc_display_contents_1_t* list) {
1369     // Fall back to video only composition, if AIV video mode is enabled
1370     if(ctx->listStats[mDpy].mAIVVideoMode) {
1371         ALOGD_IF(isDebug(), "%s: AIV Video Mode enabled dpy %d",
1372             __FUNCTION__, mDpy);
1373         return false;
1374     }
1375 
1376     const bool secureOnly = true;
1377     return mdpOnlyLayersComp(ctx, list, not secureOnly) or
1378             mdpOnlyLayersComp(ctx, list, secureOnly);
1379 
1380 }
1381 
1382 bool MDPComp::mdpOnlyLayersComp(hwc_context_t *ctx,
1383         hwc_display_contents_1_t* list, bool secureOnly) {
1384 
1385     if(sSimulationFlags & MDPCOMP_AVOID_MDP_ONLY_LAYERS)
1386         return false;
1387 
1388     /* Bail out if we are processing only secure video layers
1389      * and we don't have any */
1390     if(!isSecurePresent(ctx, mDpy) && secureOnly){
1391         reset(ctx);
1392         return false;
1393     }
1394 
1395     int numAppLayers = ctx->listStats[mDpy].numAppLayers;
1396     mCurrentFrame.reset(numAppLayers);
1397     mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
1398 
1399     updateYUV(ctx, list, secureOnly, mCurrentFrame);
1400     /* mark secure RGB layers for MDP comp */
1401     updateSecureRGB(ctx, list);
1402 
1403     if(mCurrentFrame.mdpCount == 0) {
1404         reset(ctx);
1405         return false;
1406     }
1407 
1408     /* find the maximum batch of layers to be marked for framebuffer */
1409     bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
1410     if(!ret) {
1411         ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
1412         reset(ctx);
1413         return false;
1414     }
1415 
1416     if(sEnableYUVsplit){
1417         adjustForSourceSplit(ctx, list);
1418     }
1419 
1420     if(!postHeuristicsHandling(ctx, list)) {
1421         ALOGD_IF(isDebug(), "post heuristic handling failed");
1422         reset(ctx);
1423         return false;
1424     }
1425 
1426     ALOGD_IF(sSimulationFlags,"%s: MDP_ONLY_LAYERS_COMP SUCCEEDED",
1427              __FUNCTION__);
1428     return true;
1429 }
1430 
1431 /* Checks for conditions where YUV layers cannot be bypassed */
1432 bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
1433     if(isSkipLayer(layer)) {
1434         ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
1435         return false;
1436     }
1437 
1438     if(has90Transform(layer) && !canUseRotator(ctx, mDpy)) {
1439         ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
1440         return false;
1441     }
1442 
1443     if(isSecuring(ctx, layer)) {
1444         ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
1445         return false;
1446     }
1447 
1448     if(!isValidDimension(ctx, layer)) {
1449         ALOGD_IF(isDebug(), "%s: Buffer has invalid dimensions",
1450             __FUNCTION__);
1451         return false;
1452     }
1453 
1454     if(layer->planeAlpha < 0xFF) {
1455         ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\
1456                  in video only mode",
1457                  __FUNCTION__);
1458         return false;
1459     }
1460 
1461     return true;
1462 }
1463 
1464 /* Checks for conditions where Secure RGB layers cannot be bypassed */
1465 bool MDPComp::isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
1466     if(isSkipLayer(layer)) {
1467         ALOGD_IF(isDebug(), "%s: Secure RGB layer marked SKIP dpy %d",
1468             __FUNCTION__, mDpy);
1469         return false;
1470     }
1471 
1472     if(isSecuring(ctx, layer)) {
1473         ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
1474         return false;
1475     }
1476 
1477     if(not isSupportedForMDPComp(ctx, layer)) {
1478         ALOGD_IF(isDebug(), "%s: Unsupported secure RGB layer",
1479             __FUNCTION__);
1480         return false;
1481     }
1482     return true;
1483 }
1484 
1485 /* Starts at fromIndex and checks each FB-composed layer for overlap
1486  * with any updating layer above it in z-order, up to toIndex.
1487  * Returns false if any intersection is found, true otherwise. */
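/* Illustrative example (hypothetical stack): with z-order 0..3 =
 * [FB, MDP, FB, MDP], fromIndex=0 and toIndex=3, the FB layer at index
 * 0 is tested against the updating (MDP) layers at indices 1..3, and
 * the FB layer at index 2 against index 3; any displayFrame overlap
 * makes the batch unsafe to push up and this returns false. */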
1488 bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
1489         int fromIndex, int toIndex) {
1490     for(int i = fromIndex; i < toIndex; i++) {
1491         if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
1492             if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
1493                 return false;
1494             }
1495         }
1496     }
1497     return true;
1498 }
1499 
1500 /* Checks if the layer at targetLayerIndex intersects any of the
1501  * updating layers between fromIndex and toIndex (inclusive).
1502  * Returns true if an intersection is found. */
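/* For instance (hypothetical indices): with fromIndex=1, toIndex=3 and
 * targetLayerIndex=0, every MDP-marked layer among indices 1..3 has its
 * displayFrame tested against layer 0 via areLayersIntersecting(). */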
1503 bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
1504         int fromIndex, int toIndex, int targetLayerIndex) {
1505     for(int i = fromIndex; i <= toIndex; i++) {
1506         if(!mCurrentFrame.isFBComposed[i]) {
1507             if(areLayersIntersecting(&list->hwLayers[i],
1508                         &list->hwLayers[targetLayerIndex]))  {
1509                 return true;
1510             }
1511         }
1512     }
1513     return false;
1514 }
1515 
1516 int MDPComp::getBatch(hwc_display_contents_1_t* list,
1517         int& maxBatchStart, int& maxBatchEnd,
1518         int& maxBatchCount) {
1519     int i = 0;
1520     int fbZOrder =-1;
1521     int droppedLayerCt = 0;
1522     while (i < mCurrentFrame.layerCount) {
1523         int batchCount = 0;
1524         int batchStart = i;
1525         int batchEnd = i;
1526         /* Adjust the batch Z order for the layers dropped so far */
1527         int fbZ = batchStart - droppedLayerCt;
1528         int firstZReverseIndex = -1;
1529         int updatingLayersAbove = 0;//Updating layer count in middle of batch
1530         while(i < mCurrentFrame.layerCount) {
1531             if(!mCurrentFrame.isFBComposed[i]) {
1532                 if(!batchCount) {
1533                     i++;
1534                     break;
1535                 }
1536                 updatingLayersAbove++;
1537                 i++;
1538                 continue;
1539             } else {
1540                 if(mCurrentFrame.drop[i]) {
1541                     i++;
1542                     droppedLayerCt++;
1543                     continue;
1544                 } else if(updatingLayersAbove <= 0) {
1545                     batchCount++;
1546                     batchEnd = i;
1547                     i++;
1548                     continue;
1549             } else { //Layer is FBComposed, not a drop & updatingLayersAbove > 0
1550 
1551             // We already have a valid updating layer. If layer-i does
1552             // not overlap any updating layer between batch-start and i,
1553             // then we can add layer-i to the batch.
1554                     if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
1555                         batchCount++;
1556                         batchEnd = i;
1557                         i++;
1558                         continue;
1559                     } else if(canPushBatchToTop(list, batchStart, i)) {
1560                         //If none of the non-updating layers within this
1561                         //batch intersects the updating layers above in
1562                         //z-order, we can safely move the batch to a higher
1563                         //z-order. Increment fbZ as it is moving up.
1564                         if( firstZReverseIndex < 0) {
1565                             firstZReverseIndex = i;
1566                         }
1567                         batchCount++;
1568                         batchEnd = i;
1569                         fbZ += updatingLayersAbove;
1570                         i++;
1571                         updatingLayersAbove = 0;
1572                         continue;
1573                     } else {
1574                         //Both checks failed. Restart the loop from here.
1575                         if(firstZReverseIndex >= 0) {
1576                             i = firstZReverseIndex;
1577                         }
1578                         break;
1579                     }
1580                 }
1581             }
1582         }
1583         if(batchCount > maxBatchCount) {
1584             maxBatchCount = batchCount;
1585             maxBatchStart = batchStart;
1586             maxBatchEnd = batchEnd;
1587             fbZOrder = fbZ;
1588         }
1589     }
1590     return fbZOrder;
1591 }
1592 
1593 bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
1594         hwc_display_contents_1_t* list) {
1595     /* The idea is to keep as many non-updating (cached) layers in FB
1596      * as possible and send the rest through MDP. This is done in 2 steps.
1597      *   1. Find the maximum contiguous batch of non-updating layers.
1598      *   2. See if this batch can be grown for caching by adding
1599      *      opaque layers around it, provided they don't overlap
1600      *      any of the updating layers in between.
1601      * NEVER mark an updating layer for caching.
1602      * But cached ones can be marked for MDP */
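    /* Worked example (hypothetical): for layers 0..4 marked
     * [MDP, FB, FB, MDP, FB], step 1 finds the contiguous FB batch {1,2}.
     * Step 2 may then also pull layer 4 into the batch, but only if it
     * does not overlap the updating layer 3 lying between them;
     * otherwise layer 4 is marked for MDP instead. */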
1603 
1604     int maxBatchStart = -1;
1605     int maxBatchEnd = -1;
1606     int maxBatchCount = 0;
1607     int fbZ = -1;
1608 
1609     /* Nothing is cached. No batching needed */
1610     if(mCurrentFrame.fbCount == 0) {
1611         return true;
1612     }
1613 
1614     /* No MDP comp layers, try to use other comp modes */
1615     if(mCurrentFrame.mdpCount == 0) {
1616         return false;
1617     }
1618 
1619     fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);
1620 
1621     /* reset the rest of the layers lying inside the ROI for MDP comp */
1622     for(int i = 0; i < mCurrentFrame.layerCount; i++) {
1623         hwc_layer_1_t* layer = &list->hwLayers[i];
1624         if((i < maxBatchStart || i > maxBatchEnd) &&
1625                 mCurrentFrame.isFBComposed[i]){
1626             if(!mCurrentFrame.drop[i]){
1627                 //If we attempt to pull an unsupported layer
1628                 //out of the FB batch, we must fail
1629                 if(not isSupportedForMDPComp(ctx, layer)) {
1630                     return false;
1631                 }
1632                 mCurrentFrame.isFBComposed[i] = false;
1633             }
1634         }
1635     }
1636 
1637     // update the frame data
1638     mCurrentFrame.fbZ = fbZ;
1639     mCurrentFrame.fbCount = maxBatchCount;
1640     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1641             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1642 
1643     ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
1644             mCurrentFrame.fbCount);
1645 
1646     return true;
1647 }
1648 
1649 void MDPComp::updateLayerCache(hwc_context_t* ctx,
1650         hwc_display_contents_1_t* list, FrameInfo& frame) {
1651     int numAppLayers = ctx->listStats[mDpy].numAppLayers;
1652     int fbCount = 0;
1653 
1654     for(int i = 0; i < numAppLayers; i++) {
1655         hwc_layer_1_t * layer = &list->hwLayers[i];
1656         if (!layerUpdating(layer)) {
1657             if(!frame.drop[i])
1658                 fbCount++;
1659             frame.isFBComposed[i] = true;
1660         } else {
1661             frame.isFBComposed[i] = false;
1662         }
1663     }
1664 
1665     frame.fbCount = fbCount;
1666     frame.mdpCount = frame.layerCount - frame.fbCount
1667                                             - frame.dropCount;
1668 
1669     ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d",
1670             __FUNCTION__, frame.mdpCount, frame.fbCount, frame.dropCount);
1671 }
1672 
1673 // Drop all non-AIV layers from the external display list.
1674 void MDPComp::dropNonAIVLayers(hwc_context_t* ctx,
1675                               hwc_display_contents_1_t* list) {
1676     for (size_t i = 0; i < (size_t)ctx->listStats[mDpy].numAppLayers; i++) {
1677         hwc_layer_1_t * layer = &list->hwLayers[i];
1678          if(!(isAIVVideoLayer(layer) || isAIVCCLayer(layer))) {
1679             mCurrentFrame.dropCount++;
1680             mCurrentFrame.drop[i] = true;
1681         }
1682     }
1683     mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
1684     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1685             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1686     ALOGD_IF(isDebug(),"%s: fb count: %d mdp count %d drop count %d",
1687         __FUNCTION__, mCurrentFrame.fbCount, mCurrentFrame.mdpCount,
1688         mCurrentFrame.dropCount);
1689 }
1690 
1691 void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
1692         bool secureOnly, FrameInfo& frame) {
1693     int nYuvCount = ctx->listStats[mDpy].yuvCount;
1694     for(int index = 0;index < nYuvCount; index++){
1695         int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
1696         hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];
1697 
1698         if(mCurrentFrame.drop[nYuvIndex]) {
1699             continue;
1700         }
1701 
1702         if(!isYUVDoable(ctx, layer)) {
1703             if(!frame.isFBComposed[nYuvIndex]) {
1704                 frame.isFBComposed[nYuvIndex] = true;
1705                 frame.fbCount++;
1706             }
1707         } else {
1708             if(frame.isFBComposed[nYuvIndex]) {
1709                 private_handle_t *hnd = (private_handle_t *)layer->handle;
1710                 if(!secureOnly || isSecureBuffer(hnd)) {
1711                     frame.isFBComposed[nYuvIndex] = false;
1712                     frame.fbCount--;
1713                 }
1714             }
1715         }
1716     }
1717 
1718     frame.mdpCount = frame.layerCount - frame.fbCount - frame.dropCount;
1719     ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__, frame.fbCount);
1720 }
1721 
1722 void MDPComp::updateSecureRGB(hwc_context_t* ctx,
1723     hwc_display_contents_1_t* list) {
1724     int nSecureRGBCount = ctx->listStats[mDpy].secureRGBCount;
1725     for(int index = 0;index < nSecureRGBCount; index++){
1726         int nSecureRGBIndex = ctx->listStats[mDpy].secureRGBIndices[index];
1727         hwc_layer_1_t* layer = &list->hwLayers[nSecureRGBIndex];
1728 
1729         if(!isSecureRGBDoable(ctx, layer)) {
1730             if(!mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
1731                 mCurrentFrame.isFBComposed[nSecureRGBIndex] = true;
1732                 mCurrentFrame.fbCount++;
1733             }
1734         } else {
1735             if(mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
1736                 mCurrentFrame.isFBComposed[nSecureRGBIndex] = false;
1737                 mCurrentFrame.fbCount--;
1738             }
1739         }
1740     }
1741 
1742     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1743             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1744     ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
1745              mCurrentFrame.fbCount);
1746 }
1747 
1748 hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
1749         hwc_display_contents_1_t* list){
1750     hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};
1751 
1752     /* Update only the region of FB needed for composition */
1753     for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
1754         if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
1755             hwc_layer_1_t* layer = &list->hwLayers[i];
1756             hwc_rect_t dst = layer->displayFrame;
1757             fbRect = getUnion(fbRect, dst);
1758         }
1759     }
1760     trimAgainstROI(ctx, fbRect);
1761     return fbRect;
1762 }
1763 
1764 bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
1765         hwc_display_contents_1_t* list) {
1766 
1767     //Capability checks
1768     if(!resourceCheck(ctx, list)) {
1769         ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
1770         return false;
1771     }
1772 
1773     //Limitations checks
1774     if(!hwLimitationsCheck(ctx, list)) {
1775         ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
1776         return false;
1777     }
1778 
1779     //Configure framebuffer first if applicable
1780     if(mCurrentFrame.fbZ >= 0) {
1781         hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
1782         if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
1783         {
1784             ALOGD_IF(isDebug(), "%s configure framebuffer failed",
1785                     __FUNCTION__);
1786             return false;
1787         }
1788     }
1789 
1790     mCurrentFrame.map();
1791 
1792     if(!allocLayerPipes(ctx, list)) {
1793         ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
1794         return false;
1795     }
1796 
1797     for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
1798             index++) {
1799         if(!mCurrentFrame.isFBComposed[index]) {
1800             int mdpIndex = mCurrentFrame.layerToMDP[index];
1801             hwc_layer_1_t* layer = &list->hwLayers[index];
1802 
1803             //Leave fbZ for framebuffer. CACHE/GLES layers go here.
1804             if(mdpNextZOrder == mCurrentFrame.fbZ) {
1805                 mdpNextZOrder++;
1806             }
1807             MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
1808             cur_pipe->zOrder = mdpNextZOrder++;
1809 
1810             private_handle_t *hnd = (private_handle_t *)layer->handle;
1811             if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){
1812                 if(configure4k2kYuv(ctx, layer,
1813                             mCurrentFrame.mdpToLayer[mdpIndex])
1814                         != 0 ){
1815                     ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
1816                             for layer %d",__FUNCTION__, index);
1817                     return false;
1818                 }
1819                 else{
1820                     mdpNextZOrder++;
1821                 }
1822                 continue;
1823             }
1824             if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
1825                 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
1826                         layer %d",__FUNCTION__, index);
1827                 return false;
1828             }
1829         }
1830     }
1831 
1832     if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
1833         ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
1834                 ,__FUNCTION__, mDpy);
1835         return false;
1836     }
1837 
1838     setRedraw(ctx, list);
1839     return true;
1840 }
1841 
1842 bool MDPComp::resourceCheck(hwc_context_t* ctx,
1843         hwc_display_contents_1_t* list) {
1844     const bool fbUsed = mCurrentFrame.fbCount;
1845     if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
1846         ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
1847         return false;
1848     }
1849     // Init rotCount to number of rotate sessions used by other displays
1850     int rotCount = ctx->mRotMgr->getNumActiveSessions();
1851     // Count the number of rotator sessions required for current display
1852     for (int index = 0; index < mCurrentFrame.layerCount; index++) {
1853         if(!mCurrentFrame.isFBComposed[index]) {
1854             hwc_layer_1_t* layer = &list->hwLayers[index];
1855             private_handle_t *hnd = (private_handle_t *)layer->handle;
1856             if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
1857                 rotCount++;
1858             }
1859         }
1860     }
1861     // if number of layers to rotate exceeds max rotator sessions, bail out.
1862     if(rotCount > RotMgr::MAX_ROT_SESS) {
1863         ALOGD_IF(isDebug(), "%s: Exceeds max rotator sessions, dpy %d",
1864                                     __FUNCTION__, mDpy);
1865         return false;
1866     }
1867     return true;
1868 }
1869 
1870 bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
1871         hwc_display_contents_1_t* list) {
1872 
1873     //A-family hw limitation:
1874     //MDP cannot support a layer that needs alpha scaling.
1875     if(ctx->mMDP.version < qdutils::MDSS_V5) {
1876         for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
1877             if(!mCurrentFrame.isFBComposed[i] &&
1878                     isAlphaScaled( &list->hwLayers[i])) {
1879                 ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
1880                 return false;
1881             }
1882         }
1883     }
1884 
1885     // On 8x26 & 8974 hw, we have a limitation with downscaling+blending.
1886     //If multiple layers require downscaling and they also overlap,
1887     //fall back to GPU since MDSS cannot handle it.
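    // E.g. (hypothetical): two MDP-marked layers, each scaled down from
    // 1920x1080 to 960x540 with intersecting displayFrames, would force
    // this frame back to GPU composition on these targets.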
1888     if(qdutils::MDPVersion::getInstance().is8x74v2() ||
1889             qdutils::MDPVersion::getInstance().is8x26()) {
1890         for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
1891             hwc_layer_1_t* botLayer = &list->hwLayers[i];
1892             if(!mCurrentFrame.isFBComposed[i] &&
1893                     isDownscaleRequired(botLayer)) {
1894                 //if layer-i is marked for MDP and needs downscaling
1895                 //check if any MDP layer on top of i & overlaps with layer-i
1896                 for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
1897                     hwc_layer_1_t* topLayer = &list->hwLayers[j];
1898                     if(!mCurrentFrame.isFBComposed[j] &&
1899                             isDownscaleRequired(topLayer)) {
1900                         hwc_rect_t r = getIntersection(botLayer->displayFrame,
1901                                 topLayer->displayFrame);
1902                         if(isValidRect(r))
1903                             return false;
1904                     }
1905                 }
1906             }
1907         }
1908     }
1909     return true;
1910 }
1911 
1912 void MDPComp::setDynRefreshRate(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
1913     //For primary display, set the dynamic refresh rate
1914     if(!mDpy && qdutils::MDPVersion::getInstance().isDynFpsSupported() &&
1915                                         ctx->mUseMetaDataRefreshRate) {
1916         FrameInfo frame;
1917         frame.reset(mCurrentFrame.layerCount);
1918         memset(&frame.drop, 0, sizeof(frame.drop));
1919         frame.dropCount = 0;
1920         ALOGD_IF(isDebug(), "%s: Update Cache and YUVInfo for Dyn Refresh Rate",
1921                  __FUNCTION__);
1922         updateLayerCache(ctx, list, frame);
1923         updateYUV(ctx, list, false /*secure only*/, frame);
1924         uint32_t refreshRate = ctx->dpyAttr[mDpy].refreshRate;
1925         MDPVersion& mdpHw = MDPVersion::getInstance();
1926         if(sIdleFallBack) {
1927             //Set minimum panel refresh rate during idle timeout
1928             refreshRate = mdpHw.getMinFpsSupported();
1929         } else if((ctx->listStats[mDpy].yuvCount == frame.mdpCount) ||
1930                                 (frame.layerCount == 1)) {
1931             //Set the new refresh rate, if there is only one updating YUV
1932             //layer or a single RGB layer with this request
1933             refreshRate = ctx->listStats[mDpy].refreshRateRequest;
1934         }
1935         setRefreshRate(ctx, mDpy, refreshRate);
1936     }
1937 }
1938 
1939 int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
1940     int ret = 0;
1941     char property[PROPERTY_VALUE_MAX];
1942 
1943     if(!ctx || !list) {
1944         ALOGE("%s: Invalid context or list",__FUNCTION__);
1945         mCachedFrame.reset();
1946         return -1;
1947     }
1948 
1949     const int numLayers = ctx->listStats[mDpy].numAppLayers;
1950     if(mDpy == HWC_DISPLAY_PRIMARY) {
1951         sSimulationFlags = 0;
1952         if(property_get("debug.hwc.simulate", property, NULL) > 0) {
1953             int currentFlags = atoi(property);
1954             if(currentFlags != sSimulationFlags) {
1955                 sSimulationFlags = currentFlags;
1956                 ALOGI("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
1957                         sSimulationFlags, sSimulationFlags);
1958             }
1959         }
1960     }
1961     // reset PTOR
1962     if(!mDpy)
1963         memset(&(ctx->mPtorInfo), 0, sizeof(ctx->mPtorInfo));
1964 
1965     //reset old data
1966     mCurrentFrame.reset(numLayers);
1967     memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
1968     mCurrentFrame.dropCount = 0;
1969 
1970     //Do not cache the information for next draw cycle.
1971     if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
1972         ALOGI("%s: Unsupported layer count for mdp composition",
1973                 __FUNCTION__);
1974         mCachedFrame.reset();
1975 #ifdef DYNAMIC_FPS
1976         // Reset refresh rate
1977         setRefreshRate(ctx, mDpy, ctx->dpyAttr[mDpy].refreshRate);
1978 #endif
1979         return -1;
1980     }
1981 
1982     // Detect the start of animation and fall back to GPU only once to cache
1983     // all the layers in FB and display FB content until animation completes.
1984     if(ctx->listStats[mDpy].isDisplayAnimating) {
1985         mCurrentFrame.needsRedraw = false;
1986         if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
1987             mCurrentFrame.needsRedraw = true;
1988             ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
1989         }
1990         setMDPCompLayerFlags(ctx, list);
1991         mCachedFrame.updateCounts(mCurrentFrame);
1992 #ifdef DYNAMIC_FPS
1993         // Reset refresh rate
1994         setRefreshRate(ctx, mDpy, ctx->dpyAttr[mDpy].refreshRate);
1995 #endif
1996         ret = -1;
1997         return ret;
1998     } else {
1999         ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
2000     }
2001 
2002     if(!mDpy and !isSecondaryConnected(ctx) and !mPrevModeOn and
2003        mCachedFrame.isSameFrame(ctx,mDpy,list)) {
2004 
2005         ALOGD_IF(isDebug(),"%s: Avoid new composition",__FUNCTION__);
2006         mCurrentFrame.needsRedraw = false;
2007         setMDPCompLayerFlags(ctx, list);
2008         mCachedFrame.updateCounts(mCurrentFrame);
2009         return -1;
2010 
2011     }
2012 
2013     //Hard conditions, if not met, cannot do MDP comp
2014     if(isFrameDoable(ctx)) {
2015         generateROI(ctx, list);
2016         // if AIV Video mode is enabled, drop all non AIV layers from the
2017         // external display list.
2018         if(ctx->listStats[mDpy].mAIVVideoMode) {
2019             dropNonAIVLayers(ctx, list);
2020         }
2021 
2022         // if tryFullFrame fails, try to push all video and secure RGB layers
2023         // to MDP for composition.
2024         mModeOn = tryFullFrame(ctx, list) || tryMDPOnlyLayers(ctx, list) ||
2025                   tryVideoOnly(ctx, list);
2026         if(mModeOn) {
2027             setMDPCompLayerFlags(ctx, list);
2028         } else {
2029             resetROI(ctx, mDpy);
2030             reset(ctx);
2031             memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
2032             mCurrentFrame.dropCount = 0;
2033             ret = -1;
2034             ALOGE_IF(sSimulationFlags && (mDpy == HWC_DISPLAY_PRIMARY),
2035                     "MDP Composition Strategies Failed");
2036         }
2037     } else {
2038         if ((ctx->mMDP.version == qdutils::MDP_V3_0_5) && ctx->mCopyBit[mDpy] &&
2039                 enablePartialUpdateForMDP3) {
2040             generateROI(ctx, list);
2041             for(int i = 0; i < ctx->listStats[mDpy].numAppLayers; i++) {
2042                 ctx->copybitDrop[i] = mCurrentFrame.drop[i];
2043             }
2044         }
2045         ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
2046                 __FUNCTION__);
2047         ret = -1;
2048     }
2049 
2050     if(isDebug()) {
2051         ALOGD("GEOMETRY change: %d",
2052                 (list->flags & HWC_GEOMETRY_CHANGED));
2053         android::String8 sDump("");
2054         dump(sDump, ctx);
2055         ALOGD("%s",sDump.string());
2056     }
2057 
2058 #ifdef DYNAMIC_FPS
2059     setDynRefreshRate(ctx, list);
2060 #endif
2061 
2062     mCachedFrame.updateCounts(mCurrentFrame);
2063     return ret;
2064 }
2065 
2066 bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {
2067 
2068     bool bRet = true;
2069     int mdpIndex = mCurrentFrame.layerToMDP[index];
2070     PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
2071     info.pipeInfo = new MdpYUVPipeInfo;
2072     info.rot = NULL;
2073     MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;
2074 
2075     pipe_info.lIndex = ovutils::OV_INVALID;
2076     pipe_info.rIndex = ovutils::OV_INVALID;
2077 
2078     Overlay::PipeSpecs pipeSpecs;
2079     pipeSpecs.formatClass = Overlay::FORMAT_YUV;
2080     pipeSpecs.needsScaling = true;
2081     pipeSpecs.dpy = mDpy;
2082     pipeSpecs.fb = false;
2083 
2084     pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
2085     if(pipe_info.lIndex == ovutils::OV_INVALID){
2086         bRet = false;
2087         ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
2088                 __FUNCTION__);
2089     }
2090     pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
2091     if(pipe_info.rIndex == ovutils::OV_INVALID){
2092         bRet = false;
2093         ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
2094                 __FUNCTION__);
2095     }
2096     return bRet;
2097 }
2098 
2099 int MDPComp::drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
2100     int fd = -1;
2101     if (ctx->mPtorInfo.isActive()) {
2102         fd = ctx->mCopyBit[mDpy]->drawOverlap(ctx, list);
2103         if (fd < 0) {
2104             ALOGD_IF(isDebug(),"%s: failed", __FUNCTION__);
2105         }
2106     }
2107     return fd;
2108 }
2109 //=============MDPCompNonSplit==================================================
2110 
2111 void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
2112         hwc_display_contents_1_t* list) {
2113     //If a 4k2k Yuv layer split is possible, and fbZ is above the
2114     //4k2k layer, increment fb zorder by 1, since we split the 4k2k
2115     //layer and assign an incremented zorder to the right half
2116     //of the layer
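    //Hypothetical walk-through: two MDP layers [4k2k YUV, RGB] with
    //fbZ = 2. After the split, the YUV layer occupies z-orders 0 and 1,
    //the RGB layer takes z-order 2, so fbZ is bumped to 3 and
    //mdpCount grows by 1.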
2117     if(!ctx)
2118         return;
2119     if(mCurrentFrame.fbZ >= 0) {
2120         for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
2121                 index++) {
2122             if(!mCurrentFrame.isFBComposed[index]) {
2123                 if(mdpNextZOrder == mCurrentFrame.fbZ) {
2124                     mdpNextZOrder++;
2125                 }
2126                 mdpNextZOrder++;
2127                 hwc_layer_1_t* layer = &list->hwLayers[index];
2128                 private_handle_t *hnd = (private_handle_t *)layer->handle;
2129                 if(isYUVSplitNeeded(hnd)) {
2130                     if(mdpNextZOrder <= mCurrentFrame.fbZ)
2131                         mCurrentFrame.fbZ += 1;
2132                     mdpNextZOrder++;
2133                     //Since we split the 4kx2k yuv layer and program it to
2134                     //2 VG pipes (if available), increase mdpCount by 1.
2135                     mCurrentFrame.mdpCount++;
2136                 }
2137             }
2138         }
2139     }
2140 }
2141 
2142 /*
2143  * Configures pipe(s) for MDP composition
2144  */
2145 int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
2146                              PipeLayerPair& PipeLayerPair) {
2147     MdpPipeInfoNonSplit& mdp_info =
2148         *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
2149     eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
2150     eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2151     eDest dest = mdp_info.index;
2152 
2153     ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
2154              __FUNCTION__, layer, zOrder, dest);
2155 
2156     return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, dest,
2157                            &PipeLayerPair.rot);
2158 }
2159 
2160 bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
2161         hwc_display_contents_1_t* list) {
2162     for(int index = 0; index < mCurrentFrame.layerCount; index++) {
2163 
2164         if(mCurrentFrame.isFBComposed[index]) continue;
2165 
2166         hwc_layer_1_t* layer = &list->hwLayers[index];
2167         private_handle_t *hnd = (private_handle_t *)layer->handle;
2168         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){
2169             if(allocSplitVGPipesfor4k2k(ctx, index)){
2170                 continue;
2171             }
2172         }
2173 
2174         int mdpIndex = mCurrentFrame.layerToMDP[index];
2175         PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
2176         info.pipeInfo = new MdpPipeInfoNonSplit;
2177         info.rot = NULL;
2178         MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;
2179 
2180         Overlay::PipeSpecs pipeSpecs;
2181         pipeSpecs.formatClass = isYuvBuffer(hnd) ?
2182                 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
2183         pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
2184                 (qdutils::MDPVersion::getInstance().is8x26() and
2185                 ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
2186         pipeSpecs.dpy = mDpy;
2187         pipeSpecs.fb = false;
2188         pipeSpecs.numActiveDisplays = ctx->numActiveDisplays;
2189 
2190         pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);
2191 
2192         if(pipe_info.index == ovutils::OV_INVALID) {
2193             ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
2194             return false;
2195         }
2196     }
2197     return true;
2198 }
2199 
2200 int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
2201         PipeLayerPair& PipeLayerPair) {
2202     MdpYUVPipeInfo& mdp_info =
2203             *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
2204     eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2205     eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
2206     eDest lDest = mdp_info.lIndex;
2207     eDest rDest = mdp_info.rIndex;
2208 
2209     return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder,
2210             lDest, rDest, &PipeLayerPair.rot);
2211 }
2212 
2213 bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
2214 
2215     if(!isEnabled() or !mModeOn) {
2216         ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
2217         return true;
2218     }
2219 
2220     overlay::Overlay& ov = *ctx->mOverlay;
2221     LayerProp *layerProp = ctx->layerProp[mDpy];
2222 
2223     int numHwLayers = ctx->listStats[mDpy].numAppLayers;
2224     for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
2225     {
2226         if(mCurrentFrame.isFBComposed[i]) continue;
2227 
2228         hwc_layer_1_t *layer = &list->hwLayers[i];
2229         private_handle_t *hnd = (private_handle_t *)layer->handle;
2230         if(!hnd) {
2231             if (!(layer->flags & HWC_COLOR_FILL)) {
2232                 ALOGE("%s handle null", __FUNCTION__);
2233                 return false;
2234             }
2235             // No PLAY for Color layer
2236             layerProp[i].mFlags &= ~HWC_MDPCOMP;
2237             continue;
2238         }
2239 
2240         int mdpIndex = mCurrentFrame.layerToMDP[i];
2241 
2242         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit)
2243         {
2244             MdpYUVPipeInfo& pipe_info =
2245                 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2246             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2247             ovutils::eDest indexL = pipe_info.lIndex;
2248             ovutils::eDest indexR = pipe_info.rIndex;
2249             int fd = hnd->fd;
2250             uint32_t offset = (uint32_t)hnd->offset;
2251             if(rot) {
2252                 rot->queueBuffer(fd, offset);
2253                 fd = rot->getDstMemId();
2254                 offset = rot->getDstOffset();
2255             }
2256             if(indexL != ovutils::OV_INVALID) {
2257                 ovutils::eDest destL = (ovutils::eDest)indexL;
2258                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2259                         using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
2260                 if (!ov.queueBuffer(fd, offset, destL)) {
2261                     ALOGE("%s: queueBuffer failed for display:%d",
2262                             __FUNCTION__, mDpy);
2263                     return false;
2264                 }
2265             }
2266 
2267             if(indexR != ovutils::OV_INVALID) {
2268                 ovutils::eDest destR = (ovutils::eDest)indexR;
2269                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2270                         using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
2271                 if (!ov.queueBuffer(fd, offset, destR)) {
2272                     ALOGE("%s: queueBuffer failed for display:%d",
2273                             __FUNCTION__, mDpy);
2274                     return false;
2275                 }
2276             }
2277         }
2278         else{
2279             MdpPipeInfoNonSplit& pipe_info =
2280             *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2281             ovutils::eDest dest = pipe_info.index;
2282             if(dest == ovutils::OV_INVALID) {
2283                 ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
2284                 return false;
2285             }
2286 
2287             if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
2288                 continue;
2289             }
2290 
2291             int fd = hnd->fd;
2292             uint32_t offset = (uint32_t)hnd->offset;
2293             int index = ctx->mPtorInfo.getPTORArrayIndex(i);
2294             if (!mDpy && (index != -1)) {
2295                 hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
2296                 fd = hnd->fd;
2297                 offset = 0;
2298             }
2299 
2300             ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2301                     using  pipe: %d", __FUNCTION__, layer,
2302                     hnd, dest );
2303 
2304             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2305             if(rot) {
2306                 if(!rot->queueBuffer(fd, offset))
2307                     return false;
2308                 fd = rot->getDstMemId();
2309                 offset = rot->getDstOffset();
2310             }
2311 
2312             if (!ov.queueBuffer(fd, offset, dest)) {
2313                 ALOGE("%s: queueBuffer failed for display:%d ",
2314                         __FUNCTION__, mDpy);
2315                 return false;
2316             }
2317         }
2318 
2319         layerProp[i].mFlags &= ~HWC_MDPCOMP;
2320     }
2321     return true;
2322 }
2323 
2324 //=============MDPCompSplit===================================================
2325 
2326 void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
2327          hwc_display_contents_1_t* list){
2328     //If a 4kx2k yuv layer lies entirely within either the left half
2329     //or the right half, try splitting it to avoid decimation
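    //E.g. (hypothetical): with lSplit = 1080 and a 4kx2k layer whose
    //displayFrame is [0, 0, 900, 1600], dst.right < lSplit holds, so the
    //layer is split across two VG pipes instead of being decimated.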
2330     const int lSplit = getLeftSplit(ctx, mDpy);
2331     if(mCurrentFrame.fbZ >= 0) {
2332         for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
2333                 index++) {
2334             if(!mCurrentFrame.isFBComposed[index]) {
2335                 if(mdpNextZOrder == mCurrentFrame.fbZ) {
2336                     mdpNextZOrder++;
2337                 }
2338                 mdpNextZOrder++;
2339                 hwc_layer_1_t* layer = &list->hwLayers[index];
2340                 private_handle_t *hnd = (private_handle_t *)layer->handle;
2341                 if(isYUVSplitNeeded(hnd)) {
2342                     hwc_rect_t dst = layer->displayFrame;
2343                     if((dst.left > lSplit) || (dst.right < lSplit)) {
2344                         mCurrentFrame.mdpCount += 1;
2345                     }
2346                     if(mdpNextZOrder <= mCurrentFrame.fbZ)
2347                         mCurrentFrame.fbZ += 1;
2348                     mdpNextZOrder++;
2349                 }
2350             }
2351         }
2352     }
2353 }
2354 
2355 bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
2356         MdpPipeInfoSplit& pipe_info) {
2357 
2358     const int lSplit = getLeftSplit(ctx, mDpy);
2359     private_handle_t *hnd = (private_handle_t *)layer->handle;
2360     hwc_rect_t dst = layer->displayFrame;
2361     pipe_info.lIndex = ovutils::OV_INVALID;
2362     pipe_info.rIndex = ovutils::OV_INVALID;
2363 
2364     Overlay::PipeSpecs pipeSpecs;
2365     pipeSpecs.formatClass = isYuvBuffer(hnd) ?
2366             Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
2367     pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
2368     pipeSpecs.dpy = mDpy;
2369     pipeSpecs.mixer = Overlay::MIXER_LEFT;
2370     pipeSpecs.fb = false;
2371 
2372     // Acquire pipe only for the updating half
2373     hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
2374     hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;
2375 
2376     if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
2377         pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
2378         if(pipe_info.lIndex == ovutils::OV_INVALID)
2379             return false;
2380     }
2381 
2382     if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
2383         pipeSpecs.mixer = Overlay::MIXER_RIGHT;
2384         pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
2385         if(pipe_info.rIndex == ovutils::OV_INVALID)
2386             return false;
2387     }
2388 
2389     return true;
2390 }
2391 
2392 bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
2393         hwc_display_contents_1_t* list) {
2394     for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {
2395 
2396         if(mCurrentFrame.isFBComposed[index]) continue;
2397 
2398         hwc_layer_1_t* layer = &list->hwLayers[index];
2399         private_handle_t *hnd = (private_handle_t *)layer->handle;
2400         hwc_rect_t dst = layer->displayFrame;
2401         const int lSplit = getLeftSplit(ctx, mDpy);
2402         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){
2403             if((dst.left > lSplit)||(dst.right < lSplit)){
2404                 if(allocSplitVGPipesfor4k2k(ctx, index)){
2405                     continue;
2406                 }
2407             }
2408         }
2409         int mdpIndex = mCurrentFrame.layerToMDP[index];
2410         PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
2411         info.pipeInfo = new MdpPipeInfoSplit;
2412         info.rot = NULL;
2413         MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;
2414 
2415         if(!acquireMDPPipes(ctx, layer, pipe_info)) {
2416             ALOGD_IF(isDebug(), "%s: Unable to get pipe for type",
2417                     __FUNCTION__);
2418             return false;
2419         }
2420     }
2421     return true;
2422 }
2423 
2424 int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
2425         PipeLayerPair& PipeLayerPair) {
2426     const int lSplit = getLeftSplit(ctx, mDpy);
2427     hwc_rect_t dst = layer->displayFrame;
2428     if((dst.left > lSplit)||(dst.right < lSplit)){
2429         MdpYUVPipeInfo& mdp_info =
2430                 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
2431         eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2432         eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
2433         eDest lDest = mdp_info.lIndex;
2434         eDest rDest = mdp_info.rIndex;
2435 
2436         return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder,
2437                 lDest, rDest, &PipeLayerPair.rot);
2438     }
2439     else{
2440         return configure(ctx, layer, PipeLayerPair);
2441     }
2442 }
2443 
2444 /*
2445  * Configures pipe(s) for MDP composition
2446  */
2447 int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
2448         PipeLayerPair& PipeLayerPair) {
2449     MdpPipeInfoSplit& mdp_info =
2450         *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
2451     eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2452     eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
2453     eDest lDest = mdp_info.lIndex;
2454     eDest rDest = mdp_info.rIndex;
2455 
2456     ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d"
2457              "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);
2458 
2459     return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, lDest,
2460                             rDest, &PipeLayerPair.rot);
2461 }
2462 
2463 bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
2464 
2465     if(!isEnabled() or !mModeOn) {
2466         ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
2467         return true;
2468     }
2469 
2470     overlay::Overlay& ov = *ctx->mOverlay;
2471     LayerProp *layerProp = ctx->layerProp[mDpy];
2472 
2473     int numHwLayers = ctx->listStats[mDpy].numAppLayers;
2474     for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
2475     {
2476         if(mCurrentFrame.isFBComposed[i]) continue;
2477 
2478         hwc_layer_1_t *layer = &list->hwLayers[i];
2479         private_handle_t *hnd = (private_handle_t *)layer->handle;
2480         if(!hnd) {
2481             ALOGE("%s handle null", __FUNCTION__);
2482             return false;
2483         }
2484 
2485         if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
2486             continue;
2487         }
2488 
2489         int mdpIndex = mCurrentFrame.layerToMDP[i];
2490 
2491         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit)
2492         {
2493             MdpYUVPipeInfo& pipe_info =
2494                 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2495             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2496             ovutils::eDest indexL = pipe_info.lIndex;
2497             ovutils::eDest indexR = pipe_info.rIndex;
2498             int fd = hnd->fd;
2499             uint32_t offset = (uint32_t)hnd->offset;
2500             if(rot) {
2501                 rot->queueBuffer(fd, offset);
2502                 fd = rot->getDstMemId();
2503                 offset = rot->getDstOffset();
2504             }
2505             if(indexL != ovutils::OV_INVALID) {
2506                 ovutils::eDest destL = (ovutils::eDest)indexL;
2507                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2508                         using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
2509                 if (!ov.queueBuffer(fd, offset, destL)) {
2510                     ALOGE("%s: queueBuffer failed for display:%d",
2511                             __FUNCTION__, mDpy);
2512                     return false;
2513                 }
2514             }
2515 
2516             if(indexR != ovutils::OV_INVALID) {
2517                 ovutils::eDest destR = (ovutils::eDest)indexR;
2518                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2519                         using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
2520                 if (!ov.queueBuffer(fd, offset, destR)) {
2521                     ALOGE("%s: queueBuffer failed for display:%d",
2522                             __FUNCTION__, mDpy);
2523                     return false;
2524                 }
2525             }
2526         }
2527         else{
2528             MdpPipeInfoSplit& pipe_info =
2529                 *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2530             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2531 
2532             ovutils::eDest indexL = pipe_info.lIndex;
2533             ovutils::eDest indexR = pipe_info.rIndex;
2534 
2535             int fd = hnd->fd;
2536             uint32_t offset = (uint32_t)hnd->offset;
2537             int index = ctx->mPtorInfo.getPTORArrayIndex(i);
2538             if (!mDpy && (index != -1)) {
2539                 hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
2540                 fd = hnd->fd;
2541                 offset = 0;
2542             }
2543 
2544             if(ctx->mAD->draw(ctx, fd, offset)) {
2545                 fd = ctx->mAD->getDstFd();
2546                 offset = ctx->mAD->getDstOffset();
2547             }
2548 
2549             if(rot) {
2550                 rot->queueBuffer(fd, offset);
2551                 fd = rot->getDstMemId();
2552                 offset = rot->getDstOffset();
2553             }
2554 
2555             //************* play left mixer **********
2556             if(indexL != ovutils::OV_INVALID) {
2557                 ovutils::eDest destL = (ovutils::eDest)indexL;
2558                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2559                         using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
2560                 if (!ov.queueBuffer(fd, offset, destL)) {
2561                     ALOGE("%s: queueBuffer failed for left mixer",
2562                             __FUNCTION__);
2563                     return false;
2564                 }
2565             }
2566 
2567             //************* play right mixer **********
2568             if(indexR != ovutils::OV_INVALID) {
2569                 ovutils::eDest destR = (ovutils::eDest)indexR;
2570                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2571                         using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
2572                 if (!ov.queueBuffer(fd, offset, destR)) {
2573                     ALOGE("%s: queueBuffer failed for right mixer",
2574                             __FUNCTION__);
2575                     return false;
2576                 }
2577             }
2578         }
2579 
2580         layerProp[i].mFlags &= ~HWC_MDPCOMP;
2581     }
2582 
2583     return true;
2584 }
2585 
2586 //================MDPCompSrcSplit==============================================
2587 bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
2588         MdpPipeInfoSplit& pipe_info) {
2589     private_handle_t *hnd = (private_handle_t *)layer->handle;
2590     hwc_rect_t dst = layer->displayFrame;
2591     hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
2592     pipe_info.lIndex = ovutils::OV_INVALID;
2593     pipe_info.rIndex = ovutils::OV_INVALID;
2594 
2595     //If 2 pipes are staged on a single mixer stage, the left pipe
2596     //should have a higher priority than the right one. Pipe priority
2597     //descends from VG0, VG1 ..., through RGB0 ..., to DMA1
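    //E.g. (hypothetical): if getPipe() returns VG1 for the left half and
    //VG0 for the right, comparePipePriority() reports the right pipe as
    //higher priority and the two indices are swapped below.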
2598 
2599     Overlay::PipeSpecs pipeSpecs;
2600     pipeSpecs.formatClass = isYuvBuffer(hnd) ?
2601             Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
2602     pipeSpecs.needsScaling = qhwc::needsScaling(layer);
2603     pipeSpecs.dpy = mDpy;
2604     pipeSpecs.fb = false;
2605 
2606     //1 pipe by default for a layer
2607     pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
2608     if(pipe_info.lIndex == ovutils::OV_INVALID) {
2609         return false;
2610     }
2611 
2612     /* Use 2 pipes IF
2613         a) Layer's crop width is > 2048, or
2614         b) Layer's dest width is > 2048, or
2615         c) On primary, the driver has indicated via caps to always split.
2616            This is based on an empirically derived panel-height value.
2617            Applied only if the layer's width is > the mixer's width
2618     */
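    //E.g. (hypothetical): with getMaxMixerWidth() == 2048, a layer whose
    //crop or dest width is 2560 takes two pipes, each fetching roughly
    //half of the source.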
2619 
2620     MDPVersion& mdpHw = MDPVersion::getInstance();
2621     bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
2622             mdpHw.isSrcSplitAlways();
2623     int lSplit = getLeftSplit(ctx, mDpy);
2624     int dstWidth = dst.right - dst.left;
2625     int cropWidth = has90Transform(layer) ? crop.bottom - crop.top :
2626             crop.right - crop.left;
2627 
2628     //TODO Even if a 4k video is going to be rot-downscaled to dimensions
2629     //under the pipe line length, we still use 2 pipes. This is fine only
2630     //because this is source split, where the destination doesn't matter.
2631     //Evaluate later whether all the calcs to save a pipe are worth it
2632     if(dstWidth > (int) mdpHw.getMaxMixerWidth() or
2633             cropWidth > (int) mdpHw.getMaxMixerWidth() or
2634             (primarySplitAlways and (cropWidth > lSplit))) {
2635         pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
2636         if(pipe_info.rIndex == ovutils::OV_INVALID) {
2637             return false;
2638         }
2639 
2640         // comparePipePriority() return values:
2641         // 1  Left pipe is higher priority, do nothing.
2642         // 0  Pipes have the same priority.
2643         //-1  Right pipe is higher priority, needs swap.
2644         if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex,
2645                 pipe_info.rIndex) == -1) {
2646             qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
2647         }
2648     }
2649 
2650     return true;
2651 }

int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& pipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(pipeLayerPair.pipeInfo));
    Rotator **rot = &pipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
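    //Whf bundles the source buffer geometry (width, height, MDP format,
    //buffer size) consumed by the pipe and rotator configuration below.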
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
             "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }
    // Update source crop and destination position of the AIV video layer.
    if(ctx->listStats[mDpy].mAIVVideoMode && isYuvBuffer(hnd)) {
        updateCoordinates(ctx, crop, dst, mDpy);
    }
    /* Calculate the external display position based on MDP downscale,
       ActionSafe, and extorientation features. */
    calcExtDisplayPosition(ctx, hnd, mDpy, crop, dst, transform, orient);

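    //getRotDownscale() returns the rotator downscale factor for this layer
    //(nonzero only when rotator-assisted downscaling is required).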
    int downscale = getRotDownscale(ctx, layer);
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(ctx, layer, mdpFlags, downscale, transform);

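    //When both pipes fetch halves of the same surface, mark them as a
    //dual-pipe pair so the driver can fetch correctly across the shared
    //boundary.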
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if((has90Transform(layer) or downscale) and isRotationDoable(ctx, hnd)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(crop, dst, transform, downscale, mdpFlags);
        }
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        updateSource(orient, whf, crop, *rot);
        rotFlags |= ovutils::ROT_PREROTATED;
    }

    //If 2 pipes are being used, divide the layer in half (both crop and dst)
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        bool cropSwap = false;
        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
            cropSwap = true;
        }

        //cropSwap trick: If the src and dst widths are both odd, say 2507,
        //then splitting both in half would make the left width 1253 and the
        //right 1254. If the crops are swapped because of an H flip, the left
        //crop width becomes 1254 while the left dst width remains 1253, thus
        //inducing a scaling that is unaccounted for. To overcome that we add
        //1 to the dst width if there is a cropSwap, so if the original width
        //was 2507, the left dst width becomes 1254. Even if the original
        //width was even, e.g. 2508, the left dst width still remains 1254.
        dstL.right = (dst.right + dst.left + cropSwap) / 2;
        dstR.left = dstL.right;
    }

    //Reset the orientation for the MDP: we are either pre-rotating via the
    //rotator, or the MDP pipes handle the flips themselves.
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                    cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                    cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

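//Reads the partial-update preference from the fb device's dyn_pu sysfs node.
//Returns the node's value (0 or 1) on success, -1 on failure.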
int MDPComp::getPartialUpdatePref(hwc_context_t *ctx) {
    Locker::Autolock _l(ctx->mDrawLock);
    const int fbNum = Overlay::getFbForDpy(Overlay::DPY_PRIMARY);
    char path[MAX_SYSFS_FILE_PATH];
    snprintf(path, sizeof(path), "/sys/class/graphics/fb%d/dyn_pu", fbNum);
    int fd = open(path, O_RDONLY);
    if(fd < 0) {
        ALOGE("%s: Failed to open sysfs node: %s", __FUNCTION__, path);
        return -1;
    }
    char value[4];
    ssize_t size_read = read(fd, value, sizeof(value)-1);
    if(size_read <= 0) {
        ALOGE("%s: Failed to read sysfs node: %s", __FUNCTION__, path);
        close(fd);
        return -1;
    }
    close(fd);
    value[size_read] = '\0';
    return atoi(value);
}

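//Writes the partial-update preference (0 or 1) to the fb device's dyn_pu
//sysfs node. Returns 0 on success, -1 on failure.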
int MDPComp::setPartialUpdatePref(hwc_context_t *ctx, bool enable) {
    Locker::Autolock _l(ctx->mDrawLock);
    const int fbNum = Overlay::getFbForDpy(Overlay::DPY_PRIMARY);
    char path[MAX_SYSFS_FILE_PATH];
    snprintf(path, sizeof(path), "/sys/class/graphics/fb%d/dyn_pu", fbNum);
    int fd = open(path, O_WRONLY);
    if(fd < 0) {
        ALOGE("%s: Failed to open sysfs node: %s", __FUNCTION__, path);
        return -1;
    }
    char value[4];
    snprintf(value, sizeof(value), "%d", (int)enable);
    ssize_t ret = write(fd, value, strlen(value));
    if(ret <= 0) {
        ALOGE("%s: Failed to write to sysfs node: %s", __FUNCTION__, path);
        close(fd);
        return -1;
    }
    close(fd);
    return 0;
}
} //namespace qhwc