1 /*
2  * Copyright (C) 2012-2015, The Linux Foundation. All rights reserved.
3  * Not a Contribution, Apache license notifications and license are retained
4  * for attribution purposes only.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *      http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 #include <math.h>
20 #include "hwc_mdpcomp.h"
21 #include <sys/ioctl.h>
22 #include <dlfcn.h>
23 #include "hdmi.h"
24 #include "qdMetaData.h"
25 #include "mdp_version.h"
26 #include "hwc_fbupdate.h"
27 #include "hwc_ad.h"
28 #include <overlayRotator.h>
29 #include "hwc_copybit.h"
30 #include "qd_utils.h"
31 
32 using namespace overlay;
33 using namespace qdutils;
34 using namespace overlay::utils;
35 namespace ovutils = overlay::utils;
36 
37 namespace qhwc {
38 
39 //==============MDPComp========================================================
40 
41 IdleInvalidator *MDPComp::sIdleInvalidator = NULL;
42 bool MDPComp::sIdleFallBack = false;
43 bool MDPComp::sHandleTimeout = false;
44 bool MDPComp::sDebugLogs = false;
45 bool MDPComp::sEnabled = false;
46 bool MDPComp::sEnableMixedMode = true;
47 int MDPComp::sSimulationFlags = 0;
48 int MDPComp::sMaxPipesPerMixer = 0;
49 bool MDPComp::sEnableYUVsplit = false;
50 bool MDPComp::sSrcSplitEnabled = false;
51 int MDPComp::sMaxSecLayers = 1;
52 bool MDPComp::enablePartialUpdateForMDP3 = false;
53 bool MDPComp::sIsPartialUpdateActive = true;
54 bool MDPComp::sIsSingleFullScreenUpdate = false;
55 void *MDPComp::sLibPerfHint = NULL;
56 int MDPComp::sPerfLockHandle = 0;
57 int (*MDPComp::sPerfLockAcquire)(int, int, int*, int) = NULL;
58 int (*MDPComp::sPerfLockRelease)(int value) = NULL;
59 int MDPComp::sPerfHintWindow = -1;
60 
MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}
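// Selection order above: source-split capable MDP hardware gets
// MDPCompSrcSplit, dual-mixer (split) displays get MDPCompSplit, and
// everything else gets MDPCompNonSplit. A hypothetical caller (for
// illustration only) would do:
//     MDPComp *mdpComp = MDPComp::getObject(ctx, HWC_DISPLAY_PRIMARY);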

MDPComp::MDPComp(int dpy):mDpy(dpy){}

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s  pipesUsed:%2d  MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf," ---------------------------------------------  \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype  |  Z  \n");
    dumpsys_log(buf," ---------------------------------------------  \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                     mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                    (mCurrentFrame.drop[index] ? "DROP" :
                    (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
    mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX] = {0};

    sEnabled = false;
    if((ctx->mMDP.version >= qdutils::MDP_V4_0) &&
       (property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    qdutils::MDPVersion &mdpVersion = qdutils::MDPVersion::getInstance();

    sMaxPipesPerMixer = (int)mdpVersion.getBlendStages();
    if(property_get("persist.hwc.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, sMaxPipesPerMixer);
    }

    /* Maximum layers allowed to use MDP on secondary panels. If property
     * doesn't exist, default to 1. Using the property it can be set to 0 or
     * more.
     */
    if(property_get("persist.hwc.maxseclayers", property, "1") > 0) {
        int val = atoi(property);
        sMaxSecLayers = (val >= 0) ? val : 1;
        sMaxSecLayers = min(sMaxSecLayers, sMaxPipesPerMixer);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        sIdleInvalidator = IdleInvalidator::getInstance();
        if(sIdleInvalidator->init(timeout_handler, ctx) < 0) {
            delete sIdleInvalidator;
            sIdleInvalidator = NULL;
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
        !qdutils::MDPVersion::getInstance().isRotDownscaleEnabled() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
            !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) {
        sEnableYUVsplit = true;
    }

    bool defaultPTOR = false;
    //Enable PTOR by default for 8x16 and 8x39 targets when
    //"persist.hwc.ptor.enable" is not defined
    if((property_get("persist.hwc.ptor.enable", property, NULL) <= 0) &&
            (qdutils::MDPVersion::getInstance().is8x16() ||
                qdutils::MDPVersion::getInstance().is8x39())) {
        defaultPTOR = true;
    }

    if (defaultPTOR || (!strncasecmp(property, "true", PROPERTY_VALUE_MAX)) ||
                (!strncmp(property, "1", PROPERTY_VALUE_MAX ))) {
        ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit(ctx,
                                                    HWC_DISPLAY_PRIMARY);
    }

    if((property_get("persist.mdp3.partialUpdate", property, NULL) <= 0) &&
          (ctx->mMDP.version == qdutils::MDP_V3_0_5)) {
       enablePartialUpdateForMDP3 = true;
    }

    if(!enablePartialUpdateForMDP3 &&
          (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
           (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
       enablePartialUpdateForMDP3 = true;
    }

    sIsPartialUpdateActive = getPartialUpdatePref(ctx);

    if(property_get("persist.mdpcomp_perfhint", property, "-1") > 0) {
        int val = atoi(property);
        if(val > 0 && loadPerfLib()) {
            sPerfHintWindow = val;
            ALOGI("PerfHintWindow = %d", sPerfHintWindow);
        }
    }

    return true;
}

void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
}

void MDPComp::reset() {
    sHandleTimeout = false;
    mModeOn = false;
}

void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }

    ctx->mDrawLock.lock();
    // Handle timeout event only if the previous composition is MDP or MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        ctx->mDrawLock.unlock();
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        ctx->mDrawLock.unlock();
        return;
    }
    sIdleFallBack = true;
    ctx->mDrawLock.unlock();
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}
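// Idle-fallback flow, as implemented above: the invalidator timer fires,
// sIdleFallBack is set under mDrawLock, and SurfaceFlinger is asked to
// redraw via proc->invalidate(). On the next prepare cycle tryFullFrame()
// sees sIdleFallBack and declines full MDP composition, so an idle screen
// falls back to GPU (or video-only) composition.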

void MDPComp::setMaxPipesPerMixer(const uint32_t value) {
    qdutils::MDPVersion &mdpVersion = qdutils::MDPVersion::getInstance();
    uint32_t maxSupported = (int)mdpVersion.getBlendStages();
    if(value > maxSupported) {
        ALOGW("%s: Input exceeds max value supported. Setting to "
                "max value: %d", __FUNCTION__, maxSupported);
    }
    sMaxPipesPerMixer = min(value, maxSupported);
}

void MDPComp::setIdleTimeout(const uint32_t& timeout) {
    enum { ONE_REFRESH_PERIOD_MS = 17, ONE_BILLION_MS = 1000000000 };

    if(sIdleInvalidator) {
        if(timeout <= ONE_REFRESH_PERIOD_MS) {
            //If the specified timeout is < 1 draw cycle worth, "virtually"
            //disable idle timeout. The ideal way for clients to disable
            //timeout is to set it to 0
            sIdleInvalidator->setIdleTimeout(ONE_BILLION_MS);
            ALOGI("Disabled idle timeout");
            return;
        }
        sIdleInvalidator->setIdleTimeout(timeout);
        ALOGI("Idle timeout set to %u", timeout);
    } else {
        ALOGW("Cannot set idle timeout, IdleInvalidator not enabled");
    }
}
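// Illustration of the thresholds above: setIdleTimeout(10) lands in the
// <= ONE_REFRESH_PERIOD_MS branch and effectively disables the timer by
// pushing it out to ONE_BILLION_MS, while setIdleTimeout(70) arms a real
// ~70ms idle window.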

void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
                                   hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in the FB OR when it
             * lies outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
            (list->flags & HWC_GEOMETRY_CHANGED) ||
            isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_NUM_BLEND_STAGES; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We don't own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // populate layer and MDP maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}
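// Example of the mapping built above, assuming a 3-layer frame where only
// layer 1 stays on the FB:
//     isFBComposed = {false, true, false}
//     layerToMDP   = {0, -1, 1}        // -1 left over from reset()
//     mdpToLayer[0].listIndex = 0, mdpToLayer[1].listIndex = 2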

MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
                                      hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
                (curFrame.drop[i] != drop[i])) {
            return false;
        }
        hwc_layer_1_t const* layer = &list->hwLayers[i];
        if(curFrame.isFBComposed[i] && layerUpdating(layer)){
            return false;
        }
    }
    return true;
}
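// Note: a frame counts as "same" only when every layer keeps its FB/MDP
// assignment and drop flag, and no layer still cached on the FB has
// updated contents. Geometry changes are handled separately in setRedraw()
// via HWC_GEOMETRY_CHANGED.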

bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((has90Transform(layer) and (not isRotationDoable(ctx, hnd))) ||
        (not isValidDimension(ctx,layer))
        //More conditions here, SKIP, sRGB+Blend etc
        ) {
        return false;
    }
    return true;
}

bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGD_IF(isDebug(), "%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    bool rotated90 = (bool)(layer->transform & HAL_TRANSFORM_ROT_90);
    int crop_w = rotated90 ? crop.bottom - crop.top : crop.right - crop.left;
    int crop_h = rotated90 ? crop.right - crop.left : crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);
    MDPVersion& mdpHw = MDPVersion::getInstance();

    /* Workaround for an MDP HW limitation in DSI command mode panels where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or height
     * less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2, so
     * fall back to GPU if height is less than 2.
     */
    if(mdpHw.hasMinCropWidthLimitation() and (crop_w < 5 or crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale = mdpHw.getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!mdpHw.supportsDecimation()) {
                /* On targets that don't support decimation (e.g., 8x26),
                 * the maximum downscale supported is the overlay pipe
                 * downscale.
                 */
                if(crop_w > (int) mdpHw.getMaxPipeWidth() ||
                        w_dscale > maxMDPDownscale ||
                        h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* Bail out if
                     *      1. Src crop > Mixer limit on nonsplit MDPComp
                     *      2. exceeds maximum downscale limit
                     */
                    if(((crop_w > (int) mdpHw.getMaxPipeWidth()) &&
                                !sSrcSplitEnabled) ||
                            w_dscale > maxMDPDownscale ||
                            h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale = mdpHw.getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}
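// Worked example for the scale checks above (limits are illustrative; the
// real ones come from MDPVersion): a 1920x1080 crop rendered into a 480x270
// destination gives w_scale = h_scale = 4.0, which passes only if
// getMaxMDPDownscale() >= 4. The same crop stretched to 3840x2160 gives
// w_uscale = h_uscale = 2.0, checked against getMaxMDPUpscale().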

bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if((qdutils::MDPVersion::getInstance().is8x26() ||
               qdutils::MDPVersion::getInstance().is8x16() ||
               qdutils::MDPVersion::getInstance().is8x39()) &&
            ctx->mVideoTransFlag &&
            isSecondaryConnected(ctx)) {
        //1 Padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().getTotalPipes() < 8) {
       /* TODO: we free up all the resources only on targets that have fewer
                than 8 pipes in total. Need to analyze the number of VIG pipes
                used for primary in the previous draw cycle and accordingly
                decide whether to fall back to full GPU comp or video only comp
        */
        if(isSecondaryConfiguring(ctx)) {
            ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                      __FUNCTION__);
            ret = false;
        } else if(ctx->isPaddingRound) {
            ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                     __FUNCTION__,mDpy);
            ret = false;
        }
    } else if (ctx->isDMAStateChanging) {
        // Bail out if a padding round has been invoked in order to switch DMA
        // state to block mode. We need this to cater for the case when a layer
        // requires rotation in the current frame.
        ALOGD_IF(isDebug(), "%s: padding round invoked to switch DMA state",
                __FUNCTION__);
        return false;
    }

    return ret;
}

hwc_rect_t MDPComp::calculateDirtyRect(const hwc_layer_1_t* layer,
                    hwc_rect_t& scissor) {
  hwc_region_t surfDamage = layer->surfaceDamage;
  hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
  hwc_rect_t dst = layer->displayFrame;
  int x_off = dst.left - src.left;
  int y_off = dst.top - src.top;
  hwc_rect dirtyRect = (hwc_rect){0, 0, 0, 0};
  hwc_rect_t updatingRect = dst;

  if (surfDamage.numRects == 0) {
      // full layer updating, dirty rect is full frame
      dirtyRect = getIntersection(layer->displayFrame, scissor);
  } else {
      for(uint32_t i = 0; i < surfDamage.numRects; i++) {
          updatingRect = moveRect(surfDamage.rects[i], x_off, y_off);
          hwc_rect_t intersect = getIntersection(updatingRect, scissor);
          if(isValidRect(intersect)) {
              dirtyRect = getUnion(intersect, dirtyRect);
          }
      }
  }

  return dirtyRect;
}
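// Illustration of the damage translation above: for a layer with source
// crop (0,0,100,100) and displayFrame (200,300,300,400), the offset is
// (x_off, y_off) = (200, 300); a surfaceDamage rect (10,10,20,20) in layer
// space therefore lands at (210,310,220,320) in display space before being
// clipped against the scissor rect.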

void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect &crop,
        hwc_rect &dst) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    dst = getIntersection(dst, roi);
    crop = dst;
}

/* 1) Identify layers that are not visible or lying outside the updating ROI and
 *    drop them from composition.
 * 2) If we have a scaling layer which needs cropping against generated
 *    ROI, reset ROI to full resolution. */
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs ROI
             * cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* deduct any opaque region from visibleRect */
            if (layer->blending == HWC_BLENDING_NONE &&
                    layer->planeAlpha == 0xFF)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting for all the layers'
 * displayFrames which are updating. If DirtyRegion is applicable, calculate
 * it by accounting for all the changing layers' dirtyRegions. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if (layerUpdating(layer) ||
                isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dirtyRect = getIntersection(layer->displayFrame,
                                                    fullFrame);
            if(!needsScaling(layer) && !layer->transform) {
                dirtyRect = calculateDirtyRect(layer, fullFrame);
            }

            roi = getUnion(roi, dirtyRect);
        }
    }

    /* No layer is updating. Still SF wants a refresh.*/
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect &crop,
        hwc_rect &dst) {
    hwc_rect roi = getUnion(ctx->listStats[mDpy].lRoi,
            ctx->listStats[mDpy].rRoi);
    hwc_rect tmpDst = getIntersection(dst, roi);
    if(!isSameRect(dst, tmpDst)) {
        crop.left = crop.left + (tmpDst.left - dst.left);
        crop.top = crop.top + (tmpDst.top - dst.top);
        crop.right = crop.left + (tmpDst.right - tmpDst.left);
        crop.bottom = crop.top + (tmpDst.bottom - tmpDst.top);
        dst = tmpDst;
    }
}
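// Example of the crop adjustment above: with dst (0,0,200,100) trimmed by
// the ROI to tmpDst (50,0,200,100), crop.left shifts right by 50 and the
// crop shrinks to the same 150x100 size, preserving the 1:1 source to
// destination mapping (ROI generation resets to full frame whenever a
// scaling layer would need cropping).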

/* 1) Identify layers that are not visible or lying outside BOTH the updating
 *    ROI's and drop them from composition. If a layer is spanning across both
 *    the halves of the screen but needed by only one ROI, the non-contributing
 *    half will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against generated
 *    ROI, reset ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs ROI
             * cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if (layer->blending == HWC_BLENDING_NONE &&
                    layer->planeAlpha == 0xFF) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting for all the layers'
 * displayFrames which are updating. If DirtyRegion is applicable, calculate
 * it by accounting for all the changing layers' dirtyRegions. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if (layerUpdating(layer) || isYuvBuffer(hnd)) {
            hwc_rect_t l_dirtyRect = getIntersection(layer->displayFrame,
                                        l_frame);
            hwc_rect_t r_dirtyRect = getIntersection(layer->displayFrame,
                                        r_frame);

            if(!needsScaling(layer) && !layer->transform) {
                l_dirtyRect = calculateDirtyRect(layer, l_frame);
                r_dirtyRect = calculateDirtyRect(layer, r_frame);
            }
            if(isValidRect(l_dirtyRect))
                l_roi = getUnion(l_roi, l_dirtyRect);

            if(isValidRect(r_dirtyRect))
                r_roi = getUnion(r_roi, r_dirtyRect);

        }
    }

    /* For panels that cannot accept commands on both the interfaces, we cannot
     * send two ROI's (one for each half). We merge them into a single ROI and
     * split it across lSplit for MDP mixer use. The ROI's will be merged again
     * finally before updating the panel in the driver. */
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]"
            "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
            ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
            ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}
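// Example of the split-ROI bookkeeping above, assuming a 1440x2560 panel
// with lSplit = 720: a layer updating only (100,100)-(300,200) contributes
// solely to l_roi, while a layer updating (600,0)-(900,100) contributes
// (600,0,720,100) to l_roi and (720,0,900,100) to r_roi.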

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
                                hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    // Fall back to video only composition, if AIV video mode is enabled
    if(ctx->listStats[mDpy].mAIVVideoMode) {
        ALOGD_IF(isDebug(), "%s: AIV Video Mode enabled dpy %d",
            __FUNCTION__, mDpy);
        return false;
    }

    /* No idle fallback if secure display or secure RGB layers are present,
     * or if there is only a single layer being composed */
    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI &&
                  !ctx->listStats[mDpy].secureRGBCount &&
                  (ctx->listStats[mDpy].numAppLayers > 1)) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                __FUNCTION__,
                isSkipPresent(ctx, mDpy));
        return false;
    }

    // if secondary is configuring or in a padding round, fall back to video
    // only composition and release all assigned non VIG pipes from primary.
    if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        return false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__,mDpy);
        return false;
    }

    // check for action safe flag and MDP scaling mode which requires scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
            || ctx->dpyAttr[mDpy].mMDPScalingMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        //For 8x26 with panel width > 1k, fail MDP comp if an RGB layer needs
        //HFLIP. This may not be needed if GPU pre-rotation can handle all
        //flips & rotations.
        MDPVersion& mdpHw = MDPVersion::getInstance();
        int transform = (layer->flags & HWC_COLOR_FILL) ? 0 : layer->transform;
        if( mdpHw.is8x26() && (ctx->dpyAttr[mDpy].xres > 1024) &&
                (transform & HWC_TRANSFORM_FLIP_H) && (!isYuvBuffer(hnd)))
            return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(fullMDPCompWithPTOR(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}

bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
           not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnableYUVsplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Full MDP Composition with Peripheral Tiny Overlap Removal.
 * MDP bandwidth limitations can be avoided, if the overlap region
 * covered by the smallest layer at a higher z-order, gets composed
 * by Copybit on a render buffer, which can be queued to MDP.
 */
bool MDPComp::fullMDPCompWithPTOR(hwc_context_t *ctx,
    hwc_display_contents_1_t* list) {

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    // Hard checks where we cannot use this mode
    if (mDpy || !ctx->mCopyBit[mDpy]) {
        ALOGD_IF(isDebug(), "%s: Feature not supported!", __FUNCTION__);
        return false;
    }

    // Frame level checks
    if ((numAppLayers > stagesForMDP) || isSkipPresent(ctx, mDpy) ||
        isYuvPresent(ctx, mDpy) || mCurrentFrame.dropCount ||
        isSecurePresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: Frame not supported!", __FUNCTION__);
        return false;
    }
    // MDP comp checks
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    /* We cannot use this composition mode, if:
     1. A below layer needs scaling.
     2. Overlap is not peripheral to display.
     3. Overlap or a below layer has 90 degree transform.
     4. Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
     */

    int minLayerIndex[MAX_PTOR_LAYERS] = { -1, -1};
    hwc_rect_t overlapRect[MAX_PTOR_LAYERS];
    memset(overlapRect, 0, sizeof(overlapRect));
    int layerPixelCount, minPixelCount = 0;
    int numPTORLayersFound = 0;
    for (int i = numAppLayers-1; (i >= 0 &&
                                  numPTORLayersFound < MAX_PTOR_LAYERS); i--) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
        hwc_rect_t dispFrame = layer->displayFrame;
        layerPixelCount = (crop.right - crop.left) * (crop.bottom - crop.top);
        // PTOR layer should be peripheral and cannot have transform
        if (!isPeripheral(dispFrame, ctx->mViewFrame[mDpy]) ||
                                has90Transform(layer)) {
            continue;
        }
        if((3 * (layerPixelCount + minPixelCount)) >
                ((int)ctx->dpyAttr[mDpy].xres * (int)ctx->dpyAttr[mDpy].yres)) {
            // Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
            continue;
        }
        bool found = false;
        for (int j = i-1; j >= 0; j--) {
            // Check if the layers below this layer qualify for PTOR comp
            hwc_layer_1_t* layer = &list->hwLayers[j];
            hwc_rect_t disFrame = layer->displayFrame;
            // An intersecting layer below the PTOR layer cannot be supported
            // if it has a 90 degree transform or needs scaling.
            if (isValidRect(getIntersection(dispFrame, disFrame))) {
                if (has90Transform(layer) || needsScaling(layer)) {
                    found = false;
                    break;
                }
                found = true;
            }
        }
        // Store the minLayer Index
        if(found) {
            minLayerIndex[numPTORLayersFound] = i;
            overlapRect[numPTORLayersFound] = list->hwLayers[i].displayFrame;
            minPixelCount += layerPixelCount;
            numPTORLayersFound++;
        }
    }

    // No overlap layers
    if (!numPTORLayersFound)
        return false;

    // Store the displayFrame and the sourceCrops of the layers
    hwc_rect_t displayFrame[numAppLayers];
    hwc_rect_t sourceCrop[numAppLayers];
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        displayFrame[i] = layer->displayFrame;
        sourceCrop[i] = integerizeSourceCrop(layer->sourceCropf);
    }

    /**
     * It's possible that two PTOR layers overlap each other.
     * In such a case, remove the intersection (again, if peripheral)
     * from the lower PTOR layer to avoid overlap.
     * If the intersection is not peripheral, then compromise
     * by reducing the number of PTOR layers.
     **/
    hwc_rect_t commonRect = getIntersection(overlapRect[0], overlapRect[1]);
    if(isValidRect(commonRect)) {
        overlapRect[1] = deductRect(overlapRect[1], commonRect);
        list->hwLayers[minLayerIndex[1]].displayFrame = overlapRect[1];
    }

    ctx->mPtorInfo.count = numPTORLayersFound;
    for(int i = 0; i < MAX_PTOR_LAYERS; i++) {
        ctx->mPtorInfo.layerIndex[i] = minLayerIndex[i];
    }

    if (!ctx->mCopyBit[mDpy]->prepareOverlap(ctx, list)) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        if(isValidRect(commonRect)) {
            // If PTORs are intersecting restore displayframe of PTOR[1]
            // before returning, as we have modified it above.
            list->hwLayers[minLayerIndex[1]].displayFrame =
                    displayFrame[minLayerIndex[1]];
        }
        return false;
    }
    private_handle_t *renderBuf = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
    Whf layerWhf[numPTORLayersFound]; // To store w,h,f of PTOR layers

    // Store the blending mode, planeAlpha, and transform of PTOR layers
    int32_t blending[numPTORLayersFound];
    uint8_t planeAlpha[numPTORLayersFound];
    uint32_t transform[numPTORLayersFound];

    for(int j = 0; j < numPTORLayersFound; j++) {
        int index = ctx->mPtorInfo.layerIndex[j];

        // Update src crop of PTOR layer
        hwc_layer_1_t* layer = &list->hwLayers[index];
        layer->sourceCropf.left = (float)ctx->mPtorInfo.displayFrame[j].left;
        layer->sourceCropf.top = (float)ctx->mPtorInfo.displayFrame[j].top;
        layer->sourceCropf.right = (float)ctx->mPtorInfo.displayFrame[j].right;
        layer->sourceCropf.bottom =(float)ctx->mPtorInfo.displayFrame[j].bottom;

        // Store & update w, h, format of PTOR layer
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        Whf whf(hnd->width, hnd->height, hnd->format, hnd->size);
        layerWhf[j] = whf;
        hnd->width = renderBuf->width;
        hnd->height = renderBuf->height;
        hnd->format = renderBuf->format;

        // Store & update blending mode, planeAlpha and transform of PTOR layer
        blending[j] = layer->blending;
        planeAlpha[j] = layer->planeAlpha;
        transform[j] = layer->transform;
        layer->blending = HWC_BLENDING_NONE;
        layer->planeAlpha = 0xFF;
        layer->transform = 0;

        // Remove overlap from crop & displayFrame of below layers
        for (int i = 0; i < index && index !=-1; i++) {
            layer = &list->hwLayers[i];
            if(!isValidRect(getIntersection(layer->displayFrame,
                                            overlapRect[j])))  {
                continue;
            }
            // Update layer attributes
            hwc_rect_t srcCrop = integerizeSourceCrop(layer->sourceCropf);
            hwc_rect_t destRect = deductRect(layer->displayFrame,
                        getIntersection(layer->displayFrame, overlapRect[j]));
            qhwc::calculate_crop_rects(srcCrop, layer->displayFrame, destRect,
                                       layer->transform);
            layer->sourceCropf.left = (float)srcCrop.left;
            layer->sourceCropf.top = (float)srcCrop.top;
            layer->sourceCropf.right = (float)srcCrop.right;
            layer->sourceCropf.bottom = (float)srcCrop.bottom;
        }
    }

    mCurrentFrame.mdpCount = numAppLayers;
    mCurrentFrame.fbCount = 0;
    mCurrentFrame.fbZ = -1;

    for (int j = 0; j < numAppLayers; j++) {
        if(isValidRect(list->hwLayers[j].displayFrame)) {
            mCurrentFrame.isFBComposed[j] = false;
        } else {
            mCurrentFrame.mdpCount--;
            mCurrentFrame.drop[j] = true;
        }
    }

    bool result = postHeuristicsHandling(ctx, list);

    // Restore layer attributes
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        layer->displayFrame = displayFrame[i];
        layer->sourceCropf.left = (float)sourceCrop[i].left;
        layer->sourceCropf.top = (float)sourceCrop[i].top;
        layer->sourceCropf.right = (float)sourceCrop[i].right;
        layer->sourceCropf.bottom = (float)sourceCrop[i].bottom;
    }

    // Restore w,h,f, blending attributes, and transform of PTOR layers
    for (int i = 0; i < numPTORLayersFound; i++) {
        int idx = ctx->mPtorInfo.layerIndex[i];
        hwc_layer_1_t* layer = &list->hwLayers[idx];
        private_handle_t *hnd = (private_handle_t *)list->hwLayers[idx].handle;
        hnd->width = layerWhf[i].w;
        hnd->height = layerWhf[i].h;
        hnd->format = layerWhf[i].format;
        layer->blending = blending[i];
        layer->planeAlpha = planeAlpha[i];
        layer->transform = transform[i];
    }

    if (!result) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        reset(ctx);
    } else {
        ALOGD_IF(isDebug(), "%s: PTOR Indexes: %d and %d", __FUNCTION__,
                 ctx->mPtorInfo.layerIndex[0],  ctx->mPtorInfo.layerIndex[1]);
    }

    ALOGD_IF(isDebug(), "%s: Postheuristics %s!", __FUNCTION__,
             (result ? "successful" : "failed"));
    return result;
}

bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
        ret =   loadBasedComp(ctx, list) or
                cacheBasedComp(ctx, list);
    } else {
        ret =   cacheBasedComp(ctx, list) or
                loadBasedComp(ctx, list);
    }

    return ret;
}
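// Strategy order above: on a geometry change the layer cache is likely
// stale, so load-based batching is attempted first; otherwise the cheaper
// cache-based approach (reusing the FB for unchanged layers) is tried
// before falling back to load-based composition.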

bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list, mCurrentFrame);

    //If an MDP-marked layer is unsupported, we cannot do partial MDP comp
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                        __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/, mCurrentFrame);
    /* mark secure RGB layers for MDP comp */
    updateSecureRGB(ctx, list);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnableYUVsplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    for(int i = 0; i < numAppLayers;i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
            "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
            mCurrentFrame.dropCount);

    //Start at a point where the fb batch should at least have 2 layers, for
    //this mode to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }

    //If there are no layers for MDP, this mode doesn't make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try with successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                __FUNCTION__, mdpBatchSize, fbBatchSize,
                mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                     __FUNCTION__);
            ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED",
                     __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}
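// Worked example for the batching above (illustrative numbers): with 8
// blend stages available and 10 non-dropped app layers, the initial split
// is mdpBatchSize = 7 (one stage reserved for the FB) and fbBatchSize = 3.
// Each time postHeuristicsHandling() fails, the loop retries with 6/4,
// 5/5, ... down to a single MDP layer before giving up.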

bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
    if(mDpy or isSecurePresent(ctx, mDpy) or
            isYuvPresent(ctx, mDpy)) {
        return false;
    }
    return true;
}

bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
            isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
            !sIsPartialUpdateActive || mDpy ) {
        return false;
    }
    if(ctx->listStats[mDpy].secureUI)
        return false;
    if (sIsSingleFullScreenUpdate) {
        // Force a single full-screen update by disabling partial update once
        sIsSingleFullScreenUpdate = false;
        return false;
    }
    return true;
}

bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    const bool secureOnly = true;
    return videoOnlyComp(ctx, list, not secureOnly) or
            videoOnlyComp(ctx, list, secureOnly);
}
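// Note: the first pass considers all video layers (secureOnly = false);
// if that fails, the second pass retries marking only secure video layers
// for MDP, giving those layers a chance on their own.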

bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list, bool secureOnly) {
    if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!isSecurePresent(ctx, mDpy)) {
        /* Bail out if we are processing only secured video layers
         * and we don't have any */
        if(secureOnly) {
            ALOGD_IF(isDebug(),"%s: No Secure Video Layers", __FUNCTION__);
            return false;
        }
        /* No idle fallback for secure video layers, or if there is only a
         * single layer being composed. */
        if(sIdleFallBack && (ctx->listStats[mDpy].numAppLayers > 1)) {
            ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
            return false;
        }
1342     }
1343 
1344     mCurrentFrame.reset(numAppLayers);
1345     mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
1346     updateYUV(ctx, list, secureOnly, mCurrentFrame);
1347     int mdpCount = mCurrentFrame.mdpCount;
1348 
1349     if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
1350         reset(ctx);
1351         return false;
1352     }
1353 
1354     if(mCurrentFrame.fbCount)
1355         mCurrentFrame.fbZ = mCurrentFrame.mdpCount;
1356 
1357     if(sEnableYUVsplit){
1358         adjustForSourceSplit(ctx, list);
1359     }
1360 
1361     if(!postHeuristicsHandling(ctx, list)) {
1362         ALOGD_IF(isDebug(), "post heuristics handling failed");
1363         if(errno == ENOBUFS) {
1364             ALOGD_IF(isDebug(), "SMP Allocation failed");
1365             //On SMP allocation failure in video-only comp, add a padding round
1366             ctx->isPaddingRound = true;
1367         }
1368         reset(ctx);
1369         return false;
1370     }
1371 
1372     ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED",
1373              __FUNCTION__);
1374     return true;
1375 }
1376 
1377 /* if tryFullFrame fails, try to push all video and secure RGB layers to MDP */
1378 bool MDPComp::tryMDPOnlyLayers(hwc_context_t *ctx,
1379         hwc_display_contents_1_t* list) {
1380     // Fall back to video only composition, if AIV video mode is enabled
1381     if(ctx->listStats[mDpy].mAIVVideoMode) {
1382         ALOGD_IF(isDebug(), "%s: AIV Video Mode enabled dpy %d",
1383             __FUNCTION__, mDpy);
1384         return false;
1385     }
1386 
1387     const bool secureOnly = true;
1388     return mdpOnlyLayersComp(ctx, list, not secureOnly) or
1389             mdpOnlyLayersComp(ctx, list, secureOnly);
1390 
1391 }
1392 
1393 bool MDPComp::mdpOnlyLayersComp(hwc_context_t *ctx,
1394         hwc_display_contents_1_t* list, bool secureOnly) {
1395 
1396     if(sSimulationFlags & MDPCOMP_AVOID_MDP_ONLY_LAYERS)
1397         return false;
1398 
1399     int numAppLayers = ctx->listStats[mDpy].numAppLayers;
1400     if(!isSecurePresent(ctx, mDpy) && !ctx->listStats[mDpy].secureUI) {
1401         /* Bail out if we are processing only secure video/ui layers
1402          * and we don't have any */
1403         if(secureOnly) {
1404             ALOGD_IF(isDebug(), "%s: No secure video/ui layers", __FUNCTION__);
1405             return false;
1406         }
1407         /* Don't apply idle fallback for secure video/ui layers, or when
1408          * only a single layer is being composed. */
1409         if(sIdleFallBack && (ctx->listStats[mDpy].numAppLayers > 1)) {
1410            ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
1411            return false;
1412        }
1413     }
1414 
1415     /* Bail out if we dont have any secure RGB layers */
1416     if (!ctx->listStats[mDpy].secureRGBCount) {
1417         reset(ctx);
1418         return false;
1419     }
1420 
1421     mCurrentFrame.reset(numAppLayers);
1422     mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
1423 
1424     updateYUV(ctx, list, secureOnly, mCurrentFrame);
1425     /* mark secure RGB layers for MDP comp */
1426     updateSecureRGB(ctx, list);
1427 
1428     if(mCurrentFrame.mdpCount == 0) {
1429         reset(ctx);
1430         return false;
1431     }
1432 
1433     /* find the maximum batch of layers to be marked for framebuffer */
1434     bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
1435     if(!ret) {
1436         ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
1437         reset(ctx);
1438         return false;
1439     }
1440 
1441     if(sEnableYUVsplit){
1442         adjustForSourceSplit(ctx, list);
1443     }
1444 
1445     if(!postHeuristicsHandling(ctx, list)) {
1446         ALOGD_IF(isDebug(), "post heuristic handling failed");
1447         reset(ctx);
1448         return false;
1449     }
1450 
1451     ALOGD_IF(sSimulationFlags,"%s: MDP_ONLY_LAYERS_COMP SUCCEEDED",
1452              __FUNCTION__);
1453     return true;
1454 }
1455 
1456 /* Checks for conditions where YUV layers cannot be bypassed */
1457 bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
1458     if(isSkipLayer(layer)) {
1459         ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
1460         return false;
1461     }
1462 
1463     if(has90Transform(layer) && !canUseRotator(ctx, mDpy)) {
1464         ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
1465         return false;
1466     }
1467 
1468     if(isSecuring(ctx, layer)) {
1469         ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
1470         return false;
1471     }
1472 
1473     if(!isValidDimension(ctx, layer)) {
1474         ALOGD_IF(isDebug(), "%s: Unsupported buffer dimensions",
1475             __FUNCTION__);
1476         return false;
1477     }
1478 
1479     if(layer->planeAlpha < 0xFF) {
1480         ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\
1481                  in video only mode",
1482                  __FUNCTION__);
1483         return false;
1484     }
1485 
1486     return true;
1487 }
1488 
1489 /* Checks for conditions where Secure RGB layers cannot be bypassed */
1490 bool MDPComp::isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
1491     if(isSkipLayer(layer)) {
1492         ALOGD_IF(isDebug(), "%s: Secure RGB layer marked SKIP dpy %d",
1493             __FUNCTION__, mDpy);
1494         return false;
1495     }
1496 
1497     if(isSecuring(ctx, layer)) {
1498         ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
1499         return false;
1500     }
1501 
1502     if(not isSupportedForMDPComp(ctx, layer)) {
1503         ALOGD_IF(isDebug(), "%s: Unsupported secure RGB layer",
1504             __FUNCTION__);
1505         return false;
1506     }
1507     return true;
1508 }
1509 
1510 /* Starts at fromIndex and checks each FB-composed layer for overlap
1511  * with any updating layer above it in z-order, till the end of the batch.
1512  * Returns false if any such intersection is found. */
1513 bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
1514         int fromIndex, int toIndex) {
1515     for(int i = fromIndex; i < toIndex; i++) {
1516         if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
1517             if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
1518                 return false;
1519             }
1520         }
1521     }
1522     return true;
1523 }
1524 
1525 /* Checks if the given layer at targetLayerIndex has any
1526  * intersection with the updating layers between
1527  * fromIndex and toIndex. Returns true if it finds an intersection. */
1528 bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
1529         int fromIndex, int toIndex, int targetLayerIndex) {
1530     for(int i = fromIndex; i <= toIndex; i++) {
1531         if(!mCurrentFrame.isFBComposed[i]) {
1532             if(areLayersIntersecting(&list->hwLayers[i],
1533                         &list->hwLayers[targetLayerIndex]))  {
1534                 return true;
1535             }
1536         }
1537     }
1538     return false;
1539 }
1540 
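/* Example of the batching below: with layers (bottom to top)
 * [FB, MDP, FB, FB], the batch can grow past the MDP layer either by
 * absorbing FB layers that don't intersect it, or, if the FB layers so
 * far don't intersect the MDP layers above them, by pushing the whole
 * batch to a higher z-order (incrementing fbZ accordingly). Returns the
 * resulting fbZ, or -1 if no batch was found. */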
1541 int MDPComp::getBatch(hwc_display_contents_1_t* list,
1542         int& maxBatchStart, int& maxBatchEnd,
1543         int& maxBatchCount) {
1544     int i = 0;
1545     int fbZOrder = -1;
1546     int droppedLayerCt = 0;
1547     while (i < mCurrentFrame.layerCount) {
1548         int batchCount = 0;
1549         int batchStart = i;
1550         int batchEnd = i;
1551         /* Adjust batch Z order with the dropped layers so far */
1552         int fbZ = batchStart - droppedLayerCt;
1553         int firstZReverseIndex = -1;
1554         int updatingLayersAbove = 0;//Updating layer count in middle of batch
1555         while(i < mCurrentFrame.layerCount) {
1556             if(!mCurrentFrame.isFBComposed[i]) {
1557                 if(!batchCount) {
1558                     i++;
1559                     break;
1560                 }
1561                 updatingLayersAbove++;
1562                 i++;
1563                 continue;
1564             } else {
1565                 if(mCurrentFrame.drop[i]) {
1566                     i++;
1567                     droppedLayerCt++;
1568                     continue;
1569                 } else if(updatingLayersAbove <= 0) {
1570                     batchCount++;
1571                     batchEnd = i;
1572                     i++;
1573                     continue;
1574                 } else { //Layer is FBComposed, not a drop & updatingLayer > 0
1575 
1576                     // We already have a valid updating layer above. If
1577                     // layer-i does not overlap any updating layer between
1578                     // batch-start and i, we can add layer-i to the batch.
1579                     if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
1580                         batchCount++;
1581                         batchEnd = i;
1582                         i++;
1583                         continue;
1584                     } else if(canPushBatchToTop(list, batchStart, i)) {
1585                         //If all the non-updating layers within this batch
1586                         //do not intersect the updating layers above in
1587                         //z-order, we can safely move the batch to a higher
1588                         //z-order. Increment fbZ as the batch moves up.
1589                         if( firstZReverseIndex < 0) {
1590                             firstZReverseIndex = i;
1591                         }
1592                         batchCount++;
1593                         batchEnd = i;
1594                         fbZ += updatingLayersAbove;
1595                         i++;
1596                         updatingLayersAbove = 0;
1597                         continue;
1598                     } else {
1599                         //Both checks failed. Restart the loop from here.
1600                         if(firstZReverseIndex >= 0) {
1601                             i = firstZReverseIndex;
1602                         }
1603                         break;
1604                     }
1605                 }
1606             }
1607         }
1608         if(batchCount > maxBatchCount) {
1609             maxBatchCount = batchCount;
1610             maxBatchStart = batchStart;
1611             maxBatchEnd = batchEnd;
1612             fbZOrder = fbZ;
1613         }
1614     }
1615     return fbZOrder;
1616 }
1617 
1618 bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
1619         hwc_display_contents_1_t* list) {
1620     /* Idea is to keep as many non-updating(cached) layers in FB and
1621      * send the rest of them through MDP. This is done in 2 steps.
1622      *   1. Find the maximum contiguous batch of non-updating layers.
1623      *   2. See if we can improve this batch size for caching by adding
1624      *      opaque layers around the batch, if they don't overlap any
1625      *      of the updating layers in between.
1626      * NEVER mark an updating layer for caching.
1627      * But cached ones can be marked for MDP */
1628 
1629     int maxBatchStart = -1;
1630     int maxBatchEnd = -1;
1631     int maxBatchCount = 0;
1632     int fbZ = -1;
1633 
1634     /* Nothing is cached. No batching needed */
1635     if(mCurrentFrame.fbCount == 0) {
1636         return true;
1637     }
1638 
1639     /* No MDP comp layers, try to use other comp modes */
1640     if(mCurrentFrame.mdpCount == 0) {
1641         return false;
1642     }
1643 
1644     fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);
1645 
1646     /* Mark the rest of the layers lying inside the ROI for MDP comp */
1647     for(int i = 0; i < mCurrentFrame.layerCount; i++) {
1648         hwc_layer_1_t* layer = &list->hwLayers[i];
1649         if((i < maxBatchStart || i > maxBatchEnd) &&
1650                 mCurrentFrame.isFBComposed[i]){
1651             if(!mCurrentFrame.drop[i]){
1652                 //If an unsupported layer is being pulled out of
1653                 //the FB batch, we must fail
1654                 if(not isSupportedForMDPComp(ctx, layer)) {
1655                     return false;
1656                 }
1657                 mCurrentFrame.isFBComposed[i] = false;
1658             }
1659         }
1660     }
1661 
1662     // update the frame data
1663     mCurrentFrame.fbZ = fbZ;
1664     mCurrentFrame.fbCount = maxBatchCount;
1665     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1666             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1667 
1668     ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
1669             mCurrentFrame.fbCount);
1670 
1671     return true;
1672 }
1673 
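/* Classifies each app layer: non-updating layers are kept on the FB
 * (cached) and counted unless already dropped; updating layers are marked
 * for MDP. The FB/MDP counts are then recomputed from the split. */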
1674 void MDPComp::updateLayerCache(hwc_context_t* ctx,
1675         hwc_display_contents_1_t* list, FrameInfo& frame) {
1676     int numAppLayers = ctx->listStats[mDpy].numAppLayers;
1677     int fbCount = 0;
1678 
1679     for(int i = 0; i < numAppLayers; i++) {
1680         hwc_layer_1_t * layer = &list->hwLayers[i];
1681         if (!layerUpdating(layer)) {
1682             if(!frame.drop[i])
1683                 fbCount++;
1684             frame.isFBComposed[i] = true;
1685         } else {
1686             frame.isFBComposed[i] = false;
1687         }
1688     }
1689 
1690     frame.fbCount = fbCount;
1691     frame.mdpCount = frame.layerCount - frame.fbCount
1692                                             - frame.dropCount;
1693 
1694     ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d",
1695             __FUNCTION__, frame.mdpCount, frame.fbCount, frame.dropCount);
1696 }
1697 
1698 // Drop all non-AIV layers from the external display list.
1699 void MDPComp::dropNonAIVLayers(hwc_context_t* ctx,
1700                               hwc_display_contents_1_t* list) {
1701     for (size_t i = 0; i < (size_t)ctx->listStats[mDpy].numAppLayers; i++) {
1702         hwc_layer_1_t * layer = &list->hwLayers[i];
1703         if(!(isAIVVideoLayer(layer) || isAIVCCLayer(layer))) {
1704             mCurrentFrame.dropCount++;
1705             mCurrentFrame.drop[i] = true;
1706         }
1707     }
1708     mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
1709     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1710             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1711     ALOGD_IF(isDebug(),"%s: fb count: %d mdp count %d drop count %d",
1712         __FUNCTION__, mCurrentFrame.fbCount, mCurrentFrame.mdpCount,
1713         mCurrentFrame.dropCount);
1714 }
1715 
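/* Walks the YUV layers of this display: layers that fail isYUVDoable()
 * are pinned to the FB, doable ones are pulled to MDP. With secureOnly
 * set, only secure buffers are pulled; the others stay on the FB. */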
1716 void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
1717         bool secureOnly, FrameInfo& frame) {
1718     int nYuvCount = ctx->listStats[mDpy].yuvCount;
1719     for(int index = 0;index < nYuvCount; index++){
1720         int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
1721         hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];
1722 
1723         if(mCurrentFrame.drop[nYuvIndex]) {
1724             continue;
1725         }
1726 
1727         if(!isYUVDoable(ctx, layer)) {
1728             if(!frame.isFBComposed[nYuvIndex]) {
1729                 frame.isFBComposed[nYuvIndex] = true;
1730                 frame.fbCount++;
1731             }
1732         } else {
1733             if(frame.isFBComposed[nYuvIndex]) {
1734                 private_handle_t *hnd = (private_handle_t *)layer->handle;
1735                 if(!secureOnly || isSecureBuffer(hnd)) {
1736                     frame.isFBComposed[nYuvIndex] = false;
1737                     frame.fbCount--;
1738                 }
1739             }
1740         }
1741     }
1742 
1743     frame.mdpCount = frame.layerCount - frame.fbCount - frame.dropCount;
1744     ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__, frame.fbCount);
1745 }
1746 
1747 void MDPComp::updateSecureRGB(hwc_context_t* ctx,
1748     hwc_display_contents_1_t* list) {
1749     int nSecureRGBCount = ctx->listStats[mDpy].secureRGBCount;
1750     for(int index = 0;index < nSecureRGBCount; index++){
1751         int nSecureRGBIndex = ctx->listStats[mDpy].secureRGBIndices[index];
1752         hwc_layer_1_t* layer = &list->hwLayers[nSecureRGBIndex];
1753 
1754         if(!isSecureRGBDoable(ctx, layer)) {
1755             if(!mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
1756                 mCurrentFrame.isFBComposed[nSecureRGBIndex] = true;
1757                 mCurrentFrame.fbCount++;
1758             }
1759         } else {
1760             if(mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
1761                 mCurrentFrame.isFBComposed[nSecureRGBIndex] = false;
1762                 mCurrentFrame.fbCount--;
1763             }
1764         }
1765     }
1766 
1767     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1768             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1769     ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
1770              mCurrentFrame.fbCount);
1771 }
1772 
1773 hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
1774         hwc_display_contents_1_t* list){
1775     hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};
1776 
1777     /* Update only the region of FB needed for composition */
1778     for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
1779         if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
1780             hwc_layer_1_t* layer = &list->hwLayers[i];
1781             hwc_rect_t dst = layer->displayFrame;
1782             fbRect = getUnion(fbRect, dst);
1783         }
1784     }
1785     trimAgainstROI(ctx, fbRect, fbRect);
1786     return fbRect;
1787 }
1788 
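/* Common validation/commit path for all strategies, in order:
 * resource check -> hw limitation check -> FB target configuration
 * (if fbZ >= 0) -> MDP pipe allocation -> per-layer pipe configuration
 * with z-order assignment (fbZ is reserved for the FB target) ->
 * final overlay validateAndSet(). Any failure aborts the strategy. */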
1789 bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
1790         hwc_display_contents_1_t* list) {
1791 
1792     //Capability checks
1793     if(!resourceCheck(ctx, list)) {
1794         ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
1795         return false;
1796     }
1797 
1798     //Limitations checks
1799     if(!hwLimitationsCheck(ctx, list)) {
1800         ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
1801         return false;
1802     }
1803 
1804     //Configure framebuffer first if applicable
1805     if(mCurrentFrame.fbZ >= 0) {
1806         hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
1807         if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
1808         {
1809             ALOGD_IF(isDebug(), "%s configure framebuffer failed",
1810                     __FUNCTION__);
1811             return false;
1812         }
1813     }
1814 
1815     mCurrentFrame.map();
1816 
1817     if(!allocLayerPipes(ctx, list)) {
1818         ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
1819         return false;
1820     }
1821 
1822     for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
1823             index++) {
1824         if(!mCurrentFrame.isFBComposed[index]) {
1825             int mdpIndex = mCurrentFrame.layerToMDP[index];
1826             hwc_layer_1_t* layer = &list->hwLayers[index];
1827 
1828             //Leave fbZ for framebuffer. CACHE/GLES layers go here.
1829             if(mdpNextZOrder == mCurrentFrame.fbZ) {
1830                 mdpNextZOrder++;
1831             }
1832             MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
1833             cur_pipe->zOrder = mdpNextZOrder++;
1834 
1835             private_handle_t *hnd = (private_handle_t *)layer->handle;
1836             if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){
1837                 if(configure4k2kYuv(ctx, layer,
1838                             mCurrentFrame.mdpToLayer[mdpIndex])
1839                         != 0 ){
1840                     ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
1841                             for layer %d",__FUNCTION__, index);
1842                     return false;
1843                 }
1844                 else{
1845                     mdpNextZOrder++;
1846                 }
1847                 continue;
1848             }
1849             if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
1850                 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
1851                         layer %d",__FUNCTION__, index);
1852                 return false;
1853             }
1854         }
1855     }
1856 
1857     if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
1858         ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
1859                 ,__FUNCTION__, mDpy);
1860         return false;
1861     }
1862 
1863     setRedraw(ctx, list);
1864     return true;
1865 }
1866 
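/* Pipe budget check: the FB target itself consumes one pipe whenever any
 * layer is FB-composed, so mdpCount must fit within
 * sMaxPipesPerMixer - fbUsed. Rotator sessions are budgeted globally
 * across all displays. */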
1867 bool MDPComp::resourceCheck(hwc_context_t* ctx,
1868         hwc_display_contents_1_t* list) {
1869     const bool fbUsed = mCurrentFrame.fbCount;
1870     if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
1871         ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
1872         return false;
1873     }
1874 
1875     //Will benefit cases where a video has non-updating background.
1876     if((mDpy > HWC_DISPLAY_PRIMARY) and
1877             (mCurrentFrame.mdpCount > sMaxSecLayers)) {
1878         ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
1879         return false;
1880     }
1881 
1882     // Init rotCount to number of rotate sessions used by other displays
1883     int rotCount = ctx->mRotMgr->getNumActiveSessions();
1884     // Count the number of rotator sessions required for current display
1885     for (int index = 0; index < mCurrentFrame.layerCount; index++) {
1886         if(!mCurrentFrame.isFBComposed[index]) {
1887             hwc_layer_1_t* layer = &list->hwLayers[index];
1888             private_handle_t *hnd = (private_handle_t *)layer->handle;
1889             if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
1890                 rotCount++;
1891             }
1892         }
1893     }
1894     // if number of layers to rotate exceeds max rotator sessions, bail out.
1895     if(rotCount > RotMgr::MAX_ROT_SESS) {
1896         ALOGD_IF(isDebug(), "%s: Exceeds max rotator sessions, dpy %d",
1897                                     __FUNCTION__, mDpy);
1898         return false;
1899     }
1900     return true;
1901 }
1902 
1903 bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
1904         hwc_display_contents_1_t* list) {
1905 
1906     //A-family hw limitation:
1907     //MDP cannot support a layer that needs alpha scaling.
1908     if(ctx->mMDP.version < qdutils::MDSS_V5) {
1909         for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
1910             if(!mCurrentFrame.isFBComposed[i] &&
1911                     isAlphaScaled( &list->hwLayers[i])) {
1912                 ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
1913                 return false;
1914             }
1915         }
1916     }
1917 
1918     // On 8x26 & 8974 hw, there is a downscaling+blending limitation.
1919     //If multiple layers require downscaling and they also overlap,
1920     //fall back to GPU since MDSS cannot handle it.
1921     if(qdutils::MDPVersion::getInstance().is8x74v2() ||
1922             qdutils::MDPVersion::getInstance().is8x26()) {
1923         for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
1924             hwc_layer_1_t* botLayer = &list->hwLayers[i];
1925             if(!mCurrentFrame.isFBComposed[i] &&
1926                     isDownscaleRequired(botLayer)) {
1927                 //if layer-i is marked for MDP and needs downscaling
1928                 //check if any MDP layer on top of i & overlaps with layer-i
1929                 for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
1930                     hwc_layer_1_t* topLayer = &list->hwLayers[j];
1931                     if(!mCurrentFrame.isFBComposed[j] &&
1932                             isDownscaleRequired(topLayer)) {
1933                         hwc_rect_t r = getIntersection(botLayer->displayFrame,
1934                                 topLayer->displayFrame);
1935                         if(isValidRect(r))
1936                             return false;
1937                     }
1938                 }
1939             }
1940         }
1941     }
1942     return true;
1943 }
1944 
1945 // Checks whether only videos or a single RGB layer is updating,
1946 // which is used for setting dynamic fps or a perf hint for
1947 // single-layer video playback
1948 bool MDPComp::onlyVideosUpdating(hwc_context_t *ctx,
1949                                 hwc_display_contents_1_t* list) {
1950     bool support = false;
1951     FrameInfo frame;
1952     frame.reset(mCurrentFrame.layerCount);
1953     memset(&frame.drop, 0, sizeof(frame.drop));
1954     frame.dropCount = 0;
1955     ALOGD_IF(isDebug(), "%s: Update Cache and YUVInfo", __FUNCTION__);
1956     updateLayerCache(ctx, list, frame);
1957     updateYUV(ctx, list, false /*secure only*/, frame);
1958     // There are only updating YUV layers, or there is a single RGB
1959     // layer (e.g. YouTube)
1960     if((ctx->listStats[mDpy].yuvCount == frame.mdpCount) ||
1961                                         (frame.layerCount == 1)) {
1962         support = true;
1963     }
1964     return support;
1965 }
1966 
1967 void MDPComp::setDynRefreshRate(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
1968     //For the primary display, set the dynamic refresh rate
1969     if(!mDpy && qdutils::MDPVersion::getInstance().isDynFpsSupported() &&
1970                                         ctx->mUseMetaDataRefreshRate) {
1971         uint32_t refreshRate = ctx->dpyAttr[mDpy].refreshRate;
1972         MDPVersion& mdpHw = MDPVersion::getInstance();
1973         if(sIdleFallBack) {
1974             //Set minimum panel refresh rate during idle timeout
1975             refreshRate = mdpHw.getMinFpsSupported();
1976         } else if(onlyVideosUpdating(ctx, list)) {
1977             //Set the new refresh rate, if there is only one updating YUV
1978             //layer or one single RGB layer with this request
1979             refreshRate = ctx->listStats[mDpy].refreshRateRequest;
1980         }
1981         setRefreshRate(ctx, mDpy, refreshRate);
1982     }
1983 }
1984 
1985 int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
1986     int ret = 0;
1987     char property[PROPERTY_VALUE_MAX];
1988 
1989     if(!ctx || !list) {
1990         ALOGE("%s: Invalid context or list",__FUNCTION__);
1991         mCachedFrame.reset();
1992         return -1;
1993     }
1994 
1995     const int numLayers = ctx->listStats[mDpy].numAppLayers;
1996     if(mDpy == HWC_DISPLAY_PRIMARY) {
1997         sSimulationFlags = 0;
1998         if(property_get("debug.hwc.simulate", property, NULL) > 0) {
1999             int currentFlags = atoi(property);
2000             if(currentFlags != sSimulationFlags) {
2001                 sSimulationFlags = currentFlags;
2002                 ALOGI("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
2003                         sSimulationFlags, sSimulationFlags);
2004             }
2005         }
2006     }
2007     // reset PTOR
2008     if(!mDpy)
2009         memset(&(ctx->mPtorInfo), 0, sizeof(ctx->mPtorInfo));
2010 
2011     //reset old data
2012     mCurrentFrame.reset(numLayers);
2013     memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
2014     mCurrentFrame.dropCount = 0;
2015 
2016     //Do not cache the information for next draw cycle.
2017     if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
2018         ALOGI_IF(numLayers, "%s: Unsupported layer count for mdp composition: %d",
2019                 __FUNCTION__, numLayers);
2020         mCachedFrame.reset();
2021 #ifdef DYNAMIC_FPS
2022         // Reset refresh rate
2023         setRefreshRate(ctx, mDpy, ctx->dpyAttr[mDpy].refreshRate);
2024 #endif
2025         return -1;
2026     }
2027 
2028     // Detect the start of animation and fall back to GPU only once to cache
2029     // all the layers in FB and display FB content until animation completes.
2030     if(ctx->listStats[mDpy].isDisplayAnimating) {
2031         mCurrentFrame.needsRedraw = false;
2032         if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
2033             mCurrentFrame.needsRedraw = true;
2034             ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
2035         }
2036         setMDPCompLayerFlags(ctx, list);
2037         mCachedFrame.updateCounts(mCurrentFrame);
2038 #ifdef DYNAMIC_FPS
2039         // Reset refresh rate
2040         setRefreshRate(ctx, mDpy, ctx->dpyAttr[mDpy].refreshRate);
2041 #endif
2042         ret = -1;
2043         return ret;
2044     } else {
2045         ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
2046     }
2047 
2048     //Hard conditions, if not met, cannot do MDP comp
2049     if(isFrameDoable(ctx)) {
2050         generateROI(ctx, list);
2051         // if AIV Video mode is enabled, drop all non AIV layers from the
2052         // external display list.
2053         if(ctx->listStats[mDpy].mAIVVideoMode) {
2054             dropNonAIVLayers(ctx, list);
2055         }
2056 
2057         // if tryFullFrame fails, try to push all video and secure RGB layers
2058         // to MDP for composition.
2059         mModeOn = tryFullFrame(ctx, list) || tryMDPOnlyLayers(ctx, list) ||
2060                   tryVideoOnly(ctx, list);
2061         if(mModeOn) {
2062             setMDPCompLayerFlags(ctx, list);
2063         } else {
2064             resetROI(ctx, mDpy);
2065             reset(ctx);
2066             memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
2067             mCurrentFrame.dropCount = 0;
2068             ret = -1;
2069             ALOGE_IF(sSimulationFlags && (mDpy == HWC_DISPLAY_PRIMARY),
2070                     "MDP Composition Strategies Failed");
2071         }
2072     } else {
2073         if ((ctx->mMDP.version == qdutils::MDP_V3_0_5) && ctx->mCopyBit[mDpy] &&
2074                 enablePartialUpdateForMDP3) {
2075             generateROI(ctx, list);
2076             for(int i = 0; i < ctx->listStats[mDpy].numAppLayers; i++) {
2077                 ctx->copybitDrop[i] = mCurrentFrame.drop[i];
2078             }
2079         }
2080         ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
2081                 __FUNCTION__);
2082         ret = -1;
2083     }
2084 
2085     if(isDebug()) {
2086         ALOGD("GEOMETRY change: %d",
2087                 (list->flags & HWC_GEOMETRY_CHANGED));
2088         android::String8 sDump("");
2089         dump(sDump, ctx);
2090         ALOGD("%s",sDump.string());
2091     }
2092 
2093 #ifdef DYNAMIC_FPS
2094     setDynRefreshRate(ctx, list);
2095 #endif
2096     setPerfHint(ctx, list);
2097 
2098     mCachedFrame.updateCounts(mCurrentFrame);
2099     return ret;
2100 }
2101 
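/* Allocates a pair of scaling-capable VG pipes (left + right half) for a
 * 4k2k YUV layer that will be source-split across them. If either
 * allocation fails, the whole request is treated as failed. */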
2102 bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {
2103 
2104     bool bRet = true;
2105     int mdpIndex = mCurrentFrame.layerToMDP[index];
2106     PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
2107     info.pipeInfo = new MdpYUVPipeInfo;
2108     info.rot = NULL;
2109     MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;
2110 
2111     pipe_info.lIndex = ovutils::OV_INVALID;
2112     pipe_info.rIndex = ovutils::OV_INVALID;
2113 
2114     Overlay::PipeSpecs pipeSpecs;
2115     pipeSpecs.formatClass = Overlay::FORMAT_YUV;
2116     pipeSpecs.needsScaling = true;
2117     pipeSpecs.dpy = mDpy;
2118     pipeSpecs.fb = false;
2119 
2120     pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
2121     if(pipe_info.lIndex == ovutils::OV_INVALID){
2122         bRet = false;
2123         ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
2124                 __FUNCTION__);
2125     }
2126     pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
2127     if(pipe_info.rIndex == ovutils::OV_INVALID){
2128         bRet = false;
2129         ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
2130                 __FUNCTION__);
2131     }
2132     return bRet;
2133 }
2134 
2135 int MDPComp::drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
2136     int fd = -1;
2137     if (ctx->mPtorInfo.isActive()) {
2138         fd = ctx->mCopyBit[mDpy]->drawOverlap(ctx, list);
2139         if (fd < 0) {
2140             ALOGD_IF(isDebug(),"%s: failed", __FUNCTION__);
2141         }
2142     }
2143     return fd;
2144 }
2145 //=============MDPCompNonSplit==================================================
2146 
2147 void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
2148         hwc_display_contents_1_t* list) {
2149     //If a 4k2k yuv layer split is possible, and if
2150     //fbZ is above the 4k2k layer, increment fb z-order by 1,
2151     //since we split the 4k2k layer and the right half of the
2152     //layer takes an extra z-order
2153     if(!ctx)
2154         return;
2155     if(mCurrentFrame.fbZ >= 0) {
2156         for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
2157                 index++) {
2158             if(!mCurrentFrame.isFBComposed[index]) {
2159                 if(mdpNextZOrder == mCurrentFrame.fbZ) {
2160                     mdpNextZOrder++;
2161                 }
2162                 mdpNextZOrder++;
2163                 hwc_layer_1_t* layer = &list->hwLayers[index];
2164                 private_handle_t *hnd = (private_handle_t *)layer->handle;
2165                 if(isYUVSplitNeeded(hnd)) {
2166                     if(mdpNextZOrder <= mCurrentFrame.fbZ)
2167                         mCurrentFrame.fbZ += 1;
2168                     mdpNextZOrder++;
2169                     //As we split the 4kx2k yuv layer and program it to
2170                     //2 VG pipes (if available), increase mdpCount by 1.
2171                     mCurrentFrame.mdpCount++;
2172                 }
2173             }
2174         }
2175     }
2176 }
2177 
2178 /*
2179  * Configures pipe(s) for MDP composition
2180  */
2181 int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
2182                              PipeLayerPair& PipeLayerPair) {
2183     MdpPipeInfoNonSplit& mdp_info =
2184         *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
2185     eMdpFlags mdpFlags = ovutils::OV_MDP_FLAGS_NONE;
2186     eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2187     eDest dest = mdp_info.index;
2188 
2189     ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
2190              __FUNCTION__, layer, zOrder, dest);
2191 
2192     return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, dest,
2193                            &PipeLayerPair.rot);
2194 }
2195 
2196 bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
2197         hwc_display_contents_1_t* list) {
2198     for(int index = 0; index < mCurrentFrame.layerCount; index++) {
2199 
2200         if(mCurrentFrame.isFBComposed[index]) continue;
2201 
2202         hwc_layer_1_t* layer = &list->hwLayers[index];
2203         private_handle_t *hnd = (private_handle_t *)layer->handle;
2204         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){
2205             if(allocSplitVGPipesfor4k2k(ctx, index)){
2206                 continue;
2207             }
2208         }
2209 
2210         int mdpIndex = mCurrentFrame.layerToMDP[index];
2211         PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
2212         info.pipeInfo = new MdpPipeInfoNonSplit;
2213         info.rot = NULL;
2214         MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;
2215 
2216         Overlay::PipeSpecs pipeSpecs;
2217         pipeSpecs.formatClass = isYuvBuffer(hnd) ?
2218                 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
2219         pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
2220                 (qdutils::MDPVersion::getInstance().is8x26() and
2221                 ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
2222         pipeSpecs.dpy = mDpy;
2223         pipeSpecs.fb = false;
2224         pipeSpecs.numActiveDisplays = ctx->numActiveDisplays;
2225 
2226         pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);
2227 
2228         if(pipe_info.index == ovutils::OV_INVALID) {
2229             ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
2230             return false;
2231         }
2232     }
2233     return true;
2234 }
2235 
2236 int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
2237         PipeLayerPair& PipeLayerPair) {
2238     MdpYUVPipeInfo& mdp_info =
2239             *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
2240     eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2241     eMdpFlags mdpFlagsL = ovutils::OV_MDP_FLAGS_NONE;
2242     eDest lDest = mdp_info.lIndex;
2243     eDest rDest = mdp_info.rIndex;
2244 
2245     return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder,
2246             lDest, rDest, &PipeLayerPair.rot);
2247 }
2248 
2249 bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
2250 
2251     if(!isEnabled() or !mModeOn) {
2252         ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
2253         return true;
2254     }
2255 
2256     // Set the Handle timeout to true for MDP or MIXED composition.
2257     if(sIdleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
2258         sHandleTimeout = true;
2259     }
2260 
2261     overlay::Overlay& ov = *ctx->mOverlay;
2262     LayerProp *layerProp = ctx->layerProp[mDpy];
2263 
2264     int numHwLayers = ctx->listStats[mDpy].numAppLayers;
2265     for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
2266     {
2267         if(mCurrentFrame.isFBComposed[i]) continue;
2268 
2269         hwc_layer_1_t *layer = &list->hwLayers[i];
2270         private_handle_t *hnd = (private_handle_t *)layer->handle;
2271         if(!hnd) {
2272             if (!(layer->flags & HWC_COLOR_FILL)) {
2273                 ALOGE("%s handle null", __FUNCTION__);
2274                 return false;
2275             }
2276             // No PLAY for Color layer
2277             layerProp[i].mFlags &= ~HWC_MDPCOMP;
2278             continue;
2279         }
2280 
2281         int mdpIndex = mCurrentFrame.layerToMDP[i];
2282 
2283         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit)
2284         {
2285             MdpYUVPipeInfo& pipe_info =
2286                 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2287             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2288             ovutils::eDest indexL = pipe_info.lIndex;
2289             ovutils::eDest indexR = pipe_info.rIndex;
2290             int fd = hnd->fd;
2291             uint32_t offset = (uint32_t)hnd->offset;
2292             if(rot) {
2293                 rot->queueBuffer(fd, offset);
2294                 fd = rot->getDstMemId();
2295                 offset = rot->getDstOffset();
2296             }
2297             if(indexL != ovutils::OV_INVALID) {
2298                 ovutils::eDest destL = (ovutils::eDest)indexL;
2299                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2300                         using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
2301                 if (!ov.queueBuffer(fd, offset, destL)) {
2302                     ALOGE("%s: queueBuffer failed for display:%d",
2303                             __FUNCTION__, mDpy);
2304                     return false;
2305                 }
2306             }
2307 
2308             if(indexR != ovutils::OV_INVALID) {
2309                 ovutils::eDest destR = (ovutils::eDest)indexR;
2310                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2311                         using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
2312                 if (!ov.queueBuffer(fd, offset, destR)) {
2313                     ALOGE("%s: queueBuffer failed for display:%d",
2314                             __FUNCTION__, mDpy);
2315                     return false;
2316                 }
2317             }
2318         }
2319         else{
2320             MdpPipeInfoNonSplit& pipe_info =
2321             *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2322             ovutils::eDest dest = pipe_info.index;
2323             if(dest == ovutils::OV_INVALID) {
2324                 ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
2325                 return false;
2326             }
2327 
2328             if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
2329                 continue;
2330             }
2331 
2332             int fd = hnd->fd;
2333             uint32_t offset = (uint32_t)hnd->offset;
2334             int index = ctx->mPtorInfo.getPTORArrayIndex(i);
2335             if (!mDpy && (index != -1)) {
2336                 hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
2337                 fd = hnd->fd;
2338                 offset = 0;
2339             }
2340 
2341             ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2342                     using  pipe: %d", __FUNCTION__, layer,
2343                     hnd, dest );
2344 
2345             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2346             if(rot) {
2347                 if(!rot->queueBuffer(fd, offset))
2348                     return false;
2349                 fd = rot->getDstMemId();
2350                 offset = rot->getDstOffset();
2351             }
2352 
2353             if (!ov.queueBuffer(fd, offset, dest)) {
2354                 ALOGE("%s: queueBuffer failed for display:%d ",
2355                         __FUNCTION__, mDpy);
2356                 return false;
2357             }
2358         }
2359 
2360         layerProp[i].mFlags &= ~HWC_MDPCOMP;
2361     }
2362     return true;
2363 }
2364 
2365 //=============MDPCompSplit===================================================
2366 
2367 void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
2368          hwc_display_contents_1_t* list){
2369     //If the 4kx2k yuv layer is fully contained in either the left half
2370     //or the right half, try splitting the yuv layer to avoid decimation
2371     const int lSplit = getLeftSplit(ctx, mDpy);
2372     if(mCurrentFrame.fbZ >= 0) {
2373         for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
2374                 index++) {
2375             if(!mCurrentFrame.isFBComposed[index]) {
2376                 if(mdpNextZOrder == mCurrentFrame.fbZ) {
2377                     mdpNextZOrder++;
2378                 }
2379                 mdpNextZOrder++;
2380                 hwc_layer_1_t* layer = &list->hwLayers[index];
2381                 private_handle_t *hnd = (private_handle_t *)layer->handle;
2382                 if(isYUVSplitNeeded(hnd)) {
2383                     hwc_rect_t dst = layer->displayFrame;
2384                     if((dst.left > lSplit) || (dst.right < lSplit)) {
2385                         mCurrentFrame.mdpCount += 1;
2386                     }
2387                     if(mdpNextZOrder <= mCurrentFrame.fbZ)
2388                         mCurrentFrame.fbZ += 1;
2389                     mdpNextZOrder++;
2390                 }
2391             }
2392         }
2393     }
2394 }
2395 
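/* Acquires at most one pipe per mixer half for the layer: a left pipe if
 * the destination reaches into the left half and intersects the left ROI,
 * and a right pipe likewise. A half that is not updating needs no pipe. */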
2396 bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
2397         MdpPipeInfoSplit& pipe_info) {
2398 
2399     const int lSplit = getLeftSplit(ctx, mDpy);
2400     private_handle_t *hnd = (private_handle_t *)layer->handle;
2401     hwc_rect_t dst = layer->displayFrame;
2402     pipe_info.lIndex = ovutils::OV_INVALID;
2403     pipe_info.rIndex = ovutils::OV_INVALID;
2404 
2405     Overlay::PipeSpecs pipeSpecs;
2406     pipeSpecs.formatClass = isYuvBuffer(hnd) ?
2407             Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
2408     pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
2409     pipeSpecs.dpy = mDpy;
2410     pipeSpecs.mixer = Overlay::MIXER_LEFT;
2411     pipeSpecs.fb = false;
2412 
2413     // Acquire pipe only for the updating half
2414     hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
2415     hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;
2416 
2417     if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
2418         pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
2419         if(pipe_info.lIndex == ovutils::OV_INVALID)
2420             return false;
2421     }
2422 
2423     if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
2424         pipeSpecs.mixer = Overlay::MIXER_RIGHT;
2425         pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
2426         if(pipe_info.rIndex == ovutils::OV_INVALID)
2427             return false;
2428     }
2429 
2430     return true;
2431 }
2432 
2433 bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
2434         hwc_display_contents_1_t* list) {
2435     for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {
2436 
2437         if(mCurrentFrame.isFBComposed[index]) continue;
2438 
2439         hwc_layer_1_t* layer = &list->hwLayers[index];
2440         private_handle_t *hnd = (private_handle_t *)layer->handle;
2441         hwc_rect_t dst = layer->displayFrame;
2442         const int lSplit = getLeftSplit(ctx, mDpy);
2443         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){
2444             if((dst.left > lSplit)||(dst.right < lSplit)){
2445                 if(allocSplitVGPipesfor4k2k(ctx, index)){
2446                     continue;
2447                 }
2448             }
2449         }
2450         int mdpIndex = mCurrentFrame.layerToMDP[index];
2451         PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
2452         info.pipeInfo = new MdpPipeInfoSplit;
2453         info.rot = NULL;
2454         MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;
2455 
2456         if(!acquireMDPPipes(ctx, layer, pipe_info)) {
2457             ALOGD_IF(isDebug(), "%s: Unable to get pipe for type",
2458                     __FUNCTION__);
2459             return false;
2460         }
2461     }
2462     return true;
2463 }
2464 
2465 int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
2466         PipeLayerPair& PipeLayerPair) {
2467     const int lSplit = getLeftSplit(ctx, mDpy);
2468     hwc_rect_t dst = layer->displayFrame;
2469     if((dst.left > lSplit)||(dst.right < lSplit)){
2470         MdpYUVPipeInfo& mdp_info =
2471                 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
2472         eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2473         eMdpFlags mdpFlagsL = ovutils::OV_MDP_FLAGS_NONE;
2474         eDest lDest = mdp_info.lIndex;
2475         eDest rDest = mdp_info.rIndex;
2476 
2477         return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder,
2478                 lDest, rDest, &PipeLayerPair.rot);
2479     }
2480     else{
2481         return configure(ctx, layer, PipeLayerPair);
2482     }
2483 }
2484 
2485 /*
2486  * Configures pipe(s) for MDP composition
2487  */
2488 int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
2489         PipeLayerPair& PipeLayerPair) {
2490     MdpPipeInfoSplit& mdp_info =
2491         *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
2492     eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
2493     eMdpFlags mdpFlagsL = ovutils::OV_MDP_FLAGS_NONE;
2494     eDest lDest = mdp_info.lIndex;
2495     eDest rDest = mdp_info.rIndex;
2496 
2497     ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
2498             "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);
2499 
2500     return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, lDest,
2501             rDest, &PipeLayerPair.rot);
2502 }
2503 
2504 bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
2505 
2506     if(!isEnabled() or !mModeOn) {
2507         ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
2508         return true;
2509     }
2510 
2511     // Set the Handle timeout to true for MDP or MIXED composition.
2512     if(sIdleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
2513         sHandleTimeout = true;
2514     }
2515 
2516     overlay::Overlay& ov = *ctx->mOverlay;
2517     LayerProp *layerProp = ctx->layerProp[mDpy];
2518 
2519     int numHwLayers = ctx->listStats[mDpy].numAppLayers;
2520     for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
2521     {
2522         if(mCurrentFrame.isFBComposed[i]) continue;
2523 
2524         hwc_layer_1_t *layer = &list->hwLayers[i];
2525         private_handle_t *hnd = (private_handle_t *)layer->handle;
2526         if(!hnd) {
2527             ALOGE("%s handle null", __FUNCTION__);
2528             return false;
2529         }
2530 
2531         if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
2532             continue;
2533         }
2534 
2535         int mdpIndex = mCurrentFrame.layerToMDP[i];
2536 
2537         if(isYUVSplitNeeded(hnd) && sEnableYUVsplit)
2538         {
2539             MdpYUVPipeInfo& pipe_info =
2540                 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2541             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2542             ovutils::eDest indexL = pipe_info.lIndex;
2543             ovutils::eDest indexR = pipe_info.rIndex;
2544             int fd = hnd->fd;
2545             uint32_t offset = (uint32_t)hnd->offset;
2546             if(rot) {
2547                 rot->queueBuffer(fd, offset);
2548                 fd = rot->getDstMemId();
2549                 offset = rot->getDstOffset();
2550             }
2551             if(indexL != ovutils::OV_INVALID) {
2552                 ovutils::eDest destL = (ovutils::eDest)indexL;
2553                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2554                         using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
2555                 if (!ov.queueBuffer(fd, offset, destL)) {
2556                     ALOGE("%s: queueBuffer failed for display:%d",
2557                             __FUNCTION__, mDpy);
2558                     return false;
2559                 }
2560             }
2561 
2562             if(indexR != ovutils::OV_INVALID) {
2563                 ovutils::eDest destR = (ovutils::eDest)indexR;
2564                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2565                         using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
2566                 if (!ov.queueBuffer(fd, offset, destR)) {
2567                     ALOGE("%s: queueBuffer failed for display:%d",
2568                             __FUNCTION__, mDpy);
2569                     return false;
2570                 }
2571             }
2572         }
2573         else{
2574             MdpPipeInfoSplit& pipe_info =
2575                 *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
2576             Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
2577 
2578             ovutils::eDest indexL = pipe_info.lIndex;
2579             ovutils::eDest indexR = pipe_info.rIndex;
2580 
2581             int fd = hnd->fd;
2582             uint32_t offset = (uint32_t)hnd->offset;
2583             int index = ctx->mPtorInfo.getPTORArrayIndex(i);
2584             if (!mDpy && (index != -1)) {
2585                 hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
2586                 fd = hnd->fd;
2587                 offset = 0;
2588             }
2589 
2590             if(ctx->mAD->draw(ctx, fd, offset)) {
2591                 fd = ctx->mAD->getDstFd();
2592                 offset = ctx->mAD->getDstOffset();
2593             }
2594 
2595             if(rot) {
2596                 rot->queueBuffer(fd, offset);
2597                 fd = rot->getDstMemId();
2598                 offset = rot->getDstOffset();
2599             }
2600 
2601             //************* play left mixer **********
2602             if(indexL != ovutils::OV_INVALID) {
2603                 ovutils::eDest destL = (ovutils::eDest)indexL;
2604                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2605                         using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
2606                 if (!ov.queueBuffer(fd, offset, destL)) {
2607                     ALOGE("%s: queueBuffer failed for left mixer",
2608                             __FUNCTION__);
2609                     return false;
2610                 }
2611             }
2612 
2613             //************* play right mixer **********
2614             if(indexR != ovutils::OV_INVALID) {
2615                 ovutils::eDest destR = (ovutils::eDest)indexR;
2616                 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
2617                         using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
2618                 if (!ov.queueBuffer(fd, offset, destR)) {
2619                     ALOGE("%s: queueBuffer failed for right mixer",
2620                             __FUNCTION__);
2621                     return false;
2622                 }
2623             }
2624         }
2625 
2626         layerProp[i].mFlags &= ~HWC_MDPCOMP;
2627     }
2628 
2629     return true;
2630 }
2631 
2632 //================MDPCompSrcSplit==============================================
2633 
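/* Walks the layers top-down, dropping those fully outside the visible ROI.
 * Opaque layers (no blending, full plane alpha) shrink the visible region
 * for everything beneath them. If a layer that needs scaling would be
 * cropped by the ROI, the ROI is abandoned for this frame. */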
2634 bool MDPCompSrcSplit::validateAndApplyROI(hwc_context_t *ctx,
2635         hwc_display_contents_1_t* list) {
2636     int numAppLayers = ctx->listStats[mDpy].numAppLayers;
2637     hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;
2638 
2639     for(int i = numAppLayers - 1; i >= 0; i--) {
2640         if(!isValidRect(visibleRect)) {
2641             mCurrentFrame.drop[i] = true;
2642             mCurrentFrame.dropCount++;
2643             continue;
2644         }
2645 
2646         const hwc_layer_1_t* layer =  &list->hwLayers[i];
2647         hwc_rect_t dstRect = layer->displayFrame;
2648         hwc_rect_t res  = getIntersection(visibleRect, dstRect);
2649 
2650         if(!isValidRect(res)) {
2651             mCurrentFrame.drop[i] = true;
2652             mCurrentFrame.dropCount++;
2653         } else {
2654             /* Reset frame ROI when any layer which needs scaling also needs ROI
2655              * cropping */
2656             if(!isSameRect(res, dstRect) && needsScaling (layer)) {
2657                 ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
2658                 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
2659                 mCurrentFrame.dropCount = 0;
2660                 return false;
2661             }
2662 
2663             /* deduct any opaque region from visibleRect */
2664             if (layer->blending == HWC_BLENDING_NONE &&
2665                     layer->planeAlpha == 0xFF)
2666                 visibleRect = deductRect(visibleRect, res);
2667         }
2668     }
2669     return true;
2670 }
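
/* Worked example (illustrative values): with visibleRect = {0, 0, 1080, 1920}
 * and a topmost opaque layer covering {0, 0, 1080, 960}, deductRect leaves
 * {0, 960, 1080, 1920}; any lower layer whose displayFrame falls entirely in
 * the deducted region no longer intersects visibleRect and is dropped from
 * composition. (Assumes deductRect yields the remaining rectangular region
 * when the subtraction is expressible as a single rect.)
 */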

/*
 * HW Limitation: ping pong split always splits the ping pong output equally
 * across the two DSIs, so the programmed ROI must be of equal width for
 * both halves.
 */
void MDPCompSrcSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0, (int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++) {
        hwc_layer_1_t* layer = &list->hwLayers[index];

        // If an RGB layer needs rotation, partial update is not possible
        if(!isYuvBuffer((private_handle_t *)layer->handle) && layer->transform)
            return;

        if (layerUpdating(layer) ||
                isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dirtyRect = getIntersection(layer->displayFrame,
                                                    fullFrame);
            if (!needsScaling(layer) && !layer->transform) {
                dirtyRect = calculateDirtyRect(layer, fullFrame);
            }
            roi = getUnion(roi, dirtyRect);
        }
    }

    /* No layer is updating, yet SurfaceFlinger wants a refresh. */
    if(!isValidRect(roi))
        return;

    if (isDisplaySplit(ctx, mDpy)) {
        hwc_rect lFrame = fullFrame;
        roi = expandROIFromMidPoint(roi, fullFrame);

        lFrame.right = fullFrame.right / 2;
        hwc_rect lRoi = getIntersection(roi, lFrame);
        // Align ROI coordinates to panel restrictions
        lRoi = getSanitizeROI(lRoi, lFrame);

        hwc_rect rFrame = fullFrame;
        rFrame.left = fullFrame.right / 2;
        hwc_rect rRoi = getIntersection(roi, rFrame);
        // Align ROI coordinates to panel restrictions
        rRoi = getSanitizeROI(rRoi, rFrame);

        roi = getUnion(lRoi, rRoi);

        ctx->listStats[mDpy].lRoi = roi;
    } else {
        hwc_rect lRoi = getIntersection(roi, fullFrame);
        // Align ROI coordinates to panel restrictions
        lRoi = getSanitizeROI(lRoi, fullFrame);

        ctx->listStats[mDpy].lRoi = lRoi;
    }

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d] [%d, %d, %d, %d]",
            __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
            ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
            ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}
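
/* Illustrative trace (hypothetical 1440x2560 split panel): a dirty rect of
 * {100, 200, 300, 400} is first mirrored about the x = 720 midpoint by
 * expandROIFromMidPoint (assumed behavior) giving {100, 200, 1340, 400}, so
 * the halves come out equal in width as the ping pong split requires:
 *   lRoi = {100, 200, 720, 400},  rRoi = {720, 200, 1340, 400}
 * after which each half is aligned to panel restrictions independently.
 */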

bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    if(qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() && !mDpy)
        trimAgainstROI(ctx, crop, dst);

    //If 2 pipes are staged on a single mixer stage, the left pipe must have
    //a higher priority than the right one. Pipe priorities start with VG0,
    //VG1 ..., then RGB0 ..., and end with DMA1.

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScaling(layer);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    //1 pipe by default for a layer
    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        return false;
    }

    /* Use 2 pipes IF
        a) Layer's crop width exceeds the max pipe width, or
        b) Layer's dest width exceeds the max pipe width, or
        c) On primary, the driver caps indicate split-always. This is based
           on an empirically derived value of panel height, and applies only
           if the layer's width exceeds the mixer's width.
    */

    MDPVersion& mdpHw = MDPVersion::getInstance();
    bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
            mdpHw.isSrcSplitAlways();
    const uint32_t lSplit = getLeftSplit(ctx, mDpy);
    const uint32_t dstWidth = dst.right - dst.left;
    const uint32_t dstHeight = dst.bottom - dst.top;
    uint32_t cropWidth = has90Transform(layer) ? crop.bottom - crop.top :
            crop.right - crop.left;
    uint32_t cropHeight = has90Transform(layer) ? crop.right - crop.left :
            crop.bottom - crop.top;
    //Approximation of the actual clock, ignoring factors common to the pipe
    //and mixer cases such as line_time
    const uint32_t layerClock = getLayerClock(dstWidth, dstHeight, cropHeight);
    const uint32_t mixerClock = lSplit;
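
    /* Rough intuition (illustrative, not the exact driver formula): a pipe
     * must fetch cropHeight source lines in the time the mixer scans out
     * dstHeight lines, so vertical downscale inflates the required pipe
     * clock by roughly cropHeight/dstHeight. If that effective load exceeds
     * what one mixer half (lSplit pixels per line) can clock out, a second
     * pipe halves the per-pipe load. E.g. with dstWidth = 1000 and
     * cropHeight = 2 * dstHeight, layerClock ~2000 > mixerClock 1080. */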

    const uint32_t downscale = getRotDownscale(ctx, layer);
    if(downscale) {
        cropWidth /= downscale;
        cropHeight /= downscale;
    }

    if(dstWidth > mdpHw.getMaxPipeWidth() or
            cropWidth > mdpHw.getMaxPipeWidth() or
            (primarySplitAlways and
            (cropWidth > lSplit or layerClock > mixerClock))) {
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID) {
            return false;
        }

        if(ctx->mOverlay->needsPrioritySwap(pipe_info.lIndex,
                    pipe_info.rIndex)) {
            qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
        }
    }

    return true;
}

int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& pipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(pipeLayerPair.pipeInfo));
    Rotator **rot = &pipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, hnd->flags);
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
             "dest_pipeR: %d", __FUNCTION__, layer, z, lDest, rDest);

    if(qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() && !mDpy) {
        /* The MDP driver crops layer coordinates against the ROI in Non-Split
         * and Split MDP comp, but for source split HWC must crop them itself.
         * Reason: 1) Source split is efficient only when the final effective
         *            load is distributed evenly across mixers.
         *         2) HWC needs the layer's effective width after ROI cropping
         *            to determine the number of pipes the layer needs.
         */
        trimAgainstROI(ctx, crop, dst);
    }

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }
    // update source crop and destination position of AIV video layer.
    if(ctx->listStats[mDpy].mAIVVideoMode && isYuvBuffer(hnd)) {
        updateCoordinates(ctx, crop, dst, mDpy);
    }
    /* Calculate the external display position based on MDP downscale,
       ActionSafe, and extorientation features. */
    calcExtDisplayPosition(ctx, hnd, mDpy, crop, dst, transform, orient);

    int downscale = getRotDownscale(ctx, layer);
    eMdpFlags mdpFlags = ovutils::OV_MDP_FLAGS_NONE;
    setMdpFlags(ctx, layer, mdpFlags, downscale, transform);

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if((has90Transform(layer) or downscale) and isRotationDoable(ctx, hnd)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(ctx, mDpy, hnd, crop, dst, transform, downscale,
                    mdpFlags);
        }
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        updateSource(orient, whf, crop, *rot);
        rotFlags |= ovutils::ROT_PREROTATED;
    }
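
    /* From this point on, whf and crop describe the rotator's output buffer
     * (updateSource rewrites them for the pre-rotated case), and
     * ROT_PREROTATED tells the pipe config that rotation/flips have already
     * been applied upstream. */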

    //If 2 pipes are being used, split the layer's crop and dst in half
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        bool cropSwap = false;
        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
            cropSwap = true;
        }

        //cropSwap trick: If the src and dst widths are both odd, let us say
        //2507, then splitting both into half would cause left width to be 1253
        //and right 1254. If crop is swapped because of H flip, this will cause
        //left crop width to be 1254, whereas left dst width remains 1253, thus
        //inducing a scaling that is unaccounted for. To overcome that we add 1
        //to the dst width if there is a cropSwap. So if the original width was
        //2507, the left dst width will be 1254. Even if the original width was
        //even for ex: 2508, the left dst width will still remain 1254.
        dstL.right = (dst.right + dst.left + cropSwap) / 2;
        dstR.left = dstL.right;
    }
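
    /* Quick check of the cropSwap arithmetic with dst = [0, 2507): unswapped,
     * dstL.right = 2507/2 = 1253, matching cropL width 1253; with cropSwap,
     * dstL.right = (2507 + 1)/2 = 1254, matching the swapped cropL width
     * 1254, so neither half picks up an unintended scale factor. */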

    //Reset the transform for MDP: the buffer was either pre-rotated by the
    //rotator, or MDP performs the flips itself
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                    cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                    cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

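// Returns the current dyn_pu sysfs value (0 or 1), or -1 on read failure.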
int MDPComp::getPartialUpdatePref(hwc_context_t *ctx) {
    Locker::Autolock _l(ctx->mDrawLock);
    const int fbNum = Overlay::getFbForDpy(Overlay::DPY_PRIMARY);
    char path[MAX_SYSFS_FILE_PATH];
    snprintf(path, sizeof(path), "/sys/class/graphics/fb%d/dyn_pu", fbNum);
    int fd = open(path, O_RDONLY);
    if(fd < 0) {
        ALOGE("%s: Failed to open sysfs node: %s", __FUNCTION__, path);
        return -1;
    }
    char value[4];
    ssize_t size_read = read(fd, value, sizeof(value)-1);
    if(size_read <= 0) {
        ALOGE("%s: Failed to read sysfs node: %s", __FUNCTION__, path);
        close(fd);
        return -1;
    }
    close(fd);
    value[size_read] = '\0';
    return atoi(value);
}

int MDPComp::setPartialUpdatePref(hwc_context_t *ctx, bool enable) {
    Locker::Autolock _l(ctx->mDrawLock);
    const int fbNum = Overlay::getFbForDpy(Overlay::DPY_PRIMARY);
    char path[MAX_SYSFS_FILE_PATH];
    snprintf(path, sizeof(path), "/sys/class/graphics/fb%d/dyn_pu", fbNum);
    int fd = open(path, O_WRONLY);
    if(fd < 0) {
        ALOGE("%s: Failed to open sysfs node: %s", __FUNCTION__, path);
        return -1;
    }
    char value[4];
    snprintf(value, sizeof(value), "%d", (int)enable);
    ssize_t ret = write(fd, value, strlen(value));
    if(ret <= 0) {
        ALOGE("%s: Failed to write to sysfs node: %s", __FUNCTION__, path);
        close(fd);
        return -1;
    }
    close(fd);
    sIsPartialUpdateActive = enable;
    return 0;
}
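
/* The two functions above round-trip the panel's dynamic partial update
 * switch through sysfs. Illustrative shell equivalent (fb0 assumed):
 *
 *   cat /sys/class/graphics/fb0/dyn_pu        # read pref: 0 or 1
 *   echo 1 > /sys/class/graphics/fb0/dyn_pu   # enable partial update
 */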

bool MDPComp::loadPerfLib() {
    char perfLibPath[PROPERTY_VALUE_MAX] = {0};
    bool success = false;
    if((property_get("ro.vendor.extension_library", perfLibPath, NULL) <= 0)) {
        ALOGE("vendor library not set in ro.vendor.extension_library");
        return false;
    }

    sLibPerfHint = dlopen(perfLibPath, RTLD_NOW);
    if(sLibPerfHint) {
        *(void **)&sPerfLockAcquire = dlsym(sLibPerfHint, "perf_lock_acq");
        *(void **)&sPerfLockRelease = dlsym(sLibPerfHint, "perf_lock_rel");
        if (!sPerfLockAcquire || !sPerfLockRelease) {
            ALOGE("Failed to load symbols for perfLock");
            dlclose(sLibPerfHint);
            sLibPerfHint = NULL;
            return false;
        }
        success = true;
        ALOGI("Successfully loaded perf hint APIs");
    } else {
        ALOGE("Failed to open %s : %s", perfLibPath, dlerror());
    }
    return success;
}
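
/* Note: the casts through (void **) are the customary POSIX idiom for
 * assigning dlsym() results to function pointers, since ISO C++ forbids a
 * direct object-pointer to function-pointer conversion. Minimal usage sketch
 * of the loaded entry points (argument values illustrative, mirroring
 * setPerfHint below):
 *
 *   int args[] = { 0x4501 };
 *   int handle = sPerfLockAcquire(0, 0, args, 1);  // acquire hint
 *   if (handle > 0)
 *       sPerfLockRelease(handle);                  // release hint
 */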

void MDPComp::setPerfHint(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    if ((sPerfHintWindow < 0) || mDpy || !sLibPerfHint) {
        return;
    }
    static int count = sPerfHintWindow;
    static int perflockFlag = 0;

    /* Send a hint to mpctl once only video layers have been updating for
     * sPerfHintWindow consecutive frames. The hint is released immediately
     * when other layers update again.
     */
    if (onlyVideosUpdating(ctx, list)) {
        if(count) {
            count--;
        }
    } else {
        if (perflockFlag) {
            perflockFlag = 0;
            sPerfLockRelease(sPerfLockHandle);
        }
        count = sPerfHintWindow;
    }
    if (count == 0 && !perflockFlag) {
        int perfHint = 0x4501; // 45-display layer hint, 01-Enable
        sPerfLockHandle = sPerfLockAcquire(0 /*handle*/, 0 /*duration*/,
                                    &perfHint, sizeof(perfHint)/sizeof(int));
        if(sPerfLockHandle > 0) {
            perflockFlag = 1;
        }
    }
}
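
/* Illustrative trace with sPerfHintWindow = 2 (the value itself is set
 * elsewhere in the HAL): two consecutive video-only frames count 2 -> 1 -> 0
 * and hint 0x4501 is acquired once; the first frame on which non-video
 * layers update releases the lock and resets the window, so brief UI
 * activity never leaves a stale hint held. */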

} // namespace qhwc