1 /*
2 * Copyright 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #undef LOG_TAG
18 #define LOG_TAG "Planner"
19 // #define LOG_NDEBUG 0
20 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
21
22 #include <android-base/properties.h>
23 #include <common/FlagManager.h>
24 #include <compositionengine/impl/planner/Flattener.h>
25 #include <compositionengine/impl/planner/LayerState.h>
26
27 #include <gui/TraceUtils.h>
28
29 using time_point = std::chrono::steady_clock::time_point;
30 using namespace std::chrono_literals;
31
32 namespace android::compositionengine::impl::planner {
33
34 namespace {
35
36 // True if the underlying layer stack is the same modulo state that would be expected to be
37 // different like specific buffers, false otherwise.
isSameStack(const std::vector<const LayerState * > & incomingLayers,const std::vector<CachedSet> & cachedSets)38 bool isSameStack(const std::vector<const LayerState*>& incomingLayers,
39 const std::vector<CachedSet>& cachedSets) {
40 std::vector<const LayerState*> existingLayers;
41 for (auto& cachedSet : cachedSets) {
42 for (auto& layer : cachedSet.getConstituentLayers()) {
43 existingLayers.push_back(layer.getState());
44 }
45 }
46
47 if (incomingLayers.size() != existingLayers.size()) {
48 return false;
49 }
50
51 for (size_t i = 0; i < incomingLayers.size(); i++) {
52 // Checking the IDs here is very strict, but we do this as otherwise we may mistakenly try
53 // to access destroyed OutputLayers later on.
54 if (incomingLayers[i]->getId() != existingLayers[i]->getId()) {
55 return false;
56 }
57
58 // Do not unflatten if source crop is only moved.
59 if (FlagManager::getInstance().cache_when_source_crop_layer_only_moved() &&
60 incomingLayers[i]->isSourceCropSizeEqual(*(existingLayers[i])) &&
61 incomingLayers[i]->getDifferingFields(*(existingLayers[i])) ==
62 LayerStateField::SourceCrop) {
63 continue;
64 }
65
66 if (incomingLayers[i]->getDifferingFields(*(existingLayers[i])) != LayerStateField::None) {
67 return false;
68 }
69 }
70 return true;
71 }
72
73 } // namespace
74
// Constructs a Flattener. |renderEngine| is used to render flattened cached sets and backs
// mTexturePool's buffer allocations (NOTE(review): appears to be held by reference, so it must
// outlive this Flattener — confirm against the header). |tunables| configures the caching
// heuristics (active-layer timeout, hole punch, render scheduling).
Flattener::Flattener(renderengine::RenderEngine& renderEngine, const Tunables& tunables)
      : mRenderEngine(renderEngine), mTunables(tunables), mTexturePool(mRenderEngine) {}
77
// Attempts to flatten the incoming layer stack into cached sets.
// |layers| is this frame's layer stack, |hash| is the hashed (non-buffer) geometry of that
// stack, and |now| is the current frame time. Returns the hash the caller should use going
// forward: the incoming |hash| when the cache was invalidated or was just seeded, otherwise a
// hash recomputed over the merged cached sets.
NonBufferHash Flattener::flattenLayers(const std::vector<const LayerState*>& layers,
                                       NonBufferHash hash, time_point now) {
    ATRACE_CALL();
    const size_t unflattenedDisplayCost = calculateDisplayCost(layers);
    mUnflattenedDisplayCost += unflattenedDisplayCost;

    // We invalidate the layer cache if:
    // 1. We're not tracking any layers, or
    // 2. The last seen hashed geometry changed between frames, or
    // 3. A stricter equality check demonstrates that the layer stack really did change, since the
    // hashed geometry does not guarantee uniqueness.
    if (mCurrentGeometry != hash || (!mLayers.empty() && !isSameStack(layers, mLayers))) {
        resetActivities(hash, now);
        mFlattenedDisplayCost += unflattenedDisplayCost;
        return hash;
    }

    ++mInitialLayerCounts[layers.size()];

    // Only buildCachedSets if these layers are already stored in mLayers.
    // Otherwise (i.e. mergeWithCachedSets returns false), the time has not
    // changed, so buildCachedSets will never find any runs.
    const bool alreadyHadCachedSets = mergeWithCachedSets(layers, now);

    ++mFinalLayerCounts[mLayers.size()];

    if (alreadyHadCachedSets) {
        buildCachedSets(now);
        // Layers may have been collapsed into cached sets, so the stack hash must be
        // recomputed over the merged sets rather than the raw incoming layers.
        hash = computeLayersHash();
    }

    return hash;
}
111
renderCachedSets(const OutputCompositionState & outputState,std::optional<std::chrono::steady_clock::time_point> renderDeadline,bool deviceHandlesColorTransform)112 void Flattener::renderCachedSets(
113 const OutputCompositionState& outputState,
114 std::optional<std::chrono::steady_clock::time_point> renderDeadline,
115 bool deviceHandlesColorTransform) {
116 ATRACE_CALL();
117
118 if (!mNewCachedSet) {
119 return;
120 }
121
122 // Ensure that a cached set has a valid buffer first
123 if (mNewCachedSet->hasRenderedBuffer()) {
124 ATRACE_NAME("mNewCachedSet->hasRenderedBuffer()");
125 return;
126 }
127
128 const auto now = std::chrono::steady_clock::now();
129
130 // If we have a render deadline, and the flattener is configured to skip rendering if we don't
131 // have enough time, then we skip rendering the cached set if we think that we'll steal too much
132 // time from the next frame.
133 if (renderDeadline && mTunables.mRenderScheduling) {
134 if (const auto estimatedRenderFinish =
135 now + mTunables.mRenderScheduling->cachedSetRenderDuration;
136 estimatedRenderFinish > *renderDeadline) {
137 mNewCachedSet->incrementSkipCount();
138
139 if (mNewCachedSet->getSkipCount() <=
140 mTunables.mRenderScheduling->maxDeferRenderAttempts) {
141 ATRACE_FORMAT("DeadlinePassed: exceeded deadline by: %d us",
142 std::chrono::duration_cast<std::chrono::microseconds>(
143 estimatedRenderFinish - *renderDeadline)
144 .count());
145 return;
146 } else {
147 ATRACE_NAME("DeadlinePassed: exceeded max skips");
148 }
149 }
150 }
151
152 mNewCachedSet->render(mRenderEngine, mTexturePool, outputState, deviceHandlesColorTransform);
153 }
154
dumpLayers(std::string & result) const155 void Flattener::dumpLayers(std::string& result) const {
156 result.append(" Current layers:");
157 for (const CachedSet& layer : mLayers) {
158 result.append("\n");
159 layer.dump(result);
160 }
161 }
162
dump(std::string & result) const163 void Flattener::dump(std::string& result) const {
164 const auto now = std::chrono::steady_clock::now();
165
166 base::StringAppendF(&result, "Flattener state:\n");
167
168 result.append("\n Statistics:\n");
169
170 result.append(" Display cost (in screen-size buffers):\n");
171 const size_t displayArea = static_cast<size_t>(mDisplaySize.width * mDisplaySize.height);
172 base::StringAppendF(&result, " Unflattened: %.2f\n",
173 static_cast<float>(mUnflattenedDisplayCost) / displayArea);
174 base::StringAppendF(&result, " Flattened: %.2f\n",
175 static_cast<float>(mFlattenedDisplayCost) / displayArea);
176
177 const auto compareLayerCounts = [](const std::pair<size_t, size_t>& left,
178 const std::pair<size_t, size_t>& right) {
179 return left.first < right.first;
180 };
181
182 const size_t maxLayerCount = mInitialLayerCounts.empty()
183 ? 0u
184 : std::max_element(mInitialLayerCounts.cbegin(), mInitialLayerCounts.cend(),
185 compareLayerCounts)
186 ->first;
187
188 result.append("\n Initial counts:\n");
189 for (size_t count = 1; count < maxLayerCount; ++count) {
190 size_t initial = mInitialLayerCounts.count(count) > 0 ? mInitialLayerCounts.at(count) : 0;
191 base::StringAppendF(&result, " % 2zd: %zd\n", count, initial);
192 }
193
194 result.append("\n Final counts:\n");
195 for (size_t count = 1; count < maxLayerCount; ++count) {
196 size_t final = mFinalLayerCounts.count(count) > 0 ? mFinalLayerCounts.at(count) : 0;
197 base::StringAppendF(&result, " % 2zd: %zd\n", count, final);
198 }
199
200 base::StringAppendF(&result, "\n Cached sets created: %zd\n", mCachedSetCreationCount);
201 base::StringAppendF(&result, " Cost: %.2f\n",
202 static_cast<float>(mCachedSetCreationCost) / displayArea);
203
204 const auto lastUpdate =
205 std::chrono::duration_cast<std::chrono::milliseconds>(now - mLastGeometryUpdate);
206 base::StringAppendF(&result, "\n Current hash %016zx, last update %sago\n\n", mCurrentGeometry,
207 durationString(lastUpdate).c_str());
208
209 dumpLayers(result);
210
211 base::StringAppendF(&result, "\n");
212 mTexturePool.dump(result);
213 }
214
calculateDisplayCost(const std::vector<const LayerState * > & layers) const215 size_t Flattener::calculateDisplayCost(const std::vector<const LayerState*>& layers) const {
216 Region coveredRegion;
217 size_t displayCost = 0;
218 bool hasClientComposition = false;
219
220 for (const LayerState* layer : layers) {
221 coveredRegion.orSelf(layer->getDisplayFrame());
222
223 // Regardless of composition type, we always have to read each input once
224 displayCost += static_cast<size_t>(layer->getDisplayFrame().width() *
225 layer->getDisplayFrame().height());
226
227 hasClientComposition |= layer->getCompositionType() ==
228 aidl::android::hardware::graphics::composer3::Composition::CLIENT;
229 }
230
231 if (hasClientComposition) {
232 // If there is client composition, the client target buffer has to be both written by the
233 // GPU and read by the DPU, so we pay its cost twice
234 displayCost += 2 *
235 static_cast<size_t>(coveredRegion.bounds().width() *
236 coveredRegion.bounds().height());
237 }
238
239 return displayCost;
240 }
241
resetActivities(NonBufferHash hash,time_point now)242 void Flattener::resetActivities(NonBufferHash hash, time_point now) {
243 ALOGV("[%s]", __func__);
244
245 mCurrentGeometry = hash;
246 mLastGeometryUpdate = now;
247
248 for (const CachedSet& cachedSet : mLayers) {
249 if (cachedSet.getLayerCount() > 1) {
250 ++mInvalidatedCachedSetAges[cachedSet.getAge()];
251 }
252 }
253
254 mLayers.clear();
255
256 if (mNewCachedSet) {
257 ++mInvalidatedCachedSetAges[mNewCachedSet->getAge()];
258 mNewCachedSet = std::nullopt;
259 }
260 }
261
computeLayersHash() const262 NonBufferHash Flattener::computeLayersHash() const{
263 size_t hash = 0;
264 for (const auto& layer : mLayers) {
265 android::hashCombineSingleHashed(hash, layer.getNonBufferHash());
266 }
267 return hash;
268 }
269
// Only called if the geometry matches the last frame. Return true if mLayers
// was already populated with these layers, i.e. on the second and following
// calls with the same geometry.
bool Flattener::mergeWithCachedSets(const std::vector<const LayerState*>& layers, time_point now) {
    ATRACE_CALL();
    std::vector<CachedSet> merged;

    // First frame with this geometry: seed mLayers with one single-layer CachedSet per
    // incoming layer and report that nothing was cached yet.
    if (mLayers.empty()) {
        merged.reserve(layers.size());
        for (const LayerState* layer : layers) {
            merged.emplace_back(layer, now);
            mFlattenedDisplayCost += merged.back().getDisplayCost();
        }
        mLayers = std::move(merged);
        return false;
    }

    // the compiler should strip out the following no-op loops when ALOGV is off
    ALOGV("[%s] Incoming layers:", __func__);
    for (const LayerState* layer : layers) {
        ALOGV("%s", layer->getName().c_str());
    }

    ALOGV("[%s] Current layers:", __func__);
    for (const CachedSet& layer : mLayers) {
        const auto dumper = [&] {
            std::string dump;
            layer.dump(dump);
            return dump;
        };
        ALOGV("%s", dumper().c_str());
    }

    auto currentLayerIter = mLayers.begin();
    auto incomingLayerIter = layers.begin();

    // If not null, this represents the layer that is blurring the layer before
    // currentLayerIter. The blurring was stored in the override buffer, so the
    // layer that requests the blur no longer needs to do any blurring.
    compositionengine::OutputLayer* priorBlurLayer = nullptr;

    while (incomingLayerIter != layers.end()) {
        // Case 1: the incoming layer is the first constituent of the in-progress cached set.
        if (mNewCachedSet &&
            mNewCachedSet->getFirstLayer().getState()->getId() == (*incomingLayerIter)->getId()) {
            if (mNewCachedSet->hasBufferUpdate()) {
                // A constituent layer received a new buffer, so the in-progress set is stale.
                ALOGV("[%s] Dropping new cached set", __func__);
                ++mInvalidatedCachedSetAges[0];
                mNewCachedSet = std::nullopt;
            } else if (mNewCachedSet->hasReadyBuffer()) {
                ALOGV("[%s] Found ready buffer", __func__);
                // Adopt mNewCachedSet: consume all current cached sets it covers and point
                // every covered incoming layer's override state at its rendered buffer.
                size_t skipCount = mNewCachedSet->getLayerCount();
                while (skipCount != 0) {
                    auto* peekThroughLayer = mNewCachedSet->getHolePunchLayer();
                    const size_t layerCount = currentLayerIter->getLayerCount();
                    for (size_t i = 0; i < layerCount; ++i) {
                        bool disableBlur = priorBlurLayer &&
                                priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                        OutputLayer::CompositionState& state =
                                (*incomingLayerIter)->getOutputLayer()->editState();
                        state.overrideInfo = {
                                .buffer = mNewCachedSet->getBuffer(),
                                .acquireFence = mNewCachedSet->getDrawFence(),
                                .displayFrame = mNewCachedSet->getTextureBounds(),
                                .dataspace = mNewCachedSet->getOutputDataspace(),
                                .displaySpace = mNewCachedSet->getOutputSpace(),
                                .damageRegion = Region::INVALID_REGION,
                                .visibleRegion = mNewCachedSet->getVisibleRegion(),
                                .peekThroughLayer = peekThroughLayer,
                                .disableBackgroundBlur = disableBlur,
                        };
                        ++incomingLayerIter;
                    }

                    // Record invalidation stats for any flattened set being replaced.
                    if (currentLayerIter->getLayerCount() > 1) {
                        ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
                    }
                    ++currentLayerIter;

                    skipCount -= layerCount;
                }
                priorBlurLayer = mNewCachedSet->getBlurLayer();
                merged.emplace_back(std::move(*mNewCachedSet));
                mNewCachedSet = std::nullopt;
                // currentLayerIter was already advanced above; skip the shared advance below.
                continue;
            }
        }

        // Case 2: the current cached set is still valid (no buffer updates); keep it and age it.
        if (!currentLayerIter->hasBufferUpdate()) {
            currentLayerIter->incrementAge();
            merged.emplace_back(*currentLayerIter);

            // Skip the incoming layers corresponding to this valid current layer
            const size_t layerCount = currentLayerIter->getLayerCount();
            auto* peekThroughLayer = currentLayerIter->getHolePunchLayer();
            for (size_t i = 0; i < layerCount; ++i) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo = {
                        .buffer = currentLayerIter->getBuffer(),
                        .acquireFence = currentLayerIter->getDrawFence(),
                        .displayFrame = currentLayerIter->getTextureBounds(),
                        .dataspace = currentLayerIter->getOutputDataspace(),
                        .displaySpace = currentLayerIter->getOutputSpace(),
                        .damageRegion = Region(),
                        .visibleRegion = currentLayerIter->getVisibleRegion(),
                        .peekThroughLayer = peekThroughLayer,
                        .disableBackgroundBlur = disableBlur,
                };
                ++incomingLayerIter;
            }
        } else if (currentLayerIter->getLayerCount() > 1) {
            // Case 3: a flattened set was invalidated by a buffer update.
            // Break the current layer into its constituent layers
            ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
            for (CachedSet& layer : currentLayerIter->decompose()) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo.disableBackgroundBlur = disableBlur;
                layer.updateAge(now);
                merged.emplace_back(layer);
                ++incomingLayerIter;
            }
        } else {
            // Case 4: a single-layer set whose buffer updated; refresh its last-update time.
            bool disableBlur =
                    priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
            OutputLayer::CompositionState& state =
                    (*incomingLayerIter)->getOutputLayer()->editState();
            state.overrideInfo.disableBackgroundBlur = disableBlur;
            currentLayerIter->updateAge(now);
            merged.emplace_back(*currentLayerIter);
            ++incomingLayerIter;
        }
        priorBlurLayer = currentLayerIter->getBlurLayer();
        ++currentLayerIter;
    }

    // Tally the flattened display cost of the merged stack for statistics.
    for (const CachedSet& layer : merged) {
        mFlattenedDisplayCost += layer.getDisplayCost();
    }

    mLayers = std::move(merged);
    return true;
}
416
// Scans mLayers for contiguous "runs" of inactive cached sets that are good candidates for
// being flattened into a single cached set. Returns all candidate runs found (possibly none).
std::vector<Flattener::Run> Flattener::findCandidateRuns(time_point now) const {
    ATRACE_CALL();
    std::vector<Run> runs;
    bool isPartOfRun = false;      // true while we are extending a candidate run
    Run::Builder builder;
    bool firstLayer = true;        // true only for the bottom-most set in the stack
    bool runHasFirstLayer = false; // true if the current run started at the bottom set

    for (auto currentSet = mLayers.cbegin(); currentSet != mLayers.cend(); ++currentSet) {
        bool layerIsInactive = now - currentSet->getLastUpdate() > mTunables.mActiveLayerTimeout;
        const bool layerHasBlur = currentSet->hasBlurBehind();
        const bool layerDeniedFromCaching = currentSet->cachingHintExcludesLayers();

        // Layers should also be considered inactive whenever their framerate is lower than 1fps.
        if (!layerIsInactive && currentSet->getLayerCount() == kNumLayersFpsConsideration) {
            auto layerFps = currentSet->getFirstLayer().getState()->getFps();
            if (layerFps > 0 && layerFps <= kFpsActiveThreshold) {
                ATRACE_FORMAT("layer is considered inactive due to low FPS [%s] %f",
                              currentSet->getFirstLayer().getName().c_str(), layerFps);
                layerIsInactive = true;
            }
        }

        // A set can join a run if it is inactive, allowed to be cached, has no known color
        // shift, and — unless the run includes the bottom layer — has no blur behind it.
        if (!layerDeniedFromCaching && layerIsInactive &&
            (firstLayer || runHasFirstLayer || !layerHasBlur) &&
            !currentSet->hasKnownColorShift()) {
            if (isPartOfRun) {
                builder.increment();
            } else {
                builder.init(currentSet);
                if (firstLayer) {
                    runHasFirstLayer = true;
                }
                isPartOfRun = true;
            }
        } else if (isPartOfRun) {
            // The run ends here; the set that broke it may serve as the hole punch candidate.
            builder.setHolePunchCandidate(&(*currentSet));

            // If we're here then this blur layer recently had an active buffer updating, meaning
            // that there is exactly one layer. Blur radius currently is part of layer stack
            // geometry, so we're also guaranteed that the background blur radius hasn't changed for
            // at least as long as this new inactive cached set.
            if (runHasFirstLayer && layerHasBlur &&
                currentSet->getFirstLayer().getBackgroundBlurRadius() > 0) {
                builder.setBlurringLayer(&(*currentSet));
            }
            if (auto run = builder.validateAndBuild(); run) {
                runs.push_back(*run);
            }

            runHasFirstLayer = false;
            builder.reset();
            isPartOfRun = false;
        }

        firstLayer = false;
    }

    // If we're in the middle of a run at the end, we still need to validate and build it.
    if (isPartOfRun) {
        if (auto run = builder.validateAndBuild(); run) {
            runs.push_back(*run);
        }
    }

    ALOGV("[%s] Found %zu candidate runs", __func__, runs.size());

    return runs;
}
486
findBestRun(std::vector<Flattener::Run> & runs) const487 std::optional<Flattener::Run> Flattener::findBestRun(std::vector<Flattener::Run>& runs) const {
488 if (runs.empty()) {
489 return std::nullopt;
490 }
491
492 // TODO (b/181192467): Choose the best run, instead of just the first.
493 return runs[0];
494 }
495
// Builds (but does not render) a new cached set out of the best candidate run, if any.
// The set is rendered later by renderCachedSets(). |now| timestamps the new set.
void Flattener::buildCachedSets(time_point now) {
    ATRACE_CALL();
    if (mLayers.empty()) {
        ALOGV("[%s] No layers found, returning", __func__);
        return;
    }

    // Don't try to build a new cached set if we already have a new one in progress
    if (mNewCachedSet) {
        return;
    }

    // Protected content cannot be cached into a flattened buffer.
    for (const CachedSet& layer : mLayers) {
        // TODO (b/191997217): make it less aggressive, and sync with findCandidateRuns
        if (layer.hasProtectedLayers()) {
            ATRACE_NAME("layer->hasProtectedLayers()");
            return;
        }
    }

    std::vector<Run> runs = findCandidateRuns(now);

    std::optional<Run> bestRun = findBestRun(runs);

    if (!bestRun) {
        return;
    }

    // Collapse the run's cached sets into a single new cached set.
    mNewCachedSet.emplace(*bestRun->getStart());
    mNewCachedSet->setLastUpdate(now);
    auto currentSet = bestRun->getStart();
    while (mNewCachedSet->getLayerCount() < bestRun->getLayerLength()) {
        ++currentSet;
        mNewCachedSet->append(*currentSet);
    }

    if (bestRun->getBlurringLayer()) {
        mNewCachedSet->addBackgroundBlurLayer(*bestRun->getBlurringLayer());
    }

    if (mTunables.mEnableHolePunch && bestRun->getHolePunchCandidate() &&
        bestRun->getHolePunchCandidate()->requiresHolePunch()) {
        // Add the pip layer to mNewCachedSet, but in a special way - it should
        // replace the buffer with a clear round rect.
        mNewCachedSet->addHolePunchLayerIfFeasible(*bestRun->getHolePunchCandidate(),
                                                   bestRun->getStart() == mLayers.cbegin());
    }

    // TODO(b/181192467): Actually compute new LayerState vector and corresponding hash for each run
    // and feedback into the predictor

    ++mCachedSetCreationCount;
    mCachedSetCreationCost += mNewCachedSet->getCreationCost();

    // note the compiler should strip the follow no-op statements when ALOGV is off
    const auto dumper = [&] {
        std::string setDump;
        mNewCachedSet->dump(setDump);
        return setDump;
    };
    ALOGV("[%s] Added new cached set:\n%s", __func__, dumper().c_str());
}
558
559 } // namespace android::compositionengine::impl::planner
560