/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#undef LOG_TAG
#define LOG_TAG "BufferLayer"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include "BufferLayer.h"
#include "Colorizer.h"
#include "DisplayDevice.h"
#include "LayerRejecter.h"
#include "clz.h"

#include "RenderEngine/RenderEngine.h"

#include <gui/BufferItem.h>
#include <gui/BufferQueue.h>
#include <gui/LayerDebugInfo.h>
#include <gui/Surface.h>

#include <ui/DebugUtils.h>

#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/NativeHandle.h>
#include <utils/StopWatch.h>
#include <utils/Trace.h>

#include <cutils/compiler.h>
#include <cutils/native_handle.h>
#include <cutils/properties.h>

#include <math.h>
#include <stdlib.h>
#include <mutex>

namespace android {

BufferLayer::BufferLayer(SurfaceFlinger* flinger, const sp<Client>& client, const String8& name,
                         uint32_t w, uint32_t h, uint32_t flags)
      : Layer(flinger, client, name, w, h, flags),
        mConsumer(nullptr),
        mTextureName(UINT32_MAX),
        mFormat(PIXEL_FORMAT_NONE),
        mCurrentScalingMode(NATIVE_WINDOW_SCALING_MODE_FREEZE),
        mBufferLatched(false),
        mPreviousFrameNumber(0),
        mUpdateTexImageFailed(false),
        mRefreshPending(false) {
    ALOGV("Creating Layer %s", name.string());

    mFlinger->getRenderEngine().genTextures(1, &mTextureName);
    mTexture.init(Texture::TEXTURE_EXTERNAL, mTextureName);

    if (flags & ISurfaceComposerClient::eNonPremultiplied) mPremultipliedAlpha = false;

    mCurrentState.requested = mCurrentState.active;

    // drawing state & current state are identical
    mDrawingState = mCurrentState;
}

BufferLayer::~BufferLayer() {
    mFlinger->deleteTextureAsync(mTextureName);

    if (!getBE().mHwcLayers.empty()) {
        ALOGE("Found stale hardware composer layers when destroying "
              "surface flinger layer %s",
              mName.string());
        destroyAllHwcLayers();
    }
}

void BufferLayer::useSurfaceDamage() {
    if (mFlinger->mForceFullDamage) {
        surfaceDamageRegion = Region::INVALID_REGION;
    } else {
        surfaceDamageRegion = mConsumer->getSurfaceDamage();
    }
}

void BufferLayer::useEmptyDamage() {
    surfaceDamageRegion.clear();
}

bool BufferLayer::isProtected() const {
    const sp<GraphicBuffer>& buffer(getBE().compositionInfo.mBuffer);
    return (buffer != 0) &&
            (buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
}

bool BufferLayer::isVisible() const {
    return !(isHiddenByPolicy()) && getAlpha() > 0.0f &&
            (getBE().compositionInfo.mBuffer != nullptr ||
             getBE().compositionInfo.hwc.sidebandStream != nullptr);
}

bool BufferLayer::isFixedSize() const {
    return getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE;
}

status_t BufferLayer::setBuffers(uint32_t w, uint32_t h, PixelFormat format, uint32_t flags) {
    uint32_t const maxSurfaceDims =
            min(mFlinger->getMaxTextureSize(), mFlinger->getMaxViewportDims());

    // never allow a surface larger than what our underlying GL implementation
    // can handle.
    if ((uint32_t(w) > maxSurfaceDims) || (uint32_t(h) > maxSurfaceDims)) {
        ALOGE("dimensions too large %u x %u", uint32_t(w), uint32_t(h));
        return BAD_VALUE;
    }

    mFormat = format;

    mPotentialCursor = (flags & ISurfaceComposerClient::eCursorWindow) ? true : false;
    mProtectedByApp = (flags & ISurfaceComposerClient::eProtectedByApp) ? true : false;
    mCurrentOpacity = getOpacityForFormat(format);

    mConsumer->setDefaultBufferSize(w, h);
    mConsumer->setDefaultBufferFormat(format);
    mConsumer->setConsumerUsageBits(getEffectiveUsage(0));

    return NO_ERROR;
}

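// Builds the inverse of the orientation described by the
// NATIVE_WINDOW_TRANSFORM_* bits in 'transform', composed from 90-degree
// rotation and horizontal/vertical flip matrices that map the unit square
// back onto itself.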
static constexpr mat4 inverseOrientation(uint32_t transform) {
    const mat4 flipH(-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1);
    const mat4 flipV(1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1);
    const mat4 rot90(0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1);
    mat4 tr;

    if (transform & NATIVE_WINDOW_TRANSFORM_ROT_90) {
        tr = tr * rot90;
    }
    if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_H) {
        tr = tr * flipH;
    }
    if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_V) {
        tr = tr * flipV;
    }
    return inverse(tr);
}

/*
 * onDraw will draw the current layer onto the presentable buffer
 */
void BufferLayer::onDraw(const RenderArea& renderArea, const Region& clip,
                         bool useIdentityTransform) const {
    ATRACE_CALL();

    if (CC_UNLIKELY(getBE().compositionInfo.mBuffer == 0)) {
        // the texture has not been created yet, this Layer has
        // in fact never been drawn into. This happens frequently with
        // SurfaceView because the WindowManager can't know when the client
        // has drawn the first time.

        // If there is nothing under us, we paint the screen in black, otherwise
        // we just skip this update.

        // figure out if there is something below us
        Region under;
        bool finished = false;
        mFlinger->mDrawingState.traverseInZOrder([&](Layer* layer) {
            if (finished || layer == static_cast<BufferLayer const*>(this)) {
                finished = true;
                return;
            }
            under.orSelf(renderArea.getTransform().transform(layer->visibleRegion));
        });
        // if not everything below us is covered, we plug the holes!
        Region holes(clip.subtract(under));
        if (!holes.isEmpty()) {
            clearWithOpenGL(renderArea, 0, 0, 0, 1);
        }
        return;
    }

    // Bind the current buffer to the GL texture, and wait for it to be
    // ready for us to draw with.
    status_t err = mConsumer->bindTextureImage();
    if (err != NO_ERROR) {
        ALOGW("onDraw: bindTextureImage failed (err=%d)", err);
        // Go ahead and draw the buffer anyway; no matter what we do the screen
        // is probably going to have something visibly wrong.
    }

    bool blackOutLayer = isProtected() || (isSecure() && !renderArea.isSecure());

    auto& engine(mFlinger->getRenderEngine());

    if (!blackOutLayer) {
        // TODO: we could be more subtle with isFixedSize()
        const bool useFiltering = getFiltering() || needsFiltering(renderArea) || isFixedSize();

        // Query the texture matrix given our current filtering mode.
        float textureMatrix[16];
        mConsumer->setFilteringEnabled(useFiltering);
        mConsumer->getTransformMatrix(textureMatrix);

        if (getTransformToDisplayInverse()) {
            /*
             * the code below applies the primary display's inverse transform to
             * the texture transform
             */
            uint32_t transform = DisplayDevice::getPrimaryDisplayOrientationTransform();
            mat4 tr = inverseOrientation(transform);

            /**
             * TODO(b/36727915): This is basically a hack.
             *
             * Ensure that regardless of the parent transformation,
             * this buffer is always transformed from native display
             * orientation to display orientation. For example, in the case
             * of a camera where the buffer remains in native orientation,
             * we want the pixels to always be upright.
             */
            sp<Layer> p = mDrawingParent.promote();
            if (p != nullptr) {
                const auto parentTransform = p->getTransform();
                tr = tr * inverseOrientation(parentTransform.getOrientation());
            }

            // and finally apply it to the original texture matrix
            const mat4 texTransform(mat4(static_cast<const float*>(textureMatrix)) * tr);
            memcpy(textureMatrix, texTransform.asArray(), sizeof(textureMatrix));
        }

        // Set things up for texturing.
        mTexture.setDimensions(getBE().compositionInfo.mBuffer->getWidth(),
                               getBE().compositionInfo.mBuffer->getHeight());
        mTexture.setFiltering(useFiltering);
        mTexture.setMatrix(textureMatrix);

        engine.setupLayerTexturing(mTexture);
    } else {
        engine.setupLayerBlackedOut();
    }
    drawWithOpenGL(renderArea, useIdentityTransform);
    engine.disableTexturing();
}

void BufferLayer::onLayerDisplayed(const sp<Fence>& releaseFence) {
    mConsumer->setReleaseFence(releaseFence);
}

void BufferLayer::abandon() {
    mConsumer->abandon();
}

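// Returns true if the head queued buffer should be latched on the upcoming
// refresh: either a sideband stream change or auto-refresh is pending, or the
// head item's timestamp is due (or implausibly far in the future) relative to
// the expected present time computed from DispSync.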
bool BufferLayer::shouldPresentNow(const DispSync& dispSync) const {
    if (mSidebandStreamChanged || mAutoRefresh) {
        return true;
    }

    Mutex::Autolock lock(mQueueItemLock);
    if (mQueueItems.empty()) {
        return false;
    }
    auto timestamp = mQueueItems[0].mTimestamp;
    nsecs_t expectedPresent = mConsumer->computeExpectedPresent(dispSync);

    // Ignore timestamps more than a second in the future
    bool isPlausible = timestamp < (expectedPresent + s2ns(1));
    ALOGW_IF(!isPlausible,
             "[%s] Timestamp %" PRId64 " seems implausible "
             "relative to expectedPresent %" PRId64,
             mName.string(), timestamp, expectedPresent);

    bool isDue = timestamp < expectedPresent;
    return isDue || !isPlausible;
}

void BufferLayer::setTransformHint(uint32_t orientation) const {
    mConsumer->setTransformHint(orientation);
}

bool BufferLayer::onPreComposition(nsecs_t refreshStartTime) {
    if (mBufferLatched) {
        Mutex::Autolock lock(mFrameEventHistoryMutex);
        mFrameEventHistory.addPreComposition(mCurrentFrameNumber,
                                             refreshStartTime);
    }
    mRefreshPending = false;
    return mQueuedFrames > 0 || mSidebandStreamChanged ||
            mAutoRefresh;
}
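
// Records timing for the most recently latched frame: updates the frame event
// history and frame tracker with the GL-done, present, and desired-present
// times, falling back to the HWC refresh timestamp when no present fence is
// available. Returns false if no new frame was latched for this composition.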
bool BufferLayer::onPostComposition(const std::shared_ptr<FenceTime>& glDoneFence,
                                    const std::shared_ptr<FenceTime>& presentFence,
                                    const CompositorTiming& compositorTiming) {
    // mFrameLatencyNeeded is true when a new frame was latched for the
    // composition.
    if (!mFrameLatencyNeeded) return false;

    // Update mFrameEventHistory.
    {
        Mutex::Autolock lock(mFrameEventHistoryMutex);
        mFrameEventHistory.addPostComposition(mCurrentFrameNumber, glDoneFence,
                                              presentFence, compositorTiming);
    }

    // Update mFrameTracker.
    nsecs_t desiredPresentTime = mConsumer->getTimestamp();
    mFrameTracker.setDesiredPresentTime(desiredPresentTime);

    const std::string layerName(getName().c_str());
    mTimeStats.setDesiredTime(layerName, mCurrentFrameNumber, desiredPresentTime);

    std::shared_ptr<FenceTime> frameReadyFence = mConsumer->getCurrentFenceTime();
    if (frameReadyFence->isValid()) {
        mFrameTracker.setFrameReadyFence(std::move(frameReadyFence));
    } else {
        // There was no fence for this frame, so assume that it was ready
        // to be presented at the desired present time.
        mFrameTracker.setFrameReadyTime(desiredPresentTime);
    }

    if (presentFence->isValid()) {
        mTimeStats.setPresentFence(layerName, mCurrentFrameNumber, presentFence);
        mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
    } else {
        // The HWC doesn't support present fences, so use the refresh
        // timestamp instead.
        const nsecs_t actualPresentTime =
                mFlinger->getHwComposer().getRefreshTimestamp(HWC_DISPLAY_PRIMARY);
        mTimeStats.setPresentTime(layerName, mCurrentFrameNumber, actualPresentTime);
        mFrameTracker.setActualPresentTime(actualPresentTime);
    }

    mFrameTracker.advanceFrame();
    mFrameLatencyNeeded = false;
    return true;
}

std::vector<OccupancyTracker::Segment> BufferLayer::getOccupancyHistory(bool forceFlush) {
    std::vector<OccupancyTracker::Segment> history;
    status_t result = mConsumer->getOccupancyHistory(forceFlush, &history);
    if (result != NO_ERROR) {
        ALOGW("[%s] Failed to obtain occupancy history (%d)", mName.string(), result);
        return {};
    }
    return history;
}

bool BufferLayer::getTransformToDisplayInverse() const {
    return mConsumer->getTransformToDisplayInverse();
}

void BufferLayer::releasePendingBuffer(nsecs_t dequeueReadyTime) {
    if (!mConsumer->releasePendingBuffer()) {
        return;
    }

    auto releaseFenceTime =
            std::make_shared<FenceTime>(mConsumer->getPrevFinalReleaseFence());
    mReleaseTimeline.updateSignalTimes();
    mReleaseTimeline.push(releaseFenceTime);

    Mutex::Autolock lock(mFrameEventHistoryMutex);
    if (mPreviousFrameNumber != 0) {
        mFrameEventHistory.addRelease(mPreviousFrameNumber, dequeueReadyTime,
                                      std::move(releaseFenceTime));
    }
}

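// Latches the next queued buffer (or handles a pending sideband stream change)
// and returns the dirty region, in window-manager space, exposed by the new
// content. May bail out early and schedule another update if the head buffer's
// acquire fence hasn't signaled, a refresh is already pending, or not all
// blocking transactions have been applied yet.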
Region BufferLayer::latchBuffer(bool& recomputeVisibleRegions, nsecs_t latchTime) {
    ATRACE_CALL();

    if (android_atomic_acquire_cas(true, false, &mSidebandStreamChanged) == 0) {
        // mSidebandStreamChanged was true
        mSidebandStream = mConsumer->getSidebandStream();
        // replicated in LayerBE until FE/BE is ready to be synchronized
        getBE().compositionInfo.hwc.sidebandStream = mSidebandStream;
        if (getBE().compositionInfo.hwc.sidebandStream != nullptr) {
            setTransactionFlags(eTransactionNeeded);
            mFlinger->setTransactionFlags(eTraversalNeeded);
        }
        recomputeVisibleRegions = true;

        const State& s(getDrawingState());
        return getTransform().transform(Region(Rect(s.active.w, s.active.h)));
    }

    Region outDirtyRegion;
    if (mQueuedFrames <= 0 && !mAutoRefresh) {
        return outDirtyRegion;
    }

    // if we've already called updateTexImage() without going through
    // a composition step, we have to skip this layer at this point
    // because we cannot call updateTexImage() without a corresponding
    // compositionComplete() call.
    // we'll trigger an update in onPreComposition().
    if (mRefreshPending) {
        return outDirtyRegion;
    }

    // If the head buffer's acquire fence hasn't signaled yet, return and
    // try again later
    if (!headFenceHasSignaled()) {
        mFlinger->signalLayerUpdate();
        return outDirtyRegion;
    }

    // Capture the old state of the layer for comparisons later
    const State& s(getDrawingState());
    const bool oldOpacity = isOpaque(s);
    sp<GraphicBuffer> oldBuffer = getBE().compositionInfo.mBuffer;

    if (!allTransactionsSignaled()) {
        mFlinger->signalLayerUpdate();
        return outDirtyRegion;
    }

    // This boolean is used to make sure that SurfaceFlinger's shadow copy
    // of the buffer queue isn't modified when the buffer queue is returning
    // BufferItems that weren't actually queued. This can happen in shared
    // buffer mode.
    bool queuedBuffer = false;
    LayerRejecter r(mDrawingState, getCurrentState(), recomputeVisibleRegions,
                    getProducerStickyTransform() != 0, mName.string(),
                    mOverrideScalingMode, mFreezeGeometryUpdates);
    status_t updateResult =
            mConsumer->updateTexImage(&r, mFlinger->mPrimaryDispSync,
                                      &mAutoRefresh, &queuedBuffer,
                                      mLastFrameNumberReceived);
    if (updateResult == BufferQueue::PRESENT_LATER) {
        // Producer doesn't want buffer to be displayed yet.  Signal a
        // layer update so we check again at the next opportunity.
        mFlinger->signalLayerUpdate();
        return outDirtyRegion;
    } else if (updateResult == BufferLayerConsumer::BUFFER_REJECTED) {
        // If the buffer has been rejected, remove it from the shadow queue
        // and return early
        if (queuedBuffer) {
            Mutex::Autolock lock(mQueueItemLock);
            mTimeStats.removeTimeRecord(getName().c_str(), mQueueItems[0].mFrameNumber);
            mQueueItems.removeAt(0);
            android_atomic_dec(&mQueuedFrames);
        }
        return outDirtyRegion;
    } else if (updateResult != NO_ERROR || mUpdateTexImageFailed) {
        // This can occur if something goes wrong when trying to create the
        // EGLImage for this buffer. If this happens, the buffer has already
        // been released, so we need to clean up the queue and bug out
        // early.
        if (queuedBuffer) {
            Mutex::Autolock lock(mQueueItemLock);
            mQueueItems.clear();
            android_atomic_and(0, &mQueuedFrames);
            mTimeStats.clearLayerRecord(getName().c_str());
        }

        // Once we have hit this state, the shadow queue may no longer
        // correctly reflect the incoming BufferQueue's contents, so even if
        // updateTexImage starts working, the only safe course of action is
        // to continue to ignore updates.
        mUpdateTexImageFailed = true;

        return outDirtyRegion;
    }

    if (queuedBuffer) {
        // Autolock scope
        auto currentFrameNumber = mConsumer->getFrameNumber();

        Mutex::Autolock lock(mQueueItemLock);

        // Remove any stale buffers that have been dropped during
        // updateTexImage
        while (mQueueItems[0].mFrameNumber != currentFrameNumber) {
            mTimeStats.removeTimeRecord(getName().c_str(), mQueueItems[0].mFrameNumber);
            mQueueItems.removeAt(0);
            android_atomic_dec(&mQueuedFrames);
        }

        const std::string layerName(getName().c_str());
        mTimeStats.setAcquireFence(layerName, currentFrameNumber, mQueueItems[0].mFenceTime);
        mTimeStats.setLatchTime(layerName, currentFrameNumber, latchTime);

        mQueueItems.removeAt(0);
    }

    // Decrement the queued-frames count.  Signal another event if we
    // have more frames pending.
    if ((queuedBuffer && android_atomic_dec(&mQueuedFrames) > 1) ||
        mAutoRefresh) {
        mFlinger->signalLayerUpdate();
    }

    // update the active buffer
    getBE().compositionInfo.mBuffer =
            mConsumer->getCurrentBuffer(&getBE().compositionInfo.mBufferSlot);
    // replicated in LayerBE until FE/BE is ready to be synchronized
    mActiveBuffer = getBE().compositionInfo.mBuffer;
    if (getBE().compositionInfo.mBuffer == nullptr) {
        // this can only happen if the very first buffer was rejected.
        return outDirtyRegion;
    }

    mBufferLatched = true;
    mPreviousFrameNumber = mCurrentFrameNumber;
    mCurrentFrameNumber = mConsumer->getFrameNumber();

    {
        Mutex::Autolock lock(mFrameEventHistoryMutex);
        mFrameEventHistory.addLatch(mCurrentFrameNumber, latchTime);
    }

    mRefreshPending = true;
    mFrameLatencyNeeded = true;
    if (oldBuffer == nullptr) {
        // the first time we receive a buffer, we need to trigger a
        // geometry invalidation.
        recomputeVisibleRegions = true;
    }

    ui::Dataspace dataSpace = mConsumer->getCurrentDataSpace();
    // treat modern dataspaces as legacy dataspaces whenever possible, until
    // we can trust the buffer producers
    switch (dataSpace) {
        case ui::Dataspace::V0_SRGB:
            dataSpace = ui::Dataspace::SRGB;
            break;
        case ui::Dataspace::V0_SRGB_LINEAR:
            dataSpace = ui::Dataspace::SRGB_LINEAR;
            break;
        case ui::Dataspace::V0_JFIF:
            dataSpace = ui::Dataspace::JFIF;
            break;
        case ui::Dataspace::V0_BT601_625:
            dataSpace = ui::Dataspace::BT601_625;
            break;
        case ui::Dataspace::V0_BT601_525:
            dataSpace = ui::Dataspace::BT601_525;
            break;
        case ui::Dataspace::V0_BT709:
            dataSpace = ui::Dataspace::BT709;
            break;
        default:
            break;
    }
    mCurrentDataSpace = dataSpace;

    Rect crop(mConsumer->getCurrentCrop());
    const uint32_t transform(mConsumer->getCurrentTransform());
    const uint32_t scalingMode(mConsumer->getCurrentScalingMode());
    if ((crop != mCurrentCrop) ||
        (transform != mCurrentTransform) ||
        (scalingMode != mCurrentScalingMode)) {
        mCurrentCrop = crop;
        mCurrentTransform = transform;
        mCurrentScalingMode = scalingMode;
        recomputeVisibleRegions = true;
    }

    if (oldBuffer != nullptr) {
        uint32_t bufWidth = getBE().compositionInfo.mBuffer->getWidth();
        uint32_t bufHeight = getBE().compositionInfo.mBuffer->getHeight();
        if (bufWidth != uint32_t(oldBuffer->width) ||
            bufHeight != uint32_t(oldBuffer->height)) {
            recomputeVisibleRegions = true;
        }
    }

    mCurrentOpacity = getOpacityForFormat(getBE().compositionInfo.mBuffer->format);
    if (oldOpacity != isOpaque(s)) {
        recomputeVisibleRegions = true;
    }

    // Remove any sync points corresponding to the buffer which was just
    // latched
    {
        Mutex::Autolock lock(mLocalSyncPointMutex);
        auto point = mLocalSyncPoints.begin();
        while (point != mLocalSyncPoints.end()) {
            if (!(*point)->frameIsAvailable() || !(*point)->transactionIsApplied()) {
                // This sync point must have been added since we started
                // latching. Don't drop it yet.
                ++point;
                continue;
            }

            if ((*point)->getFrameNumber() <= mCurrentFrameNumber) {
                point = mLocalSyncPoints.erase(point);
            } else {
                ++point;
            }
        }
    }

    // FIXME: postedRegion should be dirty & bounds
    Region dirtyRegion(Rect(s.active.w, s.active.h));

    // transform the dirty region to window-manager space
    outDirtyRegion = (getTransform().transform(dirtyRegion));

    return outDirtyRegion;
}

void BufferLayer::setDefaultBufferSize(uint32_t w, uint32_t h) {
    mConsumer->setDefaultBufferSize(w, h);
}

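// Pushes this layer's per-frame state to the HWC layer for the given display:
// the visible region clipped to the display viewport, the surface damage, and
// either the sideband stream (for sideband layers) or the requested
// composition type (Cursor or Device), dataspace, HDR metadata, and current
// buffer with its acquire fence.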
void BufferLayer::setPerFrameData(const sp<const DisplayDevice>& displayDevice) {
    // Apply this display's projection's viewport to the visible region
    // before giving it to the HWC HAL.
    const Transform& tr = displayDevice->getTransform();
    const auto& viewport = displayDevice->getViewport();
    Region visible = tr.transform(visibleRegion.intersect(viewport));
    auto hwcId = displayDevice->getHwcDisplayId();
    auto& hwcInfo = getBE().mHwcLayers[hwcId];
    auto& hwcLayer = hwcInfo.layer;
    auto error = hwcLayer->setVisibleRegion(visible);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set visible region: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
        visible.dump(LOG_TAG);
    }

    error = hwcLayer->setSurfaceDamage(surfaceDamageRegion);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set surface damage: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
        surfaceDamageRegion.dump(LOG_TAG);
    }

    // Sideband layers
    if (getBE().compositionInfo.hwc.sidebandStream.get()) {
        setCompositionType(hwcId, HWC2::Composition::Sideband);
        ALOGV("[%s] Requesting Sideband composition", mName.string());
        error = hwcLayer->setSidebandStream(getBE().compositionInfo.hwc.sidebandStream->handle());
        if (error != HWC2::Error::None) {
            ALOGE("[%s] Failed to set sideband stream %p: %s (%d)", mName.string(),
                  getBE().compositionInfo.hwc.sidebandStream->handle(), to_string(error).c_str(),
                  static_cast<int32_t>(error));
        }
        return;
    }

    // Device or Cursor layers
    if (mPotentialCursor) {
        ALOGV("[%s] Requesting Cursor composition", mName.string());
        setCompositionType(hwcId, HWC2::Composition::Cursor);
    } else {
        ALOGV("[%s] Requesting Device composition", mName.string());
        setCompositionType(hwcId, HWC2::Composition::Device);
    }

    ALOGV("setPerFrameData: dataspace = %d", mCurrentDataSpace);
    error = hwcLayer->setDataspace(mCurrentDataSpace);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set dataspace %d: %s (%d)", mName.string(), mCurrentDataSpace,
              to_string(error).c_str(), static_cast<int32_t>(error));
    }

    const HdrMetadata& metadata = mConsumer->getCurrentHdrMetadata();
    error = hwcLayer->setPerFrameMetadata(displayDevice->getSupportedPerFrameMetadata(), metadata);
    if (error != HWC2::Error::None && error != HWC2::Error::Unsupported) {
        ALOGE("[%s] Failed to set hdrMetadata: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
    }

    uint32_t hwcSlot = 0;
    sp<GraphicBuffer> hwcBuffer;
    hwcInfo.bufferCache.getHwcBuffer(getBE().compositionInfo.mBufferSlot,
                                     getBE().compositionInfo.mBuffer, &hwcSlot, &hwcBuffer);

    auto acquireFence = mConsumer->getCurrentFence();
    error = hwcLayer->setBuffer(hwcSlot, hwcBuffer, acquireFence);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set buffer %p: %s (%d)", mName.string(),
              getBE().compositionInfo.mBuffer->handle, to_string(error).c_str(),
              static_cast<int32_t>(error));
    }
}

bool BufferLayer::isOpaque(const Layer::State& s) const {
    // if we don't have a buffer or sidebandStream yet, we're translucent regardless of the
    // layer's opaque flag.
    if ((getBE().compositionInfo.hwc.sidebandStream == nullptr) &&
        (getBE().compositionInfo.mBuffer == nullptr)) {
        return false;
    }

    // if the layer has the opaque flag, then we're always opaque,
    // otherwise we use the current buffer's format.
    return ((s.flags & layer_state_t::eLayerOpaque) != 0) || mCurrentOpacity;
}

void BufferLayer::onFirstRef() {
    // Creates a custom BufferQueue for SurfaceFlingerConsumer to use
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer, true);
    mProducer = new MonitoredProducer(producer, mFlinger, this);
    mConsumer = new BufferLayerConsumer(consumer,
            mFlinger->getRenderEngine(), mTextureName, this);
    mConsumer->setConsumerUsageBits(getEffectiveUsage(0));
    mConsumer->setContentsChangedListener(this);
    mConsumer->setName(mName);

    if (mFlinger->isLayerTripleBufferingDisabled()) {
        mProducer->setMaxDequeuedBufferCount(2);
    }

    const sp<const DisplayDevice> hw(mFlinger->getDefaultDisplayDevice());
    updateTransformHint(hw);
}

// ---------------------------------------------------------------------------
// Interface implementation for SurfaceFlingerConsumer::ContentsChangedListener
// ---------------------------------------------------------------------------

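// Called by the consumer when a new buffer is queued. Records the item in
// SurfaceFlinger's shadow copy of the queue, keeping callbacks ordered by
// frame number (logging every 500 ms while waiting for an earlier callback),
// and schedules a layer update.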
void BufferLayer::onFrameAvailable(const BufferItem& item) {
    // Add this buffer to our internal queue tracker
    { // Autolock scope
        Mutex::Autolock lock(mQueueItemLock);
        mFlinger->mInterceptor->saveBufferUpdate(this, item.mGraphicBuffer->getWidth(),
                                                 item.mGraphicBuffer->getHeight(),
                                                 item.mFrameNumber);
        // Reset the frame number tracker when we receive the first buffer after
        // a frame number reset
        if (item.mFrameNumber == 1) {
            mLastFrameNumberReceived = 0;
        }

        // Ensure that callbacks are handled in order
        while (item.mFrameNumber != mLastFrameNumberReceived + 1) {
            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock,
                                                               ms2ns(500));
            if (result != NO_ERROR) {
                ALOGE("[%s] Timed out waiting on callback", mName.string());
            }
        }

        mQueueItems.push_back(item);
        android_atomic_inc(&mQueuedFrames);

        // Wake up any pending callbacks
        mLastFrameNumberReceived = item.mFrameNumber;
        mQueueItemCondition.broadcast();
    }

    mFlinger->signalLayerUpdate();
}

void BufferLayer::onFrameReplaced(const BufferItem& item) {
    { // Autolock scope
        Mutex::Autolock lock(mQueueItemLock);

        // Ensure that callbacks are handled in order
        while (item.mFrameNumber != mLastFrameNumberReceived + 1) {
            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock,
                                                               ms2ns(500));
            if (result != NO_ERROR) {
                ALOGE("[%s] Timed out waiting on callback", mName.string());
            }
        }

        if (mQueueItems.empty()) {
            ALOGE("Can't replace a frame on an empty queue");
            return;
        }
        mQueueItems.editItemAt(mQueueItems.size() - 1) = item;

        // Wake up any pending callbacks
        mLastFrameNumberReceived = item.mFrameNumber;
        mQueueItemCondition.broadcast();
    }
}

void BufferLayer::onSidebandStreamChanged() {
    if (android_atomic_release_cas(false, true, &mSidebandStreamChanged) == 0) {
        // mSidebandStreamChanged was false
        mFlinger->signalLayerUpdate();
    }
}

bool BufferLayer::needsFiltering(const RenderArea& renderArea) const {
    return mNeedsFiltering || renderArea.needsFiltering();
}

// As documented in libhardware header, formats in the range
// 0x100 - 0x1FF are specific to the HAL implementation, and
// are known to have no alpha channel.
// TODO: move definition for device-specific range into
// hardware.h, instead of using hard-coded values here.
#define HARDWARE_IS_DEVICE_FORMAT(f) ((f) >= 0x100 && (f) <= 0x1FF)

bool BufferLayer::getOpacityForFormat(uint32_t format) {
    if (HARDWARE_IS_DEVICE_FORMAT(format)) {
        return true;
    }
    switch (format) {
        case HAL_PIXEL_FORMAT_RGBA_8888:
        case HAL_PIXEL_FORMAT_BGRA_8888:
        case HAL_PIXEL_FORMAT_RGBA_FP16:
        case HAL_PIXEL_FORMAT_RGBA_1010102:
            return false;
    }
    // in all other cases, we have no blending (also for unknown formats)
    return true;
}

bool BufferLayer::isHdrY410() const {
    // pixel format is HDR Y410 masquerading as RGBA_1010102
    return (mCurrentDataSpace == ui::Dataspace::BT2020_ITU_PQ &&
            mConsumer->getCurrentApi() == NATIVE_WINDOW_API_MEDIA &&
            getBE().compositionInfo.mBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102);
}

void BufferLayer::drawWithOpenGL(const RenderArea& renderArea, bool useIdentityTransform) const {
    ATRACE_CALL();
    const State& s(getDrawingState());

    computeGeometry(renderArea, getBE().mMesh, useIdentityTransform);

    /*
     * NOTE: the way we compute the texture coordinates here produces
     * different results than when we take the HWC path -- in the latter case
     * the "source crop" is rounded to texel boundaries.
     * This can produce significantly different results when the texture
     * is scaled by a large amount.
     *
     * The GL code below is more logical (imho), and the difference with
     * HWC is due to a limitation of the HWC API to integers -- an open
     * question is whether we should ignore this problem or revert to
     * GL composition when a buffer scaling is applied (maybe with some
     * minimal value)? Or, we could make GL behave like HWC -- but this
     * feels like more of a hack.
     */
    const Rect bounds{computeBounds()}; // Rounds from FloatRect

    Transform t = getTransform();
    Rect win = bounds;
    if (!s.finalCrop.isEmpty()) {
        win = t.transform(win);
        if (!win.intersect(s.finalCrop, &win)) {
            win.clear();
        }
        win = t.inverse().transform(win);
        if (!win.intersect(bounds, &win)) {
            win.clear();
        }
    }

    float left = float(win.left) / float(s.active.w);
    float top = float(win.top) / float(s.active.h);
    float right = float(win.right) / float(s.active.w);
    float bottom = float(win.bottom) / float(s.active.h);

    // TODO: we probably want to generate the texture coords with the mesh
    // here we assume that we only have 4 vertices
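    // The 1.0f - y flip below accounts for GL texture coordinates having their
    // origin at the bottom-left, while the window coordinates computed above
    // are top-down.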
    Mesh::VertexArray<vec2> texCoords(getBE().mMesh.getTexCoordArray<vec2>());
    texCoords[0] = vec2(left, 1.0f - top);
    texCoords[1] = vec2(left, 1.0f - bottom);
    texCoords[2] = vec2(right, 1.0f - bottom);
    texCoords[3] = vec2(right, 1.0f - top);

    auto& engine(mFlinger->getRenderEngine());
    engine.setupLayerBlending(mPremultipliedAlpha, isOpaque(s), false /* disableTexture */,
                              getColor());
    engine.setSourceDataSpace(mCurrentDataSpace);

    if (isHdrY410()) {
        engine.setSourceY410BT2020(true);
    }

    engine.drawMesh(getBE().mMesh);
    engine.disableBlending();

    engine.setSourceY410BT2020(false);
}

uint32_t BufferLayer::getProducerStickyTransform() const {
    int producerStickyTransform = 0;
    int ret = mProducer->query(NATIVE_WINDOW_STICKY_TRANSFORM, &producerStickyTransform);
    if (ret != OK) {
        ALOGW("%s: Error %s (%d) while querying window sticky transform.", __FUNCTION__,
              strerror(-ret), ret);
        return 0;
    }
    return static_cast<uint32_t>(producerStickyTransform);
}

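// Reads the debug.sf.latch_unsignaled system property once (guarded by a
// mutex) and caches the result. When enabled, buffers may be latched even if
// their acquire fences have not signaled yet.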
bool BufferLayer::latchUnsignaledBuffers() {
    static bool propertyLoaded = false;
    static bool latch = false;
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);
    if (!propertyLoaded) {
        char value[PROPERTY_VALUE_MAX] = {};
        property_get("debug.sf.latch_unsignaled", value, "0");
        latch = atoi(value);
        propertyLoaded = true;
    }
    return latch;
}

uint64_t BufferLayer::getHeadFrameNumber() const {
    Mutex::Autolock lock(mQueueItemLock);
    if (!mQueueItems.empty()) {
        return mQueueItems[0].mFrameNumber;
    } else {
        return mCurrentFrameNumber;
    }
}

bool BufferLayer::headFenceHasSignaled() const {
    if (latchUnsignaledBuffers()) {
        return true;
    }

    Mutex::Autolock lock(mQueueItemLock);
    if (mQueueItems.empty()) {
        return true;
    }
    if (mQueueItems[0].mIsDroppable) {
        // Even though this buffer's fence may not have signaled yet, it could
        // be replaced by another buffer before it has a chance to, which means
        // that it's possible to get into a situation where a buffer is never
        // able to be latched. To avoid this, grab this buffer anyway.
        return true;
    }
    return mQueueItems[0].mFenceTime->getSignalTime() !=
            Fence::SIGNAL_TIME_PENDING;
}

uint32_t BufferLayer::getEffectiveScalingMode() const {
    if (mOverrideScalingMode >= 0) {
        return mOverrideScalingMode;
    }
    return mCurrentScalingMode;
}

// ----------------------------------------------------------------------------
// transaction
// ----------------------------------------------------------------------------

void BufferLayer::notifyAvailableFrames() {
    auto headFrameNumber = getHeadFrameNumber();
    bool headFenceSignaled = headFenceHasSignaled();
    Mutex::Autolock lock(mLocalSyncPointMutex);
    for (auto& point : mLocalSyncPoints) {
        if (headFrameNumber >= point->getFrameNumber() && headFenceSignaled) {
            point->setFrameAvailable();
        }
    }
}

sp<IGraphicBufferProducer> BufferLayer::getProducer() const {
    return mProducer;
}

// ---------------------------------------------------------------------------
// h/w composer set-up
// ---------------------------------------------------------------------------

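// Returns true if every local sync point at or below the head frame number
// has had its transaction applied, i.e. there is no pending transaction that
// should block latching the head buffer. If a matching sync point hasn't been
// marked frame-available yet, it is marked now and this latch attempt is
// aborted.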
bool BufferLayer::allTransactionsSignaled() {
    auto headFrameNumber = getHeadFrameNumber();
    bool matchingFramesFound = false;
    bool allTransactionsApplied = true;
    Mutex::Autolock lock(mLocalSyncPointMutex);

    for (auto& point : mLocalSyncPoints) {
        if (point->getFrameNumber() > headFrameNumber) {
            break;
        }
        matchingFramesFound = true;

        if (!point->frameIsAvailable()) {
            // We haven't notified the remote layer that the frame for
            // this point is available yet. Notify it now, and then
            // abort this attempt to latch.
            point->setFrameAvailable();
            allTransactionsApplied = false;
            break;
        }

        allTransactionsApplied = allTransactionsApplied && point->transactionIsApplied();
    }
    return !matchingFramesFound || allTransactionsApplied;
}

} // namespace android

#if defined(__gl_h_)
#error "don't include gl/gl.h in this file"
#endif

#if defined(__gl2_h_)
#error "don't include gl2/gl2.h in this file"
#endif