/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Surface"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <gui/Surface.h>

#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>

#include <inttypes.h>

#include <android/gui/DisplayStatInfo.h>
#include <android/native_window.h>

#include <gui/FenceMonitor.h>
#include <gui/TraceUtils.h>
#include <utils/Log.h>
#include <utils/NativeHandle.h>
#include <utils/Trace.h>

#include <ui/DynamicDisplayInfo.h>
#include <ui/Fence.h>
#include <ui/GraphicBuffer.h>
#include <ui/Region.h>

#include <gui/AidlStatusUtil.h>
#include <gui/BufferItem.h>

#include <gui/IProducerListener.h>

#include <gui/ISurfaceComposer.h>
#include <gui/LayerState.h>
#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>

#include <com_android_graphics_libgui_flags.h>

namespace android {

using namespace com::android::graphics::libgui;
using gui::aidl_utils::statusTFromBinderStatus;
using ui::Dataspace;

namespace {

enum {
    // moved from nativewindow/include/system/window.h, to be removed
    NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT = 28,
    NATIVE_WINDOW_GET_HDR_SUPPORT = 29,
};

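// Returns true for perform() operation codes that register an interceptor. These are treated
// specially in hook_perform(): registration needs exclusive ownership of mInterceptorMutex, so
// the hook must not take shared ownership first (see hook_perform below).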
bool isInterceptorRegistrationOp(int op) {
    return op == NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR ||
            op == NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR ||
            op == NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR ||
            op == NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR ||
            op == NATIVE_WINDOW_SET_QUERY_INTERCEPTOR;
}

} // namespace

Surface::Surface(const sp<IGraphicBufferProducer>& bufferProducer, bool controlledByApp,
                 const sp<IBinder>& surfaceControlHandle)
      : mGraphicBufferProducer(bufferProducer),
        mCrop(Rect::EMPTY_RECT),
        mBufferAge(0),
        mGenerationNumber(0),
        mSharedBufferMode(false),
        mAutoRefresh(false),
        mAutoPrerotation(false),
        mSharedBufferSlot(BufferItem::INVALID_BUFFER_SLOT),
        mSharedBufferHasBeenQueued(false),
        mQueriedSupportedTimestamps(false),
        mFrameTimestampsSupportsPresent(false),
        mEnableFrameTimestamps(false),
        mFrameEventHistory(std::make_unique<ProducerFrameEventHistory>()) {
    // Initialize the ANativeWindow function pointers.
    ANativeWindow::setSwapInterval = hook_setSwapInterval;
    ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
    ANativeWindow::cancelBuffer = hook_cancelBuffer;
    ANativeWindow::queueBuffer = hook_queueBuffer;
    ANativeWindow::query = hook_query;
    ANativeWindow::perform = hook_perform;

    ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
    ANativeWindow::cancelBuffer_DEPRECATED = hook_cancelBuffer_DEPRECATED;
    ANativeWindow::lockBuffer_DEPRECATED = hook_lockBuffer_DEPRECATED;
    ANativeWindow::queueBuffer_DEPRECATED = hook_queueBuffer_DEPRECATED;

    const_cast<int&>(ANativeWindow::minSwapInterval) = 0;
    const_cast<int&>(ANativeWindow::maxSwapInterval) = 1;

    mReqWidth = 0;
    mReqHeight = 0;
    mReqFormat = 0;
    mReqUsage = 0;
    mTimestamp = NATIVE_WINDOW_TIMESTAMP_AUTO;
    mDataSpace = Dataspace::UNKNOWN;
    mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
    mTransform = 0;
    mStickyTransform = 0;
    mDefaultWidth = 0;
    mDefaultHeight = 0;
    mUserWidth = 0;
    mUserHeight = 0;
    mTransformHint = 0;
    mConsumerRunningBehind = false;
    mConnectedToCpu = false;
    mProducerControlledByApp = controlledByApp;
    mSwapIntervalZero = false;
    mMaxBufferCount = NUM_BUFFER_SLOTS;
    mSurfaceControlHandle = surfaceControlHandle;
}

Surface::~Surface() {
    if (mConnectedToCpu) {
        Surface::disconnect(NATIVE_WINDOW_API_CPU);
    }
}

sp<ISurfaceComposer> Surface::composerService() const {
    return ComposerService::getComposerService();
}

sp<gui::ISurfaceComposer> Surface::composerServiceAIDL() const {
    return ComposerServiceAIDL::getComposerService();
}

nsecs_t Surface::now() const {
    return systemTime();
}

sp<IGraphicBufferProducer> Surface::getIGraphicBufferProducer() const {
    return mGraphicBufferProducer;
}

void Surface::setSidebandStream(const sp<NativeHandle>& stream) {
    mGraphicBufferProducer->setSidebandStream(stream);
}

void Surface::allocateBuffers() {
    uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
    uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
    mGraphicBufferProducer->allocateBuffers(reqWidth, reqHeight,
            mReqFormat, mReqUsage);
}

status_t Surface::setGenerationNumber(uint32_t generation) {
    status_t result = mGraphicBufferProducer->setGenerationNumber(generation);
    if (result == NO_ERROR) {
        mGenerationNumber = generation;
    }
    return result;
}

uint64_t Surface::getNextFrameNumber() const {
    Mutex::Autolock lock(mMutex);
    return mNextFrameNumber;
}

String8 Surface::getConsumerName() const {
    return mGraphicBufferProducer->getConsumerName();
}

status_t Surface::setDequeueTimeout(nsecs_t timeout) {
    return mGraphicBufferProducer->setDequeueTimeout(timeout);
}

status_t Surface::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
        sp<Fence>* outFence, float outTransformMatrix[16]) {
    return mGraphicBufferProducer->getLastQueuedBuffer(outBuffer, outFence,
            outTransformMatrix);
}

status_t Surface::getDisplayRefreshCycleDuration(nsecs_t* outRefreshDuration) {
    ATRACE_CALL();

    gui::DisplayStatInfo stats;
    binder::Status status = composerServiceAIDL()->getDisplayStats(nullptr, &stats);
    if (!status.isOk()) {
        return statusTFromBinderStatus(status);
    }

    *outRefreshDuration = stats.vsyncPeriod;

    return NO_ERROR;
}

void Surface::enableFrameTimestamps(bool enable) {
    Mutex::Autolock lock(mMutex);
    // If going from disabled to enabled, get the initial values for
    // compositor and display timing.
    if (!mEnableFrameTimestamps && enable) {
        FrameEventHistoryDelta delta;
        mGraphicBufferProducer->getFrameTimestamps(&delta);
        mFrameEventHistory->applyDelta(delta);
    }
    mEnableFrameTimestamps = enable;
}

status_t Surface::getCompositorTiming(
        nsecs_t* compositeDeadline, nsecs_t* compositeInterval,
        nsecs_t* compositeToPresentLatency) {
    Mutex::Autolock lock(mMutex);
    if (!mEnableFrameTimestamps) {
        return INVALID_OPERATION;
    }

    if (compositeDeadline != nullptr) {
        *compositeDeadline =
                mFrameEventHistory->getNextCompositeDeadline(now());
    }
    if (compositeInterval != nullptr) {
        *compositeInterval = mFrameEventHistory->getCompositeInterval();
    }
    if (compositeToPresentLatency != nullptr) {
        *compositeToPresentLatency =
                mFrameEventHistory->getCompositeToPresentLatency();
    }
    return NO_ERROR;
}

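// Helper for getFrameTimestamps(): returns true if any timestamp the caller asked for is not
// yet present in the cached producer-side frame event history, meaning the cache should be
// refreshed from the consumer before answering.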
static bool checkConsumerForUpdates(
        const FrameEvents* e, const uint64_t lastFrameNumber,
        const nsecs_t* outLatchTime,
        const nsecs_t* outFirstRefreshStartTime,
        const nsecs_t* outLastRefreshStartTime,
        const nsecs_t* outGpuCompositionDoneTime,
        const nsecs_t* outDisplayPresentTime,
        const nsecs_t* outDequeueReadyTime,
        const nsecs_t* outReleaseTime) {
    bool checkForLatch = (outLatchTime != nullptr) && !e->hasLatchInfo();
    bool checkForFirstRefreshStart = (outFirstRefreshStartTime != nullptr) &&
            !e->hasFirstRefreshStartInfo();
    bool checkForGpuCompositionDone = (outGpuCompositionDoneTime != nullptr) &&
            !e->hasGpuCompositionDoneInfo();
    bool checkForDisplayPresent = (outDisplayPresentTime != nullptr) &&
            !e->hasDisplayPresentInfo();

    // LastRefreshStart, DequeueReady, and Release are never available for the
    // last frame.
    bool checkForLastRefreshStart = (outLastRefreshStartTime != nullptr) &&
            !e->hasLastRefreshStartInfo() &&
            (e->frameNumber != lastFrameNumber);
    bool checkForDequeueReady = (outDequeueReadyTime != nullptr) &&
            !e->hasDequeueReadyInfo() && (e->frameNumber != lastFrameNumber);
    bool checkForRelease = (outReleaseTime != nullptr) &&
            !e->hasReleaseInfo() && (e->frameNumber != lastFrameNumber);

    // RequestedPresent and Acquire info are always available producer-side.
    return checkForLatch || checkForFirstRefreshStart ||
            checkForLastRefreshStart || checkForGpuCompositionDone ||
            checkForDisplayPresent || checkForDequeueReady || checkForRelease;
}

static void getFrameTimestamp(nsecs_t *dst, const nsecs_t& src) {
    if (dst != nullptr) {
        // We always get valid timestamps for these eventually.
        *dst = (src == FrameEvents::TIMESTAMP_PENDING) ?
                NATIVE_WINDOW_TIMESTAMP_PENDING : src;
    }
}

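// Converts a fence-backed timestamp into the NATIVE_WINDOW_TIMESTAMP_* convention: PENDING if
// the fence is not yet expected or has not signaled, INVALID if the fence was invalid,
// otherwise the fence signal time.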
static void getFrameTimestampFence(nsecs_t *dst,
        const std::shared_ptr<FenceTime>& src, bool fenceShouldBeKnown) {
    if (dst != nullptr) {
        if (!fenceShouldBeKnown) {
            *dst = NATIVE_WINDOW_TIMESTAMP_PENDING;
            return;
        }

        nsecs_t signalTime = src->getSignalTime();
        *dst = (signalTime == Fence::SIGNAL_TIME_PENDING) ?
                NATIVE_WINDOW_TIMESTAMP_PENDING :
                (signalTime == Fence::SIGNAL_TIME_INVALID) ?
                        NATIVE_WINDOW_TIMESTAMP_INVALID :
                        signalTime;
    }
}

status_t Surface::getFrameTimestamps(uint64_t frameNumber,
        nsecs_t* outRequestedPresentTime, nsecs_t* outAcquireTime,
        nsecs_t* outLatchTime, nsecs_t* outFirstRefreshStartTime,
        nsecs_t* outLastRefreshStartTime, nsecs_t* outGpuCompositionDoneTime,
        nsecs_t* outDisplayPresentTime, nsecs_t* outDequeueReadyTime,
        nsecs_t* outReleaseTime) {
    ATRACE_CALL();

    Mutex::Autolock lock(mMutex);

    if (!mEnableFrameTimestamps) {
        return INVALID_OPERATION;
    }

    // Verify the requested timestamps are supported.
    querySupportedTimestampsLocked();
    if (outDisplayPresentTime != nullptr && !mFrameTimestampsSupportsPresent) {
        return BAD_VALUE;
    }

    FrameEvents* events = mFrameEventHistory->getFrame(frameNumber);
    if (events == nullptr) {
        // If the entry isn't available in the producer, it's definitely not
        // available in the consumer.
        return NAME_NOT_FOUND;
    }

    // Update our cache of events if the requested events are not available.
    if (checkConsumerForUpdates(events, mLastFrameNumber,
            outLatchTime, outFirstRefreshStartTime, outLastRefreshStartTime,
            outGpuCompositionDoneTime, outDisplayPresentTime,
            outDequeueReadyTime, outReleaseTime)) {
        FrameEventHistoryDelta delta;
        mGraphicBufferProducer->getFrameTimestamps(&delta);
        mFrameEventHistory->applyDelta(delta);
        events = mFrameEventHistory->getFrame(frameNumber);
    }

    if (events == nullptr) {
        // The entry was available before the update, but was overwritten
        // after the update. Make sure not to send the wrong frame's data.
        return NAME_NOT_FOUND;
    }

    getFrameTimestamp(outRequestedPresentTime, events->requestedPresentTime);
    getFrameTimestamp(outLatchTime, events->latchTime);

    nsecs_t firstRefreshStartTime = NATIVE_WINDOW_TIMESTAMP_INVALID;
    getFrameTimestamp(&firstRefreshStartTime, events->firstRefreshStartTime);
    if (outFirstRefreshStartTime) {
        *outFirstRefreshStartTime = firstRefreshStartTime;
    }

    getFrameTimestamp(outLastRefreshStartTime, events->lastRefreshStartTime);
    getFrameTimestamp(outDequeueReadyTime, events->dequeueReadyTime);

    nsecs_t acquireTime = NATIVE_WINDOW_TIMESTAMP_INVALID;
    getFrameTimestampFence(&acquireTime, events->acquireFence,
            events->hasAcquireInfo());
    if (outAcquireTime != nullptr) {
        *outAcquireTime = acquireTime;
    }

    getFrameTimestampFence(outGpuCompositionDoneTime,
            events->gpuCompositionDoneFence,
            events->hasGpuCompositionDoneInfo());
    getFrameTimestampFence(outDisplayPresentTime, events->displayPresentFence,
            events->hasDisplayPresentInfo());
    getFrameTimestampFence(outReleaseTime, events->releaseFence,
            events->hasReleaseInfo());

    // Fix up the GPU completion fence at this layer -- eglGetFrameTimestampsANDROID() expects
    // that EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID > EGL_RENDERING_COMPLETE_TIME_ANDROID.
    // This is typically true, but SurfaceFlinger may opt to cache prior GPU composition results,
    // which breaks that assumption, so zero out GPU composition time.
    if (outGpuCompositionDoneTime != nullptr
            && *outGpuCompositionDoneTime > 0 && (acquireTime > 0 || firstRefreshStartTime > 0)
            && *outGpuCompositionDoneTime <= std::max(acquireTime, firstRefreshStartTime)) {
        *outGpuCompositionDoneTime = 0;
    }

    return NO_ERROR;
}

// Deprecated (b/242763577): to be removed; this method should not be used.
// It still exists only to support already-compiled VNDK clients. This kind of
// support should not be tied to the display, so return true since most
// displays have it.
status_t Surface::getWideColorSupport(bool* supported) {
    ATRACE_CALL();

    *supported = true;
    return NO_ERROR;
}

// Deprecated (b/242763577): to be removed; this method should not be used.
// It still exists only to support already-compiled VNDK clients. HDR support
// should not be tied to the display, so return true since most displays
// have it.
status_t Surface::getHdrSupport(bool* supported) {
    ATRACE_CALL();

    *supported = true;
    return NO_ERROR;
}

int Surface::hook_setSwapInterval(ANativeWindow* window, int interval) {
    Surface* c = getSelf(window);
    return c->setSwapInterval(interval);
}

int Surface::hook_dequeueBuffer(ANativeWindow* window,
        ANativeWindowBuffer** buffer, int* fenceFd) {
    Surface* c = getSelf(window);
    {
        std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
        if (c->mDequeueInterceptor != nullptr) {
            auto interceptor = c->mDequeueInterceptor;
            auto data = c->mDequeueInterceptorData;
            return interceptor(window, Surface::dequeueBufferInternal, data, buffer, fenceFd);
        }
    }
    return c->dequeueBuffer(buffer, fenceFd);
}

int Surface::dequeueBufferInternal(ANativeWindow* window, ANativeWindowBuffer** buffer,
        int* fenceFd) {
    Surface* c = getSelf(window);
    return c->dequeueBuffer(buffer, fenceFd);
}

int Surface::hook_cancelBuffer(ANativeWindow* window,
        ANativeWindowBuffer* buffer, int fenceFd) {
    Surface* c = getSelf(window);
    {
        std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
        if (c->mCancelInterceptor != nullptr) {
            auto interceptor = c->mCancelInterceptor;
            auto data = c->mCancelInterceptorData;
            return interceptor(window, Surface::cancelBufferInternal, data, buffer, fenceFd);
        }
    }
    return c->cancelBuffer(buffer, fenceFd);
}

int Surface::cancelBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
    Surface* c = getSelf(window);
    return c->cancelBuffer(buffer, fenceFd);
}

int Surface::hook_queueBuffer(ANativeWindow* window,
        ANativeWindowBuffer* buffer, int fenceFd) {
    Surface* c = getSelf(window);
    {
        std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
        if (c->mQueueInterceptor != nullptr) {
            auto interceptor = c->mQueueInterceptor;
            auto data = c->mQueueInterceptorData;
            return interceptor(window, Surface::queueBufferInternal, data, buffer, fenceFd);
        }
    }
    return c->queueBuffer(buffer, fenceFd);
}

int Surface::queueBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
    Surface* c = getSelf(window);
    return c->queueBuffer(buffer, fenceFd);
}

int Surface::hook_dequeueBuffer_DEPRECATED(ANativeWindow* window,
        ANativeWindowBuffer** buffer) {
    Surface* c = getSelf(window);
    ANativeWindowBuffer* buf;
    int fenceFd = -1;
    int result = c->dequeueBuffer(&buf, &fenceFd);
    if (result != OK) {
        return result;
    }
    sp<Fence> fence(new Fence(fenceFd));
    int waitResult = fence->waitForever("dequeueBuffer_DEPRECATED");
    if (waitResult != OK) {
        ALOGE("dequeueBuffer_DEPRECATED: Fence::wait returned an error: %d",
                waitResult);
        c->cancelBuffer(buf, -1);
        return waitResult;
    }
    *buffer = buf;
    return result;
}

int Surface::hook_cancelBuffer_DEPRECATED(ANativeWindow* window,
        ANativeWindowBuffer* buffer) {
    Surface* c = getSelf(window);
    return c->cancelBuffer(buffer, -1);
}

int Surface::hook_lockBuffer_DEPRECATED(ANativeWindow* window,
        ANativeWindowBuffer* buffer) {
    Surface* c = getSelf(window);
    return c->lockBuffer_DEPRECATED(buffer);
}

int Surface::hook_queueBuffer_DEPRECATED(ANativeWindow* window,
        ANativeWindowBuffer* buffer) {
    Surface* c = getSelf(window);
    return c->queueBuffer(buffer, -1);
}

int Surface::hook_perform(ANativeWindow* window, int operation, ...) {
    va_list args;
    va_start(args, operation);
    Surface* c = getSelf(window);
    int result;
    // Don't acquire shared ownership of the interceptor mutex if we're going to
    // do interceptor registration, as otherwise we'll deadlock on acquiring
    // exclusive ownership.
    if (!isInterceptorRegistrationOp(operation)) {
        std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
        if (c->mPerformInterceptor != nullptr) {
            result = c->mPerformInterceptor(window, Surface::performInternal,
                    c->mPerformInterceptorData, operation, args);
            va_end(args);
            return result;
        }
    }
    result = c->perform(operation, args);
    va_end(args);
    return result;
}

int Surface::performInternal(ANativeWindow* window, int operation, va_list args) {
    Surface* c = getSelf(window);
    return c->perform(operation, args);
}

int Surface::hook_query(const ANativeWindow* window, int what, int* value) {
    const Surface* c = getSelf(window);
    {
        std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
        if (c->mQueryInterceptor != nullptr) {
            auto interceptor = c->mQueryInterceptor;
            auto data = c->mQueryInterceptorData;
            return interceptor(window, Surface::queryInternal, data, what, value);
        }
    }
    return c->query(what, value);
}

int Surface::queryInternal(const ANativeWindow* window, int what, int* value) {
    const Surface* c = getSelf(window);
    return c->query(what, value);
}

int Surface::setSwapInterval(int interval) {
    ATRACE_CALL();
    // EGL specification states:
    //  interval is silently clamped to minimum and maximum implementation
    //  dependent values before being stored.

    if (interval < minSwapInterval)
        interval = minSwapInterval;

    if (interval > maxSwapInterval)
        interval = maxSwapInterval;

    const bool wasSwapIntervalZero = mSwapIntervalZero;
    mSwapIntervalZero = (interval == 0);

    if (mSwapIntervalZero != wasSwapIntervalZero) {
        mGraphicBufferProducer->setAsyncMode(mSwapIntervalZero);
    }

    return NO_ERROR;
}

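// Fills a DequeueBufferInput from the currently requested width/height/format/usage (falling
// back to the user-set overrides for dimensions) and the frame-timestamp setting. Caller must
// hold mMutex.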
void Surface::getDequeueBufferInputLocked(
        IGraphicBufferProducer::DequeueBufferInput* dequeueInput) {
    LOG_ALWAYS_FATAL_IF(dequeueInput == nullptr, "input is null");

    dequeueInput->width = mReqWidth ? mReqWidth : mUserWidth;
    dequeueInput->height = mReqHeight ? mReqHeight : mUserHeight;

    dequeueInput->format = mReqFormat;
    dequeueInput->usage = mReqUsage;

    dequeueInput->getTimestamps = mEnableFrameTimestamps;
}

int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
    ATRACE_FORMAT("dequeueBuffer - %s", getDebugName());
    ALOGV("Surface::dequeueBuffer");

    IGraphicBufferProducer::DequeueBufferInput dqInput;
    {
        Mutex::Autolock lock(mMutex);
        if (mReportRemovedBuffers) {
            mRemovedBuffers.clear();
        }

        getDequeueBufferInputLocked(&dqInput);

        if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot !=
                BufferItem::INVALID_BUFFER_SLOT) {
            sp<GraphicBuffer>& gbuf(mSlots[mSharedBufferSlot].buffer);
            if (gbuf != nullptr) {
                *buffer = gbuf.get();
                *fenceFd = -1;
                return OK;
            }
        }
    } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffer

    int buf = -1;
    sp<Fence> fence;
    nsecs_t startTime = systemTime();

    FrameEventHistoryDelta frameTimestamps;
    status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence, dqInput.width,
                                                            dqInput.height, dqInput.format,
                                                            dqInput.usage, &mBufferAge,
                                                            dqInput.getTimestamps ?
                                                                    &frameTimestamps : nullptr);
    mLastDequeueDuration = systemTime() - startTime;

    if (result < 0) {
        ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer"
                "(%d, %d, %d, %#" PRIx64 ") failed: %d",
                dqInput.width, dqInput.height, dqInput.format, dqInput.usage, result);
        return result;
    }

    if (buf < 0 || buf >= NUM_BUFFER_SLOTS) {
        ALOGE("dequeueBuffer: IGraphicBufferProducer returned invalid slot number %d", buf);
        android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
        return FAILED_TRANSACTION;
    }

    Mutex::Autolock lock(mMutex);

    // Write this while holding the mutex
    mLastDequeueStartTime = startTime;

    sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);

    // this should never happen
    ALOGE_IF(fence == nullptr, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);

    if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
        static gui::FenceMonitor hwcReleaseThread("HWC release");
        hwcReleaseThread.queueFence(fence);
    }

    if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
        freeAllBuffers();
    }

    if (dqInput.getTimestamps) {
        mFrameEventHistory->applyDelta(frameTimestamps);
    }

    if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
        if (mReportRemovedBuffers && (gbuf != nullptr)) {
            mRemovedBuffers.push_back(gbuf);
        }
        result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
        if (result != NO_ERROR) {
            ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d", result);
            mGraphicBufferProducer->cancelBuffer(buf, fence);
            return result;
        }
    }

    if (fence->isValid()) {
        *fenceFd = fence->dup();
        if (*fenceFd == -1) {
            ALOGE("dequeueBuffer: error duping fence: %d", errno);
            // dup() should never fail; something is badly wrong. Soldier on
            // and hope for the best; the worst that should happen is some
            // visible corruption that lasts until the next frame.
        }
    } else {
        *fenceFd = -1;
    }

    *buffer = gbuf.get();

    if (mSharedBufferMode && mAutoRefresh) {
        mSharedBufferSlot = buf;
        mSharedBufferHasBeenQueued = false;
    } else if (mSharedBufferSlot == buf) {
        mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
        mSharedBufferHasBeenQueued = false;
    }

    mDequeuedSlots.insert(buf);

    return OK;
}

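// Batched variant of dequeueBuffer(): dequeues buffers->size() buffers in a single binder call,
// cancelling everything that was successfully dequeued if any individual dequeue fails. Not
// supported in shared buffer mode.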
int Surface::dequeueBuffers(std::vector<BatchBuffer>* buffers) {
    using DequeueBufferInput = IGraphicBufferProducer::DequeueBufferInput;
    using DequeueBufferOutput = IGraphicBufferProducer::DequeueBufferOutput;
    using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
    using RequestBufferOutput = IGraphicBufferProducer::RequestBufferOutput;

    ATRACE_CALL();
    ALOGV("Surface::dequeueBuffers");

    if (buffers->size() == 0) {
        ALOGE("%s: must dequeue at least 1 buffer!", __FUNCTION__);
        return BAD_VALUE;
    }

    if (mSharedBufferMode) {
        ALOGE("%s: batch operation is not supported in shared buffer mode!",
                __FUNCTION__);
        return INVALID_OPERATION;
    }

    size_t numBufferRequested = buffers->size();
    DequeueBufferInput input;

    {
        Mutex::Autolock lock(mMutex);
        if (mReportRemovedBuffers) {
            mRemovedBuffers.clear();
        }

        getDequeueBufferInputLocked(&input);
    } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffers

    std::vector<DequeueBufferInput> dequeueInput(numBufferRequested, input);
    std::vector<DequeueBufferOutput> dequeueOutput;

    nsecs_t startTime = systemTime();

    status_t result = mGraphicBufferProducer->dequeueBuffers(dequeueInput, &dequeueOutput);

    mLastDequeueDuration = systemTime() - startTime;

    if (result < 0) {
        ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
                "(%d, %d, %d, %#" PRIx64 ") failed: %d",
                __FUNCTION__, input.width, input.height, input.format, input.usage, result);
        return result;
    }

    std::vector<CancelBufferInput> cancelBufferInputs;
    cancelBufferInputs.reserve(numBufferRequested);
    std::vector<status_t> cancelBufferOutputs;
    for (size_t i = 0; i < numBufferRequested; i++) {
        if (dequeueOutput[i].result >= 0) {
            CancelBufferInput& input = cancelBufferInputs.emplace_back();
            input.slot = dequeueOutput[i].slot;
            input.fence = dequeueOutput[i].fence;
        }
    }

    for (const auto& output : dequeueOutput) {
        if (output.result < 0) {
            mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
            ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
                    "(%d, %d, %d, %#" PRIx64 ") failed: %d",
                    __FUNCTION__, input.width, input.height, input.format, input.usage,
                    output.result);
            return output.result;
        }

        if (output.slot < 0 || output.slot >= NUM_BUFFER_SLOTS) {
            mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
            ALOGE("%s: IGraphicBufferProducer returned invalid slot number %d",
                    __FUNCTION__, output.slot);
            android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
            return FAILED_TRANSACTION;
        }

        if (input.getTimestamps && !output.timestamps.has_value()) {
            mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
            ALOGE("%s: no frame timestamp returns!", __FUNCTION__);
            return FAILED_TRANSACTION;
        }

        // this should never happen
        ALOGE_IF(output.fence == nullptr,
                "%s: received null Fence! slot=%d", __FUNCTION__, output.slot);
    }

    Mutex::Autolock lock(mMutex);

    // Write this while holding the mutex
    mLastDequeueStartTime = startTime;

    std::vector<int32_t> requestBufferSlots;
    requestBufferSlots.reserve(numBufferRequested);
    // handle release all buffers and request buffers
    for (const auto& output : dequeueOutput) {
        if (output.result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
            ALOGV("%s: RELEASE_ALL_BUFFERS during batch operation", __FUNCTION__);
            freeAllBuffers();
            break;
        }
    }

    for (const auto& output : dequeueOutput) {
        // Collect slots that need requesting buffer
        sp<GraphicBuffer>& gbuf(mSlots[output.slot].buffer);
        if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
            if (mReportRemovedBuffers && (gbuf != nullptr)) {
                mRemovedBuffers.push_back(gbuf);
            }
            requestBufferSlots.push_back(output.slot);
        }
    }

    // Batch request Buffer
    std::vector<RequestBufferOutput> reqBufferOutput;
    if (requestBufferSlots.size() > 0) {
        result = mGraphicBufferProducer->requestBuffers(requestBufferSlots, &reqBufferOutput);
        if (result != NO_ERROR) {
            ALOGE("%s: IGraphicBufferProducer::requestBuffers failed: %d",
                    __FUNCTION__, result);
            mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
            return result;
        }

        // Check if we have any single failure
        for (size_t i = 0; i < requestBufferSlots.size(); i++) {
            if (reqBufferOutput[i].result != OK) {
                ALOGE("%s: IGraphicBufferProducer::requestBuffers failed at %zu-th buffer, slot %d",
                        __FUNCTION__, i, requestBufferSlots[i]);
                mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
                return reqBufferOutput[i].result;
            }
        }

        // Fill request buffer results to mSlots
        for (size_t i = 0; i < requestBufferSlots.size(); i++) {
            mSlots[requestBufferSlots[i]].buffer = reqBufferOutput[i].buffer;
        }
    }

    for (size_t batchIdx = 0; batchIdx < numBufferRequested; batchIdx++) {
        const auto& output = dequeueOutput[batchIdx];
        int slot = output.slot;
        sp<GraphicBuffer>& gbuf(mSlots[slot].buffer);

        if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
            static gui::FenceMonitor hwcReleaseThread("HWC release");
            hwcReleaseThread.queueFence(output.fence);
        }

        if (input.getTimestamps) {
            mFrameEventHistory->applyDelta(output.timestamps.value());
        }

        if (output.fence->isValid()) {
            buffers->at(batchIdx).fenceFd = output.fence->dup();
            if (buffers->at(batchIdx).fenceFd == -1) {
                ALOGE("%s: error duping fence: %d", __FUNCTION__, errno);
                // dup() should never fail; something is badly wrong. Soldier on
                // and hope for the best; the worst that should happen is some
                // visible corruption that lasts until the next frame.
            }
        } else {
            buffers->at(batchIdx).fenceFd = -1;
        }

        buffers->at(batchIdx).buffer = gbuf.get();
        mDequeuedSlots.insert(slot);
    }
    return OK;
}

int Surface::cancelBuffer(android_native_buffer_t* buffer,
        int fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::cancelBuffer");
    Mutex::Autolock lock(mMutex);
    int i = getSlotFromBufferLocked(buffer);
    if (i < 0) {
        if (fenceFd >= 0) {
            close(fenceFd);
        }
        return i;
    }
    if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
        if (fenceFd >= 0) {
            close(fenceFd);
        }
        return OK;
    }
    sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
    mGraphicBufferProducer->cancelBuffer(i, fence);

    if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == i) {
        mSharedBufferHasBeenQueued = true;
    }

    mDequeuedSlots.erase(i);

    return OK;
}

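// Batched variant of cancelBuffer(): cancels every buffer whose slot can be resolved in a single
// binder call; fences of buffers with unknown slots are closed, and the bad-slot error is
// reported after the rest have been cancelled. Not supported in shared buffer mode.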
int Surface::cancelBuffers(const std::vector<BatchBuffer>& buffers) {
    using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
    ATRACE_CALL();
    ALOGV("Surface::cancelBuffers");

    if (mSharedBufferMode) {
        ALOGE("%s: batch operation is not supported in shared buffer mode!",
                __FUNCTION__);
        return INVALID_OPERATION;
    }

    size_t numBuffers = buffers.size();
    std::vector<CancelBufferInput> cancelBufferInputs(numBuffers);
    std::vector<status_t> cancelBufferOutputs;
    size_t numBuffersCancelled = 0;
    int badSlotResult = 0;
    for (size_t i = 0; i < numBuffers; i++) {
        int slot = getSlotFromBufferLocked(buffers[i].buffer);
        int fenceFd = buffers[i].fenceFd;
        if (slot < 0) {
            if (fenceFd >= 0) {
                close(fenceFd);
            }
            ALOGE("%s: cannot find slot number for cancelled buffer", __FUNCTION__);
            badSlotResult = slot;
        } else {
            sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
            cancelBufferInputs[numBuffersCancelled].slot = slot;
            cancelBufferInputs[numBuffersCancelled++].fence = fence;
        }
    }
    cancelBufferInputs.resize(numBuffersCancelled);
    mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);

    for (size_t i = 0; i < numBuffersCancelled; i++) {
        mDequeuedSlots.erase(cancelBufferInputs[i].slot);
    }

    if (badSlotResult != 0) {
        return badSlotResult;
    }
    return OK;
}

int Surface::getSlotFromBufferLocked(
        android_native_buffer_t* buffer) const {
    if (buffer == nullptr) {
        ALOGE("%s: input buffer is null!", __FUNCTION__);
        return BAD_VALUE;
    }

    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
        if (mSlots[i].buffer != nullptr &&
                mSlots[i].buffer->handle == buffer->handle) {
            return i;
        }
    }
    ALOGE("%s: unknown buffer: %p", __FUNCTION__, buffer->handle);
    return BAD_VALUE;
}

int Surface::lockBuffer_DEPRECATED(android_native_buffer_t* buffer __attribute__((unused))) {
    ALOGV("Surface::lockBuffer");
    Mutex::Autolock lock(mMutex);
    return OK;
}

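// Builds the QueueBufferInput for a queueBuffer call from the current per-surface state
// (timestamp, dataspace, crop, scaling mode, transforms, HDR metadata) and converts the surface
// damage region from the GL convention (origin bottom-left, unrotated) to the system convention.
// Caller must hold mMutex.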
void Surface::getQueueBufferInputLocked(android_native_buffer_t* buffer, int fenceFd,
        nsecs_t timestamp, IGraphicBufferProducer::QueueBufferInput* out) {
    bool isAutoTimestamp = false;

    if (timestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
        timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
        isAutoTimestamp = true;
        ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
                timestamp / 1000000.0);
    }

    // Make sure the crop rectangle is entirely inside the buffer.
    Rect crop(Rect::EMPTY_RECT);
    mCrop.intersect(Rect(buffer->width, buffer->height), &crop);

    sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
    IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
            static_cast<android_dataspace>(mDataSpace), crop, mScalingMode,
            mTransform ^ mStickyTransform, fence, mStickyTransform,
            mEnableFrameTimestamps);

    // we should send HDR metadata as needed if this becomes a bottleneck
    input.setHdrMetadata(mHdrMetadata);

    if (mConnectedToCpu || mDirtyRegion.bounds() == Rect::INVALID_RECT) {
        input.setSurfaceDamage(Region::INVALID_REGION);
    } else {
        // Here we do two things:
        // 1) The surface damage was specified using the OpenGL ES convention of
        //    the origin being in the bottom-left corner. Here we flip to the
        //    convention that the rest of the system uses (top-left corner) by
        //    subtracting all top/bottom coordinates from the buffer height.
        // 2) If the buffer is coming in rotated (for example, because the EGL
        //    implementation is reacting to the transform hint coming back from
        //    SurfaceFlinger), the surface damage needs to be rotated the
        //    opposite direction, since it was generated assuming an unrotated
        //    buffer (the app doesn't know that the EGL implementation is
        //    reacting to the transform hint behind its back). The
        //    transformations in the switch statement below apply those
        //    complementary rotations (e.g., if 90 degrees, rotate 270 degrees).

        int width = buffer->width;
        int height = buffer->height;
        bool rotated90 = (mTransform ^ mStickyTransform) &
                NATIVE_WINDOW_TRANSFORM_ROT_90;
        if (rotated90) {
            std::swap(width, height);
        }

        Region flippedRegion;
        for (auto rect : mDirtyRegion) {
            int left = rect.left;
            int right = rect.right;
            int top = height - rect.bottom; // Flip from OpenGL convention
            int bottom = height - rect.top; // Flip from OpenGL convention
            switch (mTransform ^ mStickyTransform) {
                case NATIVE_WINDOW_TRANSFORM_ROT_90: {
                    // Rotate 270 degrees
                    Rect flippedRect{top, width - right, bottom, width - left};
                    flippedRegion.orSelf(flippedRect);
                    break;
                }
                case NATIVE_WINDOW_TRANSFORM_ROT_180: {
                    // Rotate 180 degrees
                    Rect flippedRect{width - right, height - bottom,
                            width - left, height - top};
                    flippedRegion.orSelf(flippedRect);
                    break;
                }
                case NATIVE_WINDOW_TRANSFORM_ROT_270: {
                    // Rotate 90 degrees
                    Rect flippedRect{height - bottom, left,
                            height - top, right};
                    flippedRegion.orSelf(flippedRect);
                    break;
                }
                default: {
                    Rect flippedRect{left, top, right, bottom};
                    flippedRegion.orSelf(flippedRect);
                    break;
                }
            }
        }

        input.setSurfaceDamage(flippedRegion);
    }
    *out = input;
}

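// Attaches the dataspace and any HDR static/dynamic metadata that was set on this Surface to the
// buffer itself via gralloc, so consumers can read it directly from the buffer handle.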
void Surface::applyGrallocMetadataLocked(
        android_native_buffer_t* buffer,
        const IGraphicBufferProducer::QueueBufferInput& queueBufferInput) {
    ATRACE_CALL();
    auto& mapper = GraphicBufferMapper::get();
    mapper.setDataspace(buffer->handle, static_cast<ui::Dataspace>(queueBufferInput.dataSpace));
    if (mHdrMetadataIsSet & HdrMetadata::SMPTE2086)
        mapper.setSmpte2086(buffer->handle, queueBufferInput.getHdrMetadata().getSmpte2086());
    if (mHdrMetadataIsSet & HdrMetadata::CTA861_3)
        mapper.setCta861_3(buffer->handle, queueBufferInput.getHdrMetadata().getCta8613());
    if (mHdrMetadataIsSet & HdrMetadata::HDR10PLUS)
        mapper.setSmpte2094_40(buffer->handle, queueBufferInput.getHdrMetadata().getHdr10Plus());
}

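// Post-queueBuffer bookkeeping shared by queueBuffer() and queueBuffers(): updates frame event
// history, frame numbers, default dimensions, transform hint, surface damage, and shared-buffer
// state from the producer's QueueBufferOutput. Caller must hold mMutex.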
void Surface::onBufferQueuedLocked(int slot, sp<Fence> fence,
        const IGraphicBufferProducer::QueueBufferOutput& output) {
    mDequeuedSlots.erase(slot);

    if (mEnableFrameTimestamps) {
        mFrameEventHistory->applyDelta(output.frameTimestamps);
        // Update timestamps with the local acquire fence.
        // The consumer doesn't send it back to prevent us from having two
        // file descriptors of the same fence.
        mFrameEventHistory->updateAcquireFence(mNextFrameNumber,
                std::make_shared<FenceTime>(fence));

        // Cache timestamps of signaled fences so we can close their file
        // descriptors.
        mFrameEventHistory->updateSignalTimes();
    }

    mLastFrameNumber = mNextFrameNumber;

    mDefaultWidth = output.width;
    mDefaultHeight = output.height;
    mNextFrameNumber = output.nextFrameNumber;

    // Ignore transform hint if sticky transform is set or transform to display inverse flag is
    // set.
    if (mStickyTransform == 0 && !transformToDisplayInverse()) {
        mTransformHint = output.transformHint;
    }

    mConsumerRunningBehind = (output.numPendingBuffers >= 2);

    if (!mConnectedToCpu) {
        // Clear surface damage back to full-buffer
        mDirtyRegion = Region::INVALID_REGION;
    }

    if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == slot) {
        mSharedBufferHasBeenQueued = true;
    }

    mQueueBufferCondition.broadcast();

    if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
        static gui::FenceMonitor gpuCompletionThread("GPU completion");
        gpuCompletionThread.queueFence(fence);
    }
}

int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::queueBuffer");
    Mutex::Autolock lock(mMutex);

    int i = getSlotFromBufferLocked(buffer);
    if (i < 0) {
        if (fenceFd >= 0) {
            close(fenceFd);
        }
        return i;
    }
    if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
        if (fenceFd >= 0) {
            close(fenceFd);
        }
        return OK;
    }

    IGraphicBufferProducer::QueueBufferOutput output;
    IGraphicBufferProducer::QueueBufferInput input;
    getQueueBufferInputLocked(buffer, fenceFd, mTimestamp, &input);
    applyGrallocMetadataLocked(buffer, input);
    sp<Fence> fence = input.fence;

    nsecs_t now = systemTime();

    status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
    mLastQueueDuration = systemTime() - now;
    if (err != OK) {
        ALOGE("queueBuffer: error queuing buffer, %d", err);
    }

    onBufferQueuedLocked(i, fence, output);
    return err;
}

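// Batched variant of queueBuffer(): resolves each buffer to its slot, builds all
// QueueBufferInputs up front, queues them in a single binder call, and then runs the usual
// post-queue bookkeeping per buffer. Not supported in shared buffer mode.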
queueBuffers(const std::vector<BatchQueuedBuffer> & buffers)1158 int Surface::queueBuffers(const std::vector<BatchQueuedBuffer>& buffers) {
1159 ATRACE_CALL();
1160 ALOGV("Surface::queueBuffers");
1161 Mutex::Autolock lock(mMutex);
1162
1163 if (mSharedBufferMode) {
1164 ALOGE("%s: batched operation is not supported in shared buffer mode", __FUNCTION__);
1165 return INVALID_OPERATION;
1166 }
1167
1168 size_t numBuffers = buffers.size();
1169 std::vector<IGraphicBufferProducer::QueueBufferInput> queueBufferInputs(numBuffers);
1170 std::vector<IGraphicBufferProducer::QueueBufferOutput> queueBufferOutputs;
1171 std::vector<int> bufferSlots(numBuffers, -1);
1172 std::vector<sp<Fence>> bufferFences(numBuffers);
1173
1174 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1175 int i = getSlotFromBufferLocked(buffers[batchIdx].buffer);
1176 if (i < 0) {
1177 if (buffers[batchIdx].fenceFd >= 0) {
1178 close(buffers[batchIdx].fenceFd);
1179 }
1180 return i;
1181 }
1182 bufferSlots[batchIdx] = i;
1183
1184 IGraphicBufferProducer::QueueBufferInput input;
1185 getQueueBufferInputLocked(
1186 buffers[batchIdx].buffer, buffers[batchIdx].fenceFd, buffers[batchIdx].timestamp,
1187 &input);
1188 bufferFences[batchIdx] = input.fence;
1189 queueBufferInputs[batchIdx] = input;
1190 }
1191
1192 nsecs_t now = systemTime();
1193 status_t err = mGraphicBufferProducer->queueBuffers(queueBufferInputs, &queueBufferOutputs);
1194 mLastQueueDuration = systemTime() - now;
1195 if (err != OK) {
1196 ALOGE("%s: error queuing buffer, %d", __FUNCTION__, err);
1197 }
1198
1199
1200 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1201 onBufferQueuedLocked(bufferSlots[batchIdx], bufferFences[batchIdx],
1202 queueBufferOutputs[batchIdx]);
1203 }
1204
1205 return err;
1206 }
1207
querySupportedTimestampsLocked() const1208 void Surface::querySupportedTimestampsLocked() const {
1209 // mMutex must be locked when calling this method.
1210
1211 if (mQueriedSupportedTimestamps) {
1212 return;
1213 }
1214 mQueriedSupportedTimestamps = true;
1215
1216 std::vector<FrameEvent> supportedFrameTimestamps;
1217 binder::Status status =
1218 composerServiceAIDL()->getSupportedFrameTimestamps(&supportedFrameTimestamps);
1219
1220 if (!status.isOk()) {
1221 return;
1222 }
1223
1224 for (auto sft : supportedFrameTimestamps) {
1225 if (sft == FrameEvent::DISPLAY_PRESENT) {
1226 mFrameTimestampsSupportsPresent = true;
1227 }
1228 }
1229 }
1230
query(int what,int * value) const1231 int Surface::query(int what, int* value) const {
1232 ATRACE_CALL();
1233 ALOGV("Surface::query");
1234 { // scope for the lock
1235 Mutex::Autolock lock(mMutex);
1236 switch (what) {
1237 case NATIVE_WINDOW_FORMAT:
1238 if (mReqFormat) {
1239 *value = static_cast<int>(mReqFormat);
1240 return NO_ERROR;
1241 }
1242 break;
1243 case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER: {
1244 status_t err = mGraphicBufferProducer->query(what, value);
1245 if (err == NO_ERROR) {
1246 return NO_ERROR;
1247 }
1248 sp<gui::ISurfaceComposer> surfaceComposer = composerServiceAIDL();
1249 if (surfaceComposer == nullptr) {
1250 return -EPERM; // likely permissions error
1251 }
1252 // ISurfaceComposer no longer supports authenticateSurfaceTexture
1253 *value = 0;
1254 return NO_ERROR;
1255 }
1256 case NATIVE_WINDOW_CONCRETE_TYPE:
1257 *value = NATIVE_WINDOW_SURFACE;
1258 return NO_ERROR;
1259 case NATIVE_WINDOW_DEFAULT_WIDTH:
1260 *value = static_cast<int>(
1261 mUserWidth ? mUserWidth : mDefaultWidth);
1262 return NO_ERROR;
1263 case NATIVE_WINDOW_DEFAULT_HEIGHT:
1264 *value = static_cast<int>(
1265 mUserHeight ? mUserHeight : mDefaultHeight);
1266 return NO_ERROR;
1267 case NATIVE_WINDOW_TRANSFORM_HINT:
1268 *value = static_cast<int>(getTransformHint());
1269 return NO_ERROR;
1270 case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND: {
1271 status_t err = NO_ERROR;
1272 if (!mConsumerRunningBehind) {
1273 *value = 0;
1274 } else {
1275 err = mGraphicBufferProducer->query(what, value);
1276 if (err == NO_ERROR) {
1277 mConsumerRunningBehind = *value;
1278 }
1279 }
1280 return err;
1281 }
1282 case NATIVE_WINDOW_BUFFER_AGE: {
1283 if (mBufferAge > INT32_MAX) {
1284 *value = 0;
1285 } else {
1286 *value = static_cast<int32_t>(mBufferAge);
1287 }
1288 return NO_ERROR;
1289 }
1290 case NATIVE_WINDOW_LAST_DEQUEUE_DURATION: {
1291 int64_t durationUs = mLastDequeueDuration / 1000;
1292 *value = durationUs > std::numeric_limits<int>::max() ?
1293 std::numeric_limits<int>::max() :
1294 static_cast<int>(durationUs);
1295 return NO_ERROR;
1296 }
1297 case NATIVE_WINDOW_LAST_QUEUE_DURATION: {
1298 int64_t durationUs = mLastQueueDuration / 1000;
1299 *value = durationUs > std::numeric_limits<int>::max() ?
1300 std::numeric_limits<int>::max() :
1301 static_cast<int>(durationUs);
1302 return NO_ERROR;
1303 }
1304 case NATIVE_WINDOW_FRAME_TIMESTAMPS_SUPPORTS_PRESENT: {
1305 querySupportedTimestampsLocked();
1306 *value = mFrameTimestampsSupportsPresent ? 1 : 0;
1307 return NO_ERROR;
1308 }
1309 case NATIVE_WINDOW_IS_VALID: {
1310 *value = mGraphicBufferProducer != nullptr ? 1 : 0;
1311 return NO_ERROR;
1312 }
1313 case NATIVE_WINDOW_DATASPACE: {
1314 *value = static_cast<int>(mDataSpace);
1315 return NO_ERROR;
1316 }
1317 case NATIVE_WINDOW_MAX_BUFFER_COUNT: {
1318 *value = mMaxBufferCount;
1319 return NO_ERROR;
1320 }
1321 }
1322 }
1323 return mGraphicBufferProducer->query(what, value);
1324 }
1325
perform(int operation,va_list args)1326 int Surface::perform(int operation, va_list args)
1327 {
1328 int res = NO_ERROR;
1329 switch (operation) {
1330 case NATIVE_WINDOW_CONNECT:
1331 // deprecated. must return NO_ERROR.
1332 break;
1333 case NATIVE_WINDOW_DISCONNECT:
1334 // deprecated. must return NO_ERROR.
1335 break;
1336 case NATIVE_WINDOW_SET_USAGE:
1337 res = dispatchSetUsage(args);
1338 break;
1339 case NATIVE_WINDOW_SET_CROP:
1340 res = dispatchSetCrop(args);
1341 break;
1342 case NATIVE_WINDOW_SET_BUFFER_COUNT:
1343 res = dispatchSetBufferCount(args);
1344 break;
1345 case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
1346 res = dispatchSetBuffersGeometry(args);
1347 break;
1348 case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
1349 res = dispatchSetBuffersTransform(args);
1350 break;
1351 case NATIVE_WINDOW_SET_BUFFERS_STICKY_TRANSFORM:
1352 res = dispatchSetBuffersStickyTransform(args);
1353 break;
1354 case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
1355 res = dispatchSetBuffersTimestamp(args);
1356 break;
1357 case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
1358 res = dispatchSetBuffersDimensions(args);
1359 break;
1360 case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
1361 res = dispatchSetBuffersUserDimensions(args);
1362 break;
1363 case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
1364 res = dispatchSetBuffersFormat(args);
1365 break;
1366 case NATIVE_WINDOW_LOCK:
1367 res = dispatchLock(args);
1368 break;
1369 case NATIVE_WINDOW_UNLOCK_AND_POST:
1370 res = dispatchUnlockAndPost(args);
1371 break;
1372 case NATIVE_WINDOW_SET_SCALING_MODE:
1373 res = dispatchSetScalingMode(args);
1374 break;
1375 case NATIVE_WINDOW_API_CONNECT:
1376 res = dispatchConnect(args);
1377 break;
1378 case NATIVE_WINDOW_API_DISCONNECT:
1379 res = dispatchDisconnect(args);
1380 break;
1381 case NATIVE_WINDOW_SET_SIDEBAND_STREAM:
1382 res = dispatchSetSidebandStream(args);
1383 break;
1384 case NATIVE_WINDOW_SET_BUFFERS_DATASPACE:
1385 res = dispatchSetBuffersDataSpace(args);
1386 break;
1387 case NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA:
1388 res = dispatchSetBuffersSmpte2086Metadata(args);
1389 break;
1390 case NATIVE_WINDOW_SET_BUFFERS_CTA861_3_METADATA:
1391 res = dispatchSetBuffersCta8613Metadata(args);
1392 break;
1393 case NATIVE_WINDOW_SET_BUFFERS_HDR10_PLUS_METADATA:
1394 res = dispatchSetBuffersHdr10PlusMetadata(args);
1395 break;
1396 case NATIVE_WINDOW_SET_SURFACE_DAMAGE:
1397 res = dispatchSetSurfaceDamage(args);
1398 break;
1399 case NATIVE_WINDOW_SET_SHARED_BUFFER_MODE:
1400 res = dispatchSetSharedBufferMode(args);
1401 break;
1402 case NATIVE_WINDOW_SET_AUTO_REFRESH:
1403 res = dispatchSetAutoRefresh(args);
1404 break;
1405 case NATIVE_WINDOW_GET_REFRESH_CYCLE_DURATION:
1406 res = dispatchGetDisplayRefreshCycleDuration(args);
1407 break;
1408 case NATIVE_WINDOW_GET_NEXT_FRAME_ID:
1409 res = dispatchGetNextFrameId(args);
1410 break;
1411 case NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS:
1412 res = dispatchEnableFrameTimestamps(args);
1413 break;
1414 case NATIVE_WINDOW_GET_COMPOSITOR_TIMING:
1415 res = dispatchGetCompositorTiming(args);
1416 break;
1417 case NATIVE_WINDOW_GET_FRAME_TIMESTAMPS:
1418 res = dispatchGetFrameTimestamps(args);
1419 break;
1420 case NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT:
1421 res = dispatchGetWideColorSupport(args);
1422 break;
1423 case NATIVE_WINDOW_GET_HDR_SUPPORT:
1424 res = dispatchGetHdrSupport(args);
1425 break;
1426 case NATIVE_WINDOW_SET_USAGE64:
1427 res = dispatchSetUsage64(args);
1428 break;
1429 case NATIVE_WINDOW_GET_CONSUMER_USAGE64:
1430 res = dispatchGetConsumerUsage64(args);
1431 break;
1432 case NATIVE_WINDOW_SET_AUTO_PREROTATION:
1433 res = dispatchSetAutoPrerotation(args);
1434 break;
1435 case NATIVE_WINDOW_GET_LAST_DEQUEUE_START:
1436 res = dispatchGetLastDequeueStartTime(args);
1437 break;
1438 case NATIVE_WINDOW_SET_DEQUEUE_TIMEOUT:
1439 res = dispatchSetDequeueTimeout(args);
1440 break;
1441 case NATIVE_WINDOW_GET_LAST_DEQUEUE_DURATION:
1442 res = dispatchGetLastDequeueDuration(args);
1443 break;
1444 case NATIVE_WINDOW_GET_LAST_QUEUE_DURATION:
1445 res = dispatchGetLastQueueDuration(args);
1446 break;
1447 case NATIVE_WINDOW_SET_FRAME_RATE:
1448 res = dispatchSetFrameRate(args);
1449 break;
1450 case NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR:
1451 res = dispatchAddCancelInterceptor(args);
1452 break;
1453 case NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR:
1454 res = dispatchAddDequeueInterceptor(args);
1455 break;
1456 case NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR:
1457 res = dispatchAddPerformInterceptor(args);
1458 break;
1459 case NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR:
1460 res = dispatchAddQueueInterceptor(args);
1461 break;
1462 case NATIVE_WINDOW_SET_QUERY_INTERCEPTOR:
1463 res = dispatchAddQueryInterceptor(args);
1464 break;
1465 case NATIVE_WINDOW_ALLOCATE_BUFFERS:
1466 allocateBuffers();
1467 res = NO_ERROR;
1468 break;
1469 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER:
1470 res = dispatchGetLastQueuedBuffer(args);
1471 break;
1472 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER2:
1473 res = dispatchGetLastQueuedBuffer2(args);
1474 break;
1475 case NATIVE_WINDOW_SET_FRAME_TIMELINE_INFO:
1476 res = dispatchSetFrameTimelineInfo(args);
1477 break;
1478 case NATIVE_WINDOW_SET_BUFFERS_ADDITIONAL_OPTIONS:
1479 res = dispatchSetAdditionalOptions(args);
1480 break;
1481 default:
1482 res = NAME_NOT_FOUND;
1483 break;
1484 }
1485 return res;
1486 }
1487
dispatchConnect(va_list args)1488 int Surface::dispatchConnect(va_list args) {
1489 int api = va_arg(args, int);
1490 return connect(api);
1491 }
1492
dispatchDisconnect(va_list args)1493 int Surface::dispatchDisconnect(va_list args) {
1494 int api = va_arg(args, int);
1495 return disconnect(api);
1496 }
1497
dispatchSetUsage(va_list args)1498 int Surface::dispatchSetUsage(va_list args) {
1499 uint64_t usage = va_arg(args, uint32_t);
1500 return setUsage(usage);
1501 }
1502
dispatchSetUsage64(va_list args)1503 int Surface::dispatchSetUsage64(va_list args) {
1504 uint64_t usage = va_arg(args, uint64_t);
1505 return setUsage(usage);
1506 }
1507
dispatchSetCrop(va_list args)1508 int Surface::dispatchSetCrop(va_list args) {
1509 android_native_rect_t const* rect = va_arg(args, android_native_rect_t*);
1510 return setCrop(reinterpret_cast<Rect const*>(rect));
1511 }
1512
dispatchSetBufferCount(va_list args)1513 int Surface::dispatchSetBufferCount(va_list args) {
1514 size_t bufferCount = va_arg(args, size_t);
1515 return setBufferCount(static_cast<int32_t>(bufferCount));
1516 }
1517
dispatchSetBuffersGeometry(va_list args)1518 int Surface::dispatchSetBuffersGeometry(va_list args) {
1519 uint32_t width = va_arg(args, uint32_t);
1520 uint32_t height = va_arg(args, uint32_t);
1521 PixelFormat format = va_arg(args, PixelFormat);
1522 int err = setBuffersDimensions(width, height);
1523 if (err != 0) {
1524 return err;
1525 }
1526 return setBuffersFormat(format);
1527 }
1528
dispatchSetBuffersDimensions(va_list args)1529 int Surface::dispatchSetBuffersDimensions(va_list args) {
1530 uint32_t width = va_arg(args, uint32_t);
1531 uint32_t height = va_arg(args, uint32_t);
1532 return setBuffersDimensions(width, height);
1533 }
1534
dispatchSetBuffersUserDimensions(va_list args)1535 int Surface::dispatchSetBuffersUserDimensions(va_list args) {
1536 uint32_t width = va_arg(args, uint32_t);
1537 uint32_t height = va_arg(args, uint32_t);
1538 return setBuffersUserDimensions(width, height);
1539 }
1540
dispatchSetBuffersFormat(va_list args)1541 int Surface::dispatchSetBuffersFormat(va_list args) {
1542 PixelFormat format = va_arg(args, PixelFormat);
1543 return setBuffersFormat(format);
1544 }
1545
dispatchSetScalingMode(va_list args)1546 int Surface::dispatchSetScalingMode(va_list args) {
1547 int mode = va_arg(args, int);
1548 return setScalingMode(mode);
1549 }
1550
dispatchSetBuffersTransform(va_list args)1551 int Surface::dispatchSetBuffersTransform(va_list args) {
1552 uint32_t transform = va_arg(args, uint32_t);
1553 return setBuffersTransform(transform);
1554 }
1555
dispatchSetBuffersStickyTransform(va_list args)1556 int Surface::dispatchSetBuffersStickyTransform(va_list args) {
1557 uint32_t transform = va_arg(args, uint32_t);
1558 return setBuffersStickyTransform(transform);
1559 }
1560
dispatchSetBuffersTimestamp(va_list args)1561 int Surface::dispatchSetBuffersTimestamp(va_list args) {
1562 int64_t timestamp = va_arg(args, int64_t);
1563 return setBuffersTimestamp(timestamp);
1564 }
1565
dispatchLock(va_list args)1566 int Surface::dispatchLock(va_list args) {
1567 ANativeWindow_Buffer* outBuffer = va_arg(args, ANativeWindow_Buffer*);
1568 ARect* inOutDirtyBounds = va_arg(args, ARect*);
1569 return lock(outBuffer, inOutDirtyBounds);
1570 }
1571
dispatchUnlockAndPost(va_list args)1572 int Surface::dispatchUnlockAndPost(va_list args __attribute__((unused))) {
1573 return unlockAndPost();
1574 }
1575
dispatchSetSidebandStream(va_list args)1576 int Surface::dispatchSetSidebandStream(va_list args) {
1577 native_handle_t* sH = va_arg(args, native_handle_t*);
1578 sp<NativeHandle> sidebandHandle = NativeHandle::create(sH, false);
1579 setSidebandStream(sidebandHandle);
1580 return OK;
1581 }
1582
dispatchSetBuffersDataSpace(va_list args)1583 int Surface::dispatchSetBuffersDataSpace(va_list args) {
1584 Dataspace dataspace = static_cast<Dataspace>(va_arg(args, int));
1585 return setBuffersDataSpace(dataspace);
1586 }
1587
dispatchSetBuffersSmpte2086Metadata(va_list args)1588 int Surface::dispatchSetBuffersSmpte2086Metadata(va_list args) {
1589 const android_smpte2086_metadata* metadata =
1590 va_arg(args, const android_smpte2086_metadata*);
1591 return setBuffersSmpte2086Metadata(metadata);
1592 }
1593
dispatchSetBuffersCta8613Metadata(va_list args)1594 int Surface::dispatchSetBuffersCta8613Metadata(va_list args) {
1595 const android_cta861_3_metadata* metadata =
1596 va_arg(args, const android_cta861_3_metadata*);
1597 return setBuffersCta8613Metadata(metadata);
1598 }
1599
dispatchSetBuffersHdr10PlusMetadata(va_list args)1600 int Surface::dispatchSetBuffersHdr10PlusMetadata(va_list args) {
1601 const size_t size = va_arg(args, size_t);
1602 const uint8_t* metadata = va_arg(args, const uint8_t*);
1603 return setBuffersHdr10PlusMetadata(size, metadata);
1604 }
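// Illustrative sketch (not part of this file): the HDR-metadata dispatchers
// above read exactly the arguments shown from the va_list, so a caller going
// through perform() directly would pass matching parameters. Op names are
// taken from system/window.h as suggested by the dispatchers; loadSei() is a
// hypothetical helper and the metadata values are placeholders.
//
//     android_smpte2086_metadata smpte = {};  // fill in primaries/luminance
//     window->perform(window, NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA,
//                     &smpte);
//
//     std::vector<uint8_t> sei = loadSei();   // HDR10+ dynamic metadata blob
//     window->perform(window, NATIVE_WINDOW_SET_BUFFERS_HDR10_PLUS_METADATA,
//                     sei.size(), sei.data());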
1605
dispatchSetSurfaceDamage(va_list args)1606 int Surface::dispatchSetSurfaceDamage(va_list args) {
1607 android_native_rect_t* rects = va_arg(args, android_native_rect_t*);
1608 size_t numRects = va_arg(args, size_t);
1609 setSurfaceDamage(rects, numRects);
1610 return NO_ERROR;
1611 }
1612
dispatchSetSharedBufferMode(va_list args)1613 int Surface::dispatchSetSharedBufferMode(va_list args) {
1614 bool sharedBufferMode = va_arg(args, int);
1615 return setSharedBufferMode(sharedBufferMode);
1616 }
1617
dispatchSetAutoRefresh(va_list args)1618 int Surface::dispatchSetAutoRefresh(va_list args) {
1619 bool autoRefresh = va_arg(args, int);
1620 return setAutoRefresh(autoRefresh);
1621 }
1622
dispatchGetDisplayRefreshCycleDuration(va_list args)1623 int Surface::dispatchGetDisplayRefreshCycleDuration(va_list args) {
1624 nsecs_t* outRefreshDuration = va_arg(args, int64_t*);
1625 return getDisplayRefreshCycleDuration(outRefreshDuration);
1626 }
1627
dispatchGetNextFrameId(va_list args)1628 int Surface::dispatchGetNextFrameId(va_list args) {
1629 uint64_t* nextFrameId = va_arg(args, uint64_t*);
1630 *nextFrameId = getNextFrameNumber();
1631 return NO_ERROR;
1632 }
1633
dispatchEnableFrameTimestamps(va_list args)1634 int Surface::dispatchEnableFrameTimestamps(va_list args) {
1635 bool enable = va_arg(args, int);
1636 enableFrameTimestamps(enable);
1637 return NO_ERROR;
1638 }
1639
dispatchGetCompositorTiming(va_list args)1640 int Surface::dispatchGetCompositorTiming(va_list args) {
1641 nsecs_t* compositeDeadline = va_arg(args, int64_t*);
1642 nsecs_t* compositeInterval = va_arg(args, int64_t*);
1643 nsecs_t* compositeToPresentLatency = va_arg(args, int64_t*);
1644 return getCompositorTiming(compositeDeadline, compositeInterval,
1645 compositeToPresentLatency);
1646 }
1647
dispatchGetFrameTimestamps(va_list args)1648 int Surface::dispatchGetFrameTimestamps(va_list args) {
1649 uint64_t frameId = va_arg(args, uint64_t);
1650 nsecs_t* outRequestedPresentTime = va_arg(args, int64_t*);
1651 nsecs_t* outAcquireTime = va_arg(args, int64_t*);
1652 nsecs_t* outLatchTime = va_arg(args, int64_t*);
1653 nsecs_t* outFirstRefreshStartTime = va_arg(args, int64_t*);
1654 nsecs_t* outLastRefreshStartTime = va_arg(args, int64_t*);
1655 nsecs_t* outGpuCompositionDoneTime = va_arg(args, int64_t*);
1656 nsecs_t* outDisplayPresentTime = va_arg(args, int64_t*);
1657 nsecs_t* outDequeueReadyTime = va_arg(args, int64_t*);
1658 nsecs_t* outReleaseTime = va_arg(args, int64_t*);
1659 return getFrameTimestamps(frameId,
1660 outRequestedPresentTime, outAcquireTime, outLatchTime,
1661 outFirstRefreshStartTime, outLastRefreshStartTime,
1662 outGpuCompositionDoneTime, outDisplayPresentTime,
1663 outDequeueReadyTime, outReleaseTime);
1664 }
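// Illustrative sketch (not part of this file): the query above pulls one frame
// id plus nine nsecs_t out-parameters from the va_list, so a direct perform()
// call mirrors that shape. Frame ids come from NATIVE_WINDOW_GET_NEXT_FRAME_ID
// before queueing, and timestamp collection must first be enabled through
// NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS (op names as used elsewhere in this
// file).
//
//     uint64_t frameId = 0;
//     window->perform(window, NATIVE_WINDOW_GET_NEXT_FRAME_ID, &frameId);
//     // ... queue the frame, then later ...
//     int64_t requestedPresent, acquire, latch, firstRefresh, lastRefresh,
//             gpuDone, displayPresent, dequeueReady, release;
//     window->perform(window, NATIVE_WINDOW_GET_FRAME_TIMESTAMPS, frameId,
//                     &requestedPresent, &acquire, &latch, &firstRefresh,
//                     &lastRefresh, &gpuDone, &displayPresent, &dequeueReady,
//                     &release);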
1665
dispatchGetWideColorSupport(va_list args)1666 int Surface::dispatchGetWideColorSupport(va_list args) {
1667 bool* outSupport = va_arg(args, bool*);
1668 return getWideColorSupport(outSupport);
1669 }
1670
dispatchGetHdrSupport(va_list args)1671 int Surface::dispatchGetHdrSupport(va_list args) {
1672 bool* outSupport = va_arg(args, bool*);
1673 return getHdrSupport(outSupport);
1674 }
1675
dispatchGetConsumerUsage64(va_list args)1676 int Surface::dispatchGetConsumerUsage64(va_list args) {
1677 uint64_t* usage = va_arg(args, uint64_t*);
1678 return getConsumerUsage(usage);
1679 }
1680
dispatchSetAutoPrerotation(va_list args)1681 int Surface::dispatchSetAutoPrerotation(va_list args) {
1682 bool autoPrerotation = va_arg(args, int);
1683 return setAutoPrerotation(autoPrerotation);
1684 }
1685
dispatchGetLastDequeueStartTime(va_list args)1686 int Surface::dispatchGetLastDequeueStartTime(va_list args) {
1687 int64_t* lastDequeueStartTime = va_arg(args, int64_t*);
1688 *lastDequeueStartTime = mLastDequeueStartTime;
1689 return NO_ERROR;
1690 }
1691
dispatchSetDequeueTimeout(va_list args)1692 int Surface::dispatchSetDequeueTimeout(va_list args) {
1693 nsecs_t timeout = va_arg(args, int64_t);
1694 return setDequeueTimeout(timeout);
1695 }
1696
dispatchGetLastDequeueDuration(va_list args)1697 int Surface::dispatchGetLastDequeueDuration(va_list args) {
1698 int64_t* lastDequeueDuration = va_arg(args, int64_t*);
1699 *lastDequeueDuration = mLastDequeueDuration;
1700 return NO_ERROR;
1701 }
1702
dispatchGetLastQueueDuration(va_list args)1703 int Surface::dispatchGetLastQueueDuration(va_list args) {
1704 int64_t* lastQueueDuration = va_arg(args, int64_t*);
1705 *lastQueueDuration = mLastQueueDuration;
1706 return NO_ERROR;
1707 }
1708
dispatchSetFrameRate(va_list args)1709 int Surface::dispatchSetFrameRate(va_list args) {
1710 float frameRate = static_cast<float>(va_arg(args, double));
1711 int8_t compatibility = static_cast<int8_t>(va_arg(args, int));
1712 int8_t changeFrameRateStrategy = static_cast<int8_t>(va_arg(args, int));
1713 return setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
1714 }
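// Illustrative usage sketch (not part of this file): applications normally set
// a frame-rate preference through the NDK rather than perform(); the call
// below assumes API level 30+ and a window obtained elsewhere, and typically
// ends up in a perform() op like the one dispatched above.
//
//     #include <android/native_window.h>
//
//     void preferSixtyHz(ANativeWindow* window) {
//         ANativeWindow_setFrameRate(window, 60.0f,
//                 ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_FIXED_SOURCE);
//     }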
1715
dispatchAddCancelInterceptor(va_list args)1716 int Surface::dispatchAddCancelInterceptor(va_list args) {
1717 ANativeWindow_cancelBufferInterceptor interceptor =
1718 va_arg(args, ANativeWindow_cancelBufferInterceptor);
1719 void* data = va_arg(args, void*);
1720 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1721 mCancelInterceptor = interceptor;
1722 mCancelInterceptorData = data;
1723 return NO_ERROR;
1724 }
1725
dispatchAddDequeueInterceptor(va_list args)1726 int Surface::dispatchAddDequeueInterceptor(va_list args) {
1727 ANativeWindow_dequeueBufferInterceptor interceptor =
1728 va_arg(args, ANativeWindow_dequeueBufferInterceptor);
1729 void* data = va_arg(args, void*);
1730 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1731 mDequeueInterceptor = interceptor;
1732 mDequeueInterceptorData = data;
1733 return NO_ERROR;
1734 }
1735
dispatchAddPerformInterceptor(va_list args)1736 int Surface::dispatchAddPerformInterceptor(va_list args) {
1737 ANativeWindow_performInterceptor interceptor = va_arg(args, ANativeWindow_performInterceptor);
1738 void* data = va_arg(args, void*);
1739 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1740 mPerformInterceptor = interceptor;
1741 mPerformInterceptorData = data;
1742 return NO_ERROR;
1743 }
1744
dispatchAddQueueInterceptor(va_list args)1745 int Surface::dispatchAddQueueInterceptor(va_list args) {
1746 ANativeWindow_queueBufferInterceptor interceptor =
1747 va_arg(args, ANativeWindow_queueBufferInterceptor);
1748 void* data = va_arg(args, void*);
1749 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1750 mQueueInterceptor = interceptor;
1751 mQueueInterceptorData = data;
1752 return NO_ERROR;
1753 }
1754
dispatchAddQueryInterceptor(va_list args)1755 int Surface::dispatchAddQueryInterceptor(va_list args) {
1756 ANativeWindow_queryInterceptor interceptor = va_arg(args, ANativeWindow_queryInterceptor);
1757 void* data = va_arg(args, void*);
1758 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1759 mQueryInterceptor = interceptor;
1760 mQueryInterceptorData = data;
1761 return NO_ERROR;
1762 }
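// Illustrative sketch (not part of this file): each registration dispatcher
// above consumes an interceptor function pointer followed by an opaque data
// pointer, so registration through perform() looks like the call below. The
// interceptor itself must match the corresponding ANativeWindow_*Interceptor
// typedef (signature not repeated here); myQueryInterceptor and myState are
// hypothetical names.
//
//     window->perform(window, NATIVE_WINDOW_SET_QUERY_INTERCEPTOR,
//                     myQueryInterceptor, &myState);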
1763
dispatchGetLastQueuedBuffer(va_list args)1764 int Surface::dispatchGetLastQueuedBuffer(va_list args) {
1765 AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
1766 int* fence = va_arg(args, int*);
1767 float* matrix = va_arg(args, float*);
1768 sp<GraphicBuffer> graphicBuffer;
1769 sp<Fence> spFence;
1770
1771 int result = mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, matrix);
1772
1773 if (graphicBuffer != nullptr) {
1774 *buffer = graphicBuffer->toAHardwareBuffer();
1775 AHardwareBuffer_acquire(*buffer);
1776 } else {
1777 *buffer = nullptr;
1778 }
1779
1780 if (spFence != nullptr) {
1781 *fence = spFence->dup();
1782 } else {
1783 *fence = -1;
1784 }
1785 return result;
1786 }
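// Illustrative sketch (not part of this file), mirroring the arguments read
// above: on return the caller owns one reference on the AHardwareBuffer
// (AHardwareBuffer_acquire was called) and owns the dup'd fence fd, so both
// must be released. Assumes <android/hardware_buffer.h> and <unistd.h>.
//
//     AHardwareBuffer* lastBuffer = nullptr;
//     int lastFence = -1;
//     float transformMatrix[16];
//     window->perform(window, NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER,
//                     &lastBuffer, &lastFence, transformMatrix);
//     if (lastBuffer != nullptr) {
//         // ... wait on lastFence (if >= 0) before reading, then ...
//         AHardwareBuffer_release(lastBuffer);
//     }
//     if (lastFence >= 0) close(lastFence);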
1787
dispatchGetLastQueuedBuffer2(va_list args)1788 int Surface::dispatchGetLastQueuedBuffer2(va_list args) {
1789 AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
1790 int* fence = va_arg(args, int*);
1791 ARect* crop = va_arg(args, ARect*);
1792 uint32_t* transform = va_arg(args, uint32_t*);
1793 sp<GraphicBuffer> graphicBuffer;
1794 sp<Fence> spFence;
1795
1796 Rect r;
1797 int result =
1798 mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, &r, transform);
1799
1800 if (graphicBuffer != nullptr) {
1801 *buffer = graphicBuffer->toAHardwareBuffer();
1802 AHardwareBuffer_acquire(*buffer);
1803
1804 // Only fill in the crop out-parameter when the buffer is valid (matches IGBP behavior).
1805 crop->left = r.left;
1806 crop->top = r.top;
1807 crop->right = r.right;
1808 crop->bottom = r.bottom;
1809 } else {
1810 *buffer = nullptr;
1811 }
1812
1813 if (spFence != nullptr) {
1814 *fence = spFence->dup();
1815 } else {
1816 *fence = -1;
1817 }
1818 return result;
1819 }
1820
dispatchSetFrameTimelineInfo(va_list args)1821 int Surface::dispatchSetFrameTimelineInfo(va_list args) {
1822 ATRACE_CALL();
1823 ALOGV("Surface::%s", __func__);
1824
1825 const auto nativeWindowFtlInfo = static_cast<ANativeWindowFrameTimelineInfo>(
1826 va_arg(args, ANativeWindowFrameTimelineInfo));
1827
1828 FrameTimelineInfo ftlInfo;
1829 ftlInfo.vsyncId = nativeWindowFtlInfo.frameTimelineVsyncId;
1830 ftlInfo.inputEventId = nativeWindowFtlInfo.inputEventId;
1831 ftlInfo.startTimeNanos = nativeWindowFtlInfo.startTimeNanos;
1832 ftlInfo.useForRefreshRateSelection = nativeWindowFtlInfo.useForRefreshRateSelection;
1833 ftlInfo.skippedFrameVsyncId = nativeWindowFtlInfo.skippedFrameVsyncId;
1834 ftlInfo.skippedFrameStartTimeNanos = nativeWindowFtlInfo.skippedFrameStartTimeNanos;
1835
1836 return setFrameTimelineInfo(nativeWindowFtlInfo.frameNumber, ftlInfo);
1837 }
1838
dispatchSetAdditionalOptions(va_list args)1839 int Surface::dispatchSetAdditionalOptions(va_list args) {
1840 ATRACE_CALL();
1841
1842 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_EXTENDEDALLOCATE)
1843 const AHardwareBufferLongOptions* opts = va_arg(args, const AHardwareBufferLongOptions*);
1844 const size_t optsSize = va_arg(args, size_t);
1845 std::vector<gui::AdditionalOptions> convertedOpts;
1846 convertedOpts.reserve(optsSize);
1847 for (size_t i = 0; i < optsSize; i++) {
1848 convertedOpts.emplace_back(opts[i].name, opts[i].value);
1849 }
1850 return setAdditionalOptions(convertedOpts);
1851 #else
1852 (void)args;
1853 return INVALID_OPERATION;
1854 #endif
1855 }
1856
transformToDisplayInverse() const1857 bool Surface::transformToDisplayInverse() const {
1858 return (mTransform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) ==
1859 NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
1860 }
1861
connect(int api)1862 int Surface::connect(int api) {
1863 static sp<IProducerListener> listener = new StubProducerListener();
1864 return connect(api, listener);
1865 }
1866
connect(int api,const sp<IProducerListener> & listener)1867 int Surface::connect(int api, const sp<IProducerListener>& listener) {
1868 return connect(api, listener, false);
1869 }
1870
connect(int api,bool reportBufferRemoval,const sp<SurfaceListener> & sListener)1871 int Surface::connect(
1872 int api, bool reportBufferRemoval, const sp<SurfaceListener>& sListener) {
1873 if (sListener != nullptr) {
1874 mListenerProxy = new ProducerListenerProxy(this, sListener);
1875 }
1876 return connect(api, mListenerProxy, reportBufferRemoval);
1877 }
1878
connect(int api,const sp<IProducerListener> & listener,bool reportBufferRemoval)1879 int Surface::connect(
1880 int api, const sp<IProducerListener>& listener, bool reportBufferRemoval) {
1881 ATRACE_CALL();
1882 ALOGV("Surface::connect");
1883 Mutex::Autolock lock(mMutex);
1884 IGraphicBufferProducer::QueueBufferOutput output;
1885 mReportRemovedBuffers = reportBufferRemoval;
1886 int err = mGraphicBufferProducer->connect(listener, api, mProducerControlledByApp, &output);
1887 if (err == NO_ERROR) {
1888 mDefaultWidth = output.width;
1889 mDefaultHeight = output.height;
1890 mNextFrameNumber = output.nextFrameNumber;
1891 mMaxBufferCount = output.maxBufferCount;
1892
1893 // Ignore transform hint if sticky transform is set or transform to display inverse flag is
1894 // set. Transform hint should be ignored if the client is expected to always submit buffers
1895 // in the same orientation.
1896 if (mStickyTransform == 0 && !transformToDisplayInverse()) {
1897 mTransformHint = output.transformHint;
1898 }
1899
1900 mConsumerRunningBehind = (output.numPendingBuffers >= 2);
1901 }
1902 if (!err && api == NATIVE_WINDOW_API_CPU) {
1903 mConnectedToCpu = true;
1904 // Clear the dirty region in case we're switching from a non-CPU API
1905 mDirtyRegion.clear();
1906 } else if (!err) {
1907 // Initialize the dirty region for tracking surface damage
1908 mDirtyRegion = Region::INVALID_REGION;
1909 }
1910
1911 return err;
1912 }
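// Illustrative usage sketch (not part of this file): producer clients attach
// to and detach from the window through the api-connect helpers in
// system/window.h, which end up in the connect()/disconnect() paths above.
// NATIVE_WINDOW_API_CPU is what Surface::lock() uses internally.
//
//     if (native_window_api_connect(window, NATIVE_WINDOW_API_EGL) != OK) {
//         // handle error
//     }
//     // ... dequeue/queue buffers ...
//     native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL);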
1913
disconnect(int api,IGraphicBufferProducer::DisconnectMode mode)1915 int Surface::disconnect(int api, IGraphicBufferProducer::DisconnectMode mode) {
1916 ATRACE_CALL();
1917 ALOGV("Surface::disconnect");
1918 Mutex::Autolock lock(mMutex);
1919 mRemovedBuffers.clear();
1920 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
1921 mSharedBufferHasBeenQueued = false;
1922 freeAllBuffers();
1923 int err = mGraphicBufferProducer->disconnect(api, mode);
1924 if (!err) {
1925 mReqFormat = 0;
1926 mReqWidth = 0;
1927 mReqHeight = 0;
1928 mReqUsage = 0;
1929 mCrop.clear();
1930 mDataSpace = Dataspace::UNKNOWN;
1931 mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
1932 mTransform = 0;
1933 mStickyTransform = 0;
1934 mAutoPrerotation = false;
1935 mEnableFrameTimestamps = false;
1936 mMaxBufferCount = NUM_BUFFER_SLOTS;
1937
1938 if (api == NATIVE_WINDOW_API_CPU) {
1939 mConnectedToCpu = false;
1940 }
1941 }
1942 return err;
1943 }
1944
detachNextBuffer(sp<GraphicBuffer> * outBuffer,sp<Fence> * outFence)1945 int Surface::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
1946 sp<Fence>* outFence) {
1947 ATRACE_CALL();
1948 ALOGV("Surface::detachNextBuffer");
1949
1950 if (outBuffer == nullptr || outFence == nullptr) {
1951 return BAD_VALUE;
1952 }
1953
1954 Mutex::Autolock lock(mMutex);
1955 if (mReportRemovedBuffers) {
1956 mRemovedBuffers.clear();
1957 }
1958
1959 sp<GraphicBuffer> buffer(nullptr);
1960 sp<Fence> fence(nullptr);
1961 status_t result = mGraphicBufferProducer->detachNextBuffer(
1962 &buffer, &fence);
1963 if (result != NO_ERROR) {
1964 return result;
1965 }
1966
1967 *outBuffer = buffer;
1968 if (fence != nullptr && fence->isValid()) {
1969 *outFence = fence;
1970 } else {
1971 *outFence = Fence::NO_FENCE;
1972 }
1973
1974 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
1975 if (mSlots[i].buffer != nullptr &&
1976 mSlots[i].buffer->getId() == buffer->getId()) {
1977 if (mReportRemovedBuffers) {
1978 mRemovedBuffers.push_back(mSlots[i].buffer);
1979 }
1980 mSlots[i].buffer = nullptr;
1981 }
1982 }
1983
1984 return NO_ERROR;
1985 }
1986
attachBuffer(ANativeWindowBuffer * buffer)1987 int Surface::attachBuffer(ANativeWindowBuffer* buffer)
1988 {
1989 ATRACE_CALL();
1990 ALOGV("Surface::attachBuffer");
1991
1992 Mutex::Autolock lock(mMutex);
1993 if (mReportRemovedBuffers) {
1994 mRemovedBuffers.clear();
1995 }
1996
1997 sp<GraphicBuffer> graphicBuffer(static_cast<GraphicBuffer*>(buffer));
1998 uint32_t priorGeneration = graphicBuffer->mGenerationNumber;
1999 graphicBuffer->mGenerationNumber = mGenerationNumber;
2000 int32_t attachedSlot = -1;
2001 status_t result = mGraphicBufferProducer->attachBuffer(&attachedSlot, graphicBuffer);
2002 if (result != NO_ERROR) {
2003 ALOGE("attachBuffer: IGraphicBufferProducer call failed (%d)", result);
2004 graphicBuffer->mGenerationNumber = priorGeneration;
2005 return result;
2006 }
2007 if (mReportRemovedBuffers && (mSlots[attachedSlot].buffer != nullptr)) {
2008 mRemovedBuffers.push_back(mSlots[attachedSlot].buffer);
2009 }
2010 mSlots[attachedSlot].buffer = graphicBuffer;
2011 mDequeuedSlots.insert(attachedSlot);
2012
2013 return NO_ERROR;
2014 }
2015
setUsage(uint64_t reqUsage)2016 int Surface::setUsage(uint64_t reqUsage)
2017 {
2018 ALOGV("Surface::setUsage");
2019 Mutex::Autolock lock(mMutex);
2020 if (reqUsage != mReqUsage) {
2021 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2022 }
2023 mReqUsage = reqUsage;
2024 return OK;
2025 }
2026
setCrop(Rect const * rect)2027 int Surface::setCrop(Rect const* rect)
2028 {
2029 ATRACE_CALL();
2030
2031 Rect realRect(Rect::EMPTY_RECT);
2032 if (rect == nullptr || rect->isEmpty()) {
2033 realRect.clear();
2034 } else {
2035 realRect = *rect;
2036 }
2037
2038 ALOGV("Surface::setCrop rect=[%d %d %d %d]",
2039 realRect.left, realRect.top, realRect.right, realRect.bottom);
2040
2041 Mutex::Autolock lock(mMutex);
2042 mCrop = realRect;
2043 return NO_ERROR;
2044 }
2045
setBufferCount(int bufferCount)2046 int Surface::setBufferCount(int bufferCount)
2047 {
2048 ATRACE_CALL();
2049 ALOGV("Surface::setBufferCount");
2050 Mutex::Autolock lock(mMutex);
2051
2052 status_t err = NO_ERROR;
2053 if (bufferCount == 0) {
2054 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(1);
2055 } else {
2056 int minUndequeuedBuffers = 0;
2057 err = mGraphicBufferProducer->query(
2058 NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
2059 if (err == NO_ERROR) {
2060 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
2061 bufferCount - minUndequeuedBuffers);
2062 }
2063 }
2064
2065 ALOGE_IF(err, "IGraphicBufferProducer::setBufferCount(%d) returned %s",
2066 bufferCount, strerror(-err));
2067
2068 return err;
2069 }
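// Worked example of the arithmetic above (illustrative): the legacy
// setBufferCount() request is a total slot count, while the producer only
// controls the maximum number of *dequeued* buffers, so
//     maxDequeuedBufferCount = bufferCount - minUndequeuedBuffers.
// With a typical minUndequeuedBuffers of 2, setBufferCount(5) becomes
// setMaxDequeuedBufferCount(3); setBufferCount(0) resets to a single
// dequeued buffer.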
2070
setMaxDequeuedBufferCount(int maxDequeuedBuffers)2071 int Surface::setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
2072 ATRACE_CALL();
2073 ALOGV("Surface::setMaxDequeuedBufferCount");
2074 Mutex::Autolock lock(mMutex);
2075
2076 status_t err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
2077 maxDequeuedBuffers);
2078 ALOGE_IF(err, "IGraphicBufferProducer::setMaxDequeuedBufferCount(%d) "
2079 "returned %s", maxDequeuedBuffers, strerror(-err));
2080
2081 return err;
2082 }
2083
setAsyncMode(bool async)2084 int Surface::setAsyncMode(bool async) {
2085 ATRACE_CALL();
2086 ALOGV("Surface::setAsyncMode");
2087 Mutex::Autolock lock(mMutex);
2088
2089 status_t err = mGraphicBufferProducer->setAsyncMode(async);
2090 ALOGE_IF(err, "IGraphicBufferProducer::setAsyncMode(%d) returned %s",
2091 async, strerror(-err));
2092
2093 return err;
2094 }
2095
setSharedBufferMode(bool sharedBufferMode)2096 int Surface::setSharedBufferMode(bool sharedBufferMode) {
2097 ATRACE_CALL();
2098 ALOGV("Surface::setSharedBufferMode (%d)", sharedBufferMode);
2099 Mutex::Autolock lock(mMutex);
2100
2101 status_t err = mGraphicBufferProducer->setSharedBufferMode(
2102 sharedBufferMode);
2103 if (err == NO_ERROR) {
2104 mSharedBufferMode = sharedBufferMode;
2105 }
2106 ALOGE_IF(err, "IGraphicBufferProducer::setSharedBufferMode(%d) returned "
2107 "%s", sharedBufferMode, strerror(-err));
2108
2109 return err;
2110 }
2111
setAutoRefresh(bool autoRefresh)2112 int Surface::setAutoRefresh(bool autoRefresh) {
2113 ATRACE_CALL();
2114 ALOGV("Surface::setAutoRefresh (%d)", autoRefresh);
2115 Mutex::Autolock lock(mMutex);
2116
2117 status_t err = mGraphicBufferProducer->setAutoRefresh(autoRefresh);
2118 if (err == NO_ERROR) {
2119 mAutoRefresh = autoRefresh;
2120 }
2121 ALOGE_IF(err, "IGraphicBufferProducer::setAutoRefresh(%d) returned %s",
2122 autoRefresh, strerror(-err));
2123 return err;
2124 }
2125
setBuffersDimensions(uint32_t width,uint32_t height)2126 int Surface::setBuffersDimensions(uint32_t width, uint32_t height)
2127 {
2128 ATRACE_CALL();
2129 ALOGV("Surface::setBuffersDimensions");
2130
2131 if ((width && !height) || (!width && height))
2132 return BAD_VALUE;
2133
2134 Mutex::Autolock lock(mMutex);
2135 if (width != mReqWidth || height != mReqHeight) {
2136 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2137 }
2138 mReqWidth = width;
2139 mReqHeight = height;
2140 return NO_ERROR;
2141 }
2142
setBuffersUserDimensions(uint32_t width,uint32_t height)2143 int Surface::setBuffersUserDimensions(uint32_t width, uint32_t height)
2144 {
2145 ATRACE_CALL();
2146 ALOGV("Surface::setBuffersUserDimensions");
2147
2148 if ((width && !height) || (!width && height))
2149 return BAD_VALUE;
2150
2151 Mutex::Autolock lock(mMutex);
2152 if (width != mUserWidth || height != mUserHeight) {
2153 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2154 }
2155 mUserWidth = width;
2156 mUserHeight = height;
2157 return NO_ERROR;
2158 }
2159
setBuffersFormat(PixelFormat format)2160 int Surface::setBuffersFormat(PixelFormat format)
2161 {
2162 ALOGV("Surface::setBuffersFormat");
2163
2164 Mutex::Autolock lock(mMutex);
2165 if (format != mReqFormat) {
2166 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2167 }
2168 mReqFormat = format;
2169 return NO_ERROR;
2170 }
2171
setScalingMode(int mode)2172 int Surface::setScalingMode(int mode)
2173 {
2174 ATRACE_CALL();
2175 ALOGV("Surface::setScalingMode(%d)", mode);
2176
2177 switch (mode) {
2178 case NATIVE_WINDOW_SCALING_MODE_FREEZE:
2179 case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
2180 case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
2181 case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
2182 break;
2183 default:
2184 ALOGE("unknown scaling mode: %d", mode);
2185 return BAD_VALUE;
2186 }
2187
2188 Mutex::Autolock lock(mMutex);
2189 mScalingMode = mode;
2190 return NO_ERROR;
2191 }
2192
setBuffersTransform(uint32_t transform)2193 int Surface::setBuffersTransform(uint32_t transform)
2194 {
2195 ATRACE_CALL();
2196 ALOGV("Surface::setBuffersTransform");
2197 Mutex::Autolock lock(mMutex);
2198 // Ensure NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY is sticky. If the client sets the flag, do not
2199 // override it until the surface is disconnected. This is a temporary workaround for camera
2200 // until they switch to using Buffer State Layers. Currently, if the client sets the buffer
2201 // transform, it may be overridden by the buffer producer when the producer sets the buffer transform.
2202 if (transformToDisplayInverse()) {
2203 transform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
2204 }
2205 mTransform = transform;
2206 return NO_ERROR;
2207 }
2208
setBuffersStickyTransform(uint32_t transform)2209 int Surface::setBuffersStickyTransform(uint32_t transform)
2210 {
2211 ATRACE_CALL();
2212 ALOGV("Surface::setBuffersStickyTransform");
2213 Mutex::Autolock lock(mMutex);
2214 mStickyTransform = transform;
2215 return NO_ERROR;
2216 }
2217
setBuffersTimestamp(int64_t timestamp)2218 int Surface::setBuffersTimestamp(int64_t timestamp)
2219 {
2220 ALOGV("Surface::setBuffersTimestamp");
2221 Mutex::Autolock lock(mMutex);
2222 mTimestamp = timestamp;
2223 return NO_ERROR;
2224 }
2225
setBuffersDataSpace(Dataspace dataSpace)2226 int Surface::setBuffersDataSpace(Dataspace dataSpace)
2227 {
2228 ALOGV("Surface::setBuffersDataSpace");
2229 Mutex::Autolock lock(mMutex);
2230 mDataSpace = dataSpace;
2231 return NO_ERROR;
2232 }
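// Illustrative usage sketch (not part of this file): from the NDK the
// equivalent request is issued with ANativeWindow_setBuffersDataSpace
// (API 28+); the enumerator below assumes <android/data_space.h>.
//
//     ANativeWindow_setBuffersDataSpace(window, ADATASPACE_DISPLAY_P3);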
2233
setBuffersSmpte2086Metadata(const android_smpte2086_metadata * metadata)2234 int Surface::setBuffersSmpte2086Metadata(const android_smpte2086_metadata* metadata) {
2235 ALOGV("Surface::setBuffersSmpte2086Metadata");
2236 Mutex::Autolock lock(mMutex);
2237 mHdrMetadataIsSet |= HdrMetadata::SMPTE2086;
2238 if (metadata) {
2239 mHdrMetadata.smpte2086 = *metadata;
2240 mHdrMetadata.validTypes |= HdrMetadata::SMPTE2086;
2241 } else {
2242 mHdrMetadata.validTypes &= ~HdrMetadata::SMPTE2086;
2243 }
2244 return NO_ERROR;
2245 }
2246
setBuffersCta8613Metadata(const android_cta861_3_metadata * metadata)2247 int Surface::setBuffersCta8613Metadata(const android_cta861_3_metadata* metadata) {
2248 ALOGV("Surface::setBuffersCta8613Metadata");
2249 Mutex::Autolock lock(mMutex);
2250 mHdrMetadataIsSet |= HdrMetadata::CTA861_3;
2251 if (metadata) {
2252 mHdrMetadata.cta8613 = *metadata;
2253 mHdrMetadata.validTypes |= HdrMetadata::CTA861_3;
2254 } else {
2255 mHdrMetadata.validTypes &= ~HdrMetadata::CTA861_3;
2256 }
2257 return NO_ERROR;
2258 }
2259
setBuffersHdr10PlusMetadata(const size_t size,const uint8_t * metadata)2260 int Surface::setBuffersHdr10PlusMetadata(const size_t size, const uint8_t* metadata) {
2261 ALOGV("Surface::setBuffersBlobMetadata");
2262 Mutex::Autolock lock(mMutex);
2263 mHdrMetadataIsSet |= HdrMetadata::HDR10PLUS;
2264 if (size > 0) {
2265 mHdrMetadata.hdr10plus.assign(metadata, metadata + size);
2266 mHdrMetadata.validTypes |= HdrMetadata::HDR10PLUS;
2267 } else {
2268 mHdrMetadata.validTypes &= ~HdrMetadata::HDR10PLUS;
2269 mHdrMetadata.hdr10plus.clear();
2270 }
2271 return NO_ERROR;
2272 }
2273
getBuffersDataSpace()2274 Dataspace Surface::getBuffersDataSpace() {
2275 ALOGV("Surface::getBuffersDataSpace");
2276 Mutex::Autolock lock(mMutex);
2277 return mDataSpace;
2278 }
2279
freeAllBuffers()2280 void Surface::freeAllBuffers() {
2281 if (!mDequeuedSlots.empty()) {
2282 ALOGE("%s: %zu buffers were freed while being dequeued!",
2283 __FUNCTION__, mDequeuedSlots.size());
2284 }
2285 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
2286 mSlots[i].buffer = nullptr;
2287 }
2288 }
2289
getAndFlushBuffersFromSlots(const std::vector<int32_t> & slots,std::vector<sp<GraphicBuffer>> * outBuffers)2290 status_t Surface::getAndFlushBuffersFromSlots(const std::vector<int32_t>& slots,
2291 std::vector<sp<GraphicBuffer>>* outBuffers) {
2292 ALOGV("Surface::getAndFlushBuffersFromSlots");
2293 for (int32_t i : slots) {
2294 if (i < 0 || i >= NUM_BUFFER_SLOTS) {
2295 ALOGE("%s: Invalid slotIndex: %d", __FUNCTION__, i);
2296 return BAD_VALUE;
2297 }
2298 }
2299
2300 Mutex::Autolock lock(mMutex);
2301 for (int32_t i : slots) {
2302 if (mSlots[i].buffer == nullptr) {
2303 ALOGW("%s: Discarded slot %d doesn't contain buffer!", __FUNCTION__, i);
2304 continue;
2305 }
2306 // Don't flush currently dequeued buffers
2307 if (mDequeuedSlots.count(i) > 0) {
2308 continue;
2309 }
2310 outBuffers->push_back(mSlots[i].buffer);
2311 mSlots[i].buffer = nullptr;
2312 }
2313 return OK;
2314 }
2315
setSurfaceDamage(android_native_rect_t * rects,size_t numRects)2316 void Surface::setSurfaceDamage(android_native_rect_t* rects, size_t numRects) {
2317 ATRACE_CALL();
2318 ALOGV("Surface::setSurfaceDamage");
2319 Mutex::Autolock lock(mMutex);
2320
2321 if (mConnectedToCpu || numRects == 0) {
2322 mDirtyRegion = Region::INVALID_REGION;
2323 return;
2324 }
2325
2326 mDirtyRegion.clear();
2327 for (size_t r = 0; r < numRects; ++r) {
2328 // We intentionally flip top and bottom here: because the rects are
2329 // specified with a bottom-left origin, top > bottom, which would fail
2330 // validation in the Region class. We fix this up when we flip to a
2331 // top-left origin in queueBuffer.
2332 Rect rect(rects[r].left, rects[r].bottom, rects[r].right, rects[r].top);
2333 mDirtyRegion.orSelf(rect);
2334 }
2335 }
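// Illustrative sketch (not part of this file): damage rects arrive through the
// NATIVE_WINDOW_SET_SURFACE_DAMAGE perform op with the (rects, count) layout
// read by dispatchSetSurfaceDamage(), in the bottom-left-origin convention
// described above (top > bottom).
//
//     android_native_rect_t damage = { /*left*/ 0, /*top*/ 720,
//                                      /*right*/ 1280, /*bottom*/ 0 };
//     window->perform(window, NATIVE_WINDOW_SET_SURFACE_DAMAGE, &damage,
//                     (size_t)1);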
2336
2337 // ----------------------------------------------------------------------
2338 // the lock/unlock APIs must be used from the same thread
2339
copyBlt(const sp<GraphicBuffer> & dst,const sp<GraphicBuffer> & src,const Region & reg,int * dstFenceFd)2340 static status_t copyBlt(
2341 const sp<GraphicBuffer>& dst,
2342 const sp<GraphicBuffer>& src,
2343 const Region& reg,
2344 int *dstFenceFd)
2345 {
2346 if (dst->getId() == src->getId())
2347 return OK;
2348
2349 // src and dst width, height and format must be identical. No verification
2350 // is done here.
2351 status_t err;
2352 uint8_t* src_bits = nullptr;
2353 err = src->lock(GRALLOC_USAGE_SW_READ_OFTEN, reg.bounds(),
2354 reinterpret_cast<void**>(&src_bits));
2355 ALOGE_IF(err, "error locking src buffer %s", strerror(-err));
2356
2357 uint8_t* dst_bits = nullptr;
2358 err = dst->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, reg.bounds(),
2359 reinterpret_cast<void**>(&dst_bits), *dstFenceFd);
2360 ALOGE_IF(err, "error locking dst buffer %s", strerror(-err));
2361 *dstFenceFd = -1;
2362
2363 Region::const_iterator head(reg.begin());
2364 Region::const_iterator tail(reg.end());
2365 if (head != tail && src_bits && dst_bits) {
2366 const size_t bpp = bytesPerPixel(src->format);
2367 const size_t dbpr = static_cast<uint32_t>(dst->stride) * bpp;
2368 const size_t sbpr = static_cast<uint32_t>(src->stride) * bpp;
2369
2370 while (head != tail) {
2371 const Rect& r(*head++);
2372 int32_t h = r.height();
2373 if (h <= 0) continue;
2374 size_t size = static_cast<uint32_t>(r.width()) * bpp;
2375 uint8_t const * s = src_bits +
2376 static_cast<uint32_t>(r.left + src->stride * r.top) * bpp;
2377 uint8_t * d = dst_bits +
2378 static_cast<uint32_t>(r.left + dst->stride * r.top) * bpp;
2379 if (dbpr==sbpr && size==sbpr) {
2380 size *= static_cast<size_t>(h);
2381 h = 1;
2382 }
2383 do {
2384 memcpy(d, s, size);
2385 d += dbpr;
2386 s += sbpr;
2387 } while (--h > 0);
2388 }
2389 }
2390
2391 if (src_bits)
2392 src->unlock();
2393
2394 if (dst_bits)
2395 dst->unlockAsync(dstFenceFd);
2396
2397 return err;
2398 }
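// Worked example of the stride math above (illustrative): for an RGBA_8888
// region 300 pixels wide, bpp = 4, so each row copies size = 300 * 4 = 1200
// bytes, advancing by sbpr = src->stride * 4 and dbpr = dst->stride * 4 bytes
// per row. Only when both buffers are fully packed (dbpr == sbpr == size) does
// the loop collapse the region into a single memcpy of size * height bytes.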
2399
2400 // ----------------------------------------------------------------------------
2401
lock(ANativeWindow_Buffer * outBuffer,ARect * inOutDirtyBounds)2402 status_t Surface::lock(
2403 ANativeWindow_Buffer* outBuffer, ARect* inOutDirtyBounds)
2404 {
2405 if (mLockedBuffer != nullptr) {
2406 ALOGE("Surface::lock failed, already locked");
2407 return INVALID_OPERATION;
2408 }
2409
2410 if (!mConnectedToCpu) {
2411 int err = Surface::connect(NATIVE_WINDOW_API_CPU);
2412 if (err) {
2413 return err;
2414 }
2415 // we're intending to do software rendering from this point
2416 setUsage(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN);
2417 }
2418
2419 ANativeWindowBuffer* out;
2420 int fenceFd = -1;
2421 status_t err = dequeueBuffer(&out, &fenceFd);
2422 ALOGE_IF(err, "dequeueBuffer failed (%s)", strerror(-err));
2423 if (err == NO_ERROR) {
2424 sp<GraphicBuffer> backBuffer(GraphicBuffer::getSelf(out));
2425 const Rect bounds(backBuffer->width, backBuffer->height);
2426
2427 Region newDirtyRegion;
2428 if (inOutDirtyBounds) {
2429 newDirtyRegion.set(static_cast<Rect const&>(*inOutDirtyBounds));
2430 newDirtyRegion.andSelf(bounds);
2431 } else {
2432 newDirtyRegion.set(bounds);
2433 }
2434
2435 // figure out if we can copy the frontbuffer back
2436 const sp<GraphicBuffer>& frontBuffer(mPostedBuffer);
2437 const bool canCopyBack = (frontBuffer != nullptr &&
2438 backBuffer->width == frontBuffer->width &&
2439 backBuffer->height == frontBuffer->height &&
2440 backBuffer->format == frontBuffer->format);
2441
2442 if (canCopyBack) {
2443 // copy the area that is invalid and not repainted this round
2444 const Region copyback(mDirtyRegion.subtract(newDirtyRegion));
2445 if (!copyback.isEmpty()) {
2446 copyBlt(backBuffer, frontBuffer, copyback, &fenceFd);
2447 }
2448 } else {
2449 // if we can't copy-back anything, modify the user's dirty
2450 // region to make sure they redraw the whole buffer
2451 newDirtyRegion.set(bounds);
2452 mDirtyRegion.clear();
2453 Mutex::Autolock lock(mMutex);
2454 for (size_t i=0 ; i<NUM_BUFFER_SLOTS ; i++) {
2455 mSlots[i].dirtyRegion.clear();
2456 }
2457 }
2458
2460 { // scope for the lock
2461 Mutex::Autolock lock(mMutex);
2462 int backBufferSlot(getSlotFromBufferLocked(backBuffer.get()));
2463 if (backBufferSlot >= 0) {
2464 Region& dirtyRegion(mSlots[backBufferSlot].dirtyRegion);
2465 mDirtyRegion.subtract(dirtyRegion);
2466 dirtyRegion = newDirtyRegion;
2467 }
2468 }
2469
2470 mDirtyRegion.orSelf(newDirtyRegion);
2471 if (inOutDirtyBounds) {
2472 *inOutDirtyBounds = newDirtyRegion.getBounds();
2473 }
2474
2475 void* vaddr;
2476 status_t res = backBuffer->lockAsync(
2477 GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
2478 newDirtyRegion.bounds(), &vaddr, fenceFd);
2479
2480 ALOGW_IF(res, "failed locking buffer (handle = %p)",
2481 backBuffer->handle);
2482
2483 if (res != 0) {
2484 err = INVALID_OPERATION;
2485 } else {
2486 mLockedBuffer = backBuffer;
2487 outBuffer->width = backBuffer->width;
2488 outBuffer->height = backBuffer->height;
2489 outBuffer->stride = backBuffer->stride;
2490 outBuffer->format = backBuffer->format;
2491 outBuffer->bits = vaddr;
2492 }
2493 }
2494 return err;
2495 }
2496
unlockAndPost()2497 status_t Surface::unlockAndPost()
2498 {
2499 if (mLockedBuffer == nullptr) {
2500 ALOGE("Surface::unlockAndPost failed, no locked buffer");
2501 return INVALID_OPERATION;
2502 }
2503
2504 int fd = -1;
2505 status_t err = mLockedBuffer->unlockAsync(&fd);
2506 ALOGE_IF(err, "failed unlocking buffer (%p)", mLockedBuffer->handle);
2507
2508 err = queueBuffer(mLockedBuffer.get(), fd);
2509 ALOGE_IF(err, "queueBuffer (handle=%p) failed (%s)",
2510 mLockedBuffer->handle, strerror(-err));
2511
2512 mPostedBuffer = mLockedBuffer;
2513 mLockedBuffer = nullptr;
2514 return err;
2515 }
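// Illustrative usage sketch (not part of this file): the lock()/unlockAndPost()
// pair above backs the NDK software-rendering loop. Both calls must come from
// the same thread, and the dirty bounds are updated in place to the region the
// caller actually needs to redraw.
//
//     #include <android/native_window.h>
//
//     void drawFrame(ANativeWindow* window) {
//         ANativeWindow_Buffer buffer;
//         ARect dirty = { 0, 0, 64, 64 };  // the area we intend to touch
//         if (ANativeWindow_lock(window, &buffer, &dirty) == 0) {
//             // buffer.bits / buffer.stride describe the CPU-mapped pixels;
//             // repaint at least the (possibly expanded) dirty rect.
//             ANativeWindow_unlockAndPost(window);
//         }
//     }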
2516
waitForNextFrame(uint64_t lastFrame,nsecs_t timeout)2517 bool Surface::waitForNextFrame(uint64_t lastFrame, nsecs_t timeout) {
2518 Mutex::Autolock lock(mMutex);
2519 if (mNextFrameNumber > lastFrame) {
2520 return true;
2521 }
2522 return mQueueBufferCondition.waitRelative(mMutex, timeout) == OK;
2523 }
2524
getUniqueId(uint64_t * outId) const2525 status_t Surface::getUniqueId(uint64_t* outId) const {
2526 Mutex::Autolock lock(mMutex);
2527 return mGraphicBufferProducer->getUniqueId(outId);
2528 }
2529
getConsumerUsage(uint64_t * outUsage) const2530 int Surface::getConsumerUsage(uint64_t* outUsage) const {
2531 Mutex::Autolock lock(mMutex);
2532 return mGraphicBufferProducer->getConsumerUsage(outUsage);
2533 }
2534
getAndFlushRemovedBuffers(std::vector<sp<GraphicBuffer>> * out)2535 status_t Surface::getAndFlushRemovedBuffers(std::vector<sp<GraphicBuffer>>* out) {
2536 if (out == nullptr) {
2537 ALOGE("%s: out must not be null!", __FUNCTION__);
2538 return BAD_VALUE;
2539 }
2540
2541 Mutex::Autolock lock(mMutex);
2542 *out = mRemovedBuffers;
2543 mRemovedBuffers.clear();
2544 return OK;
2545 }
2546
attachAndQueueBufferWithDataspace(Surface * surface,sp<GraphicBuffer> buffer,Dataspace dataspace)2547 status_t Surface::attachAndQueueBufferWithDataspace(Surface* surface, sp<GraphicBuffer> buffer,
2548 Dataspace dataspace) {
2549 if (buffer == nullptr) {
2550 return BAD_VALUE;
2551 }
2552 int err = static_cast<ANativeWindow*>(surface)->perform(surface, NATIVE_WINDOW_API_CONNECT,
2553 NATIVE_WINDOW_API_CPU);
2554 if (err != OK) {
2555 return err;
2556 }
2557 ui::Dataspace tmpDataspace = surface->getBuffersDataSpace();
2558 err = surface->setBuffersDataSpace(dataspace);
2559 if (err != OK) {
2560 return err;
2561 }
2562 err = surface->attachBuffer(buffer->getNativeBuffer());
2563 if (err != OK) {
2564 return err;
2565 }
2566 err = static_cast<ANativeWindow*>(surface)->queueBuffer(surface, buffer->getNativeBuffer(), -1);
2567 if (err != OK) {
2568 return err;
2569 }
2570 err = surface->setBuffersDataSpace(tmpDataspace);
2571 if (err != OK) {
2572 return err;
2573 }
2574 err = surface->disconnect(NATIVE_WINDOW_API_CPU);
2575 return err;
2576 }
2577
setAutoPrerotation(bool autoPrerotation)2578 int Surface::setAutoPrerotation(bool autoPrerotation) {
2579 ATRACE_CALL();
2580 ALOGV("Surface::setAutoPrerotation (%d)", autoPrerotation);
2581 Mutex::Autolock lock(mMutex);
2582
2583 if (mAutoPrerotation == autoPrerotation) {
2584 return OK;
2585 }
2586
2587 status_t err = mGraphicBufferProducer->setAutoPrerotation(autoPrerotation);
2588 if (err == NO_ERROR) {
2589 mAutoPrerotation = autoPrerotation;
2590 }
2591 ALOGE_IF(err, "IGraphicBufferProducer::setAutoPrerotation(%d) returned %s", autoPrerotation,
2592 strerror(-err));
2593 return err;
2594 }
2595
onBuffersDiscarded(const std::vector<int32_t> & slots)2596 void Surface::ProducerListenerProxy::onBuffersDiscarded(const std::vector<int32_t>& slots) {
2597 ATRACE_CALL();
2598 sp<Surface> parent = mParent.promote();
2599 if (parent == nullptr) {
2600 return;
2601 }
2602
2603 std::vector<sp<GraphicBuffer>> discardedBufs;
2604 status_t res = parent->getAndFlushBuffersFromSlots(slots, &discardedBufs);
2605 if (res != OK) {
2606 ALOGE("%s: Failed to get buffers from slots: %s(%d)", __FUNCTION__,
2607 strerror(-res), res);
2608 return;
2609 }
2610
2611 mSurfaceListener->onBuffersDiscarded(discardedBufs);
2612 }
2613
setFrameRate(float frameRate,int8_t compatibility,int8_t changeFrameRateStrategy)2614 status_t Surface::setFrameRate(float frameRate, int8_t compatibility,
2615 int8_t changeFrameRateStrategy) {
2616 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_SETFRAMERATE)
2617 if (flags::bq_setframerate()) {
2618 status_t err = mGraphicBufferProducer->setFrameRate(frameRate, compatibility,
2619 changeFrameRateStrategy);
2620 ALOGE_IF(err, "IGraphicBufferProducer::setFrameRate(%.2f) returned %s", frameRate,
2621 strerror(-err));
2622 return err;
2623 }
2624 #else
2625 static_cast<void>(frameRate);
2626 static_cast<void>(compatibility);
2627 static_cast<void>(changeFrameRateStrategy);
2628 #endif
2629
2630 ALOGI("Surface::setFrameRate is deprecated, setFrameRate hint is dropped as destination is not "
2631 "SurfaceFlinger");
2632 // ISurfaceComposer no longer supports setFrameRate, so we return NO_ERROR when the API is
2633 // called to avoid crashing apps, as BAD_VALUE can generate a fatal exception in apps.
2634 return NO_ERROR;
2635 }
2636
setFrameTimelineInfo(uint64_t,const FrameTimelineInfo &)2637 status_t Surface::setFrameTimelineInfo(uint64_t /*frameNumber*/,
2638 const FrameTimelineInfo& /*frameTimelineInfo*/) {
2639 // ISurfaceComposer no longer supports setFrameTimelineInfo
2640 return BAD_VALUE;
2641 }
2642
2643 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_EXTENDEDALLOCATE)
setAdditionalOptions(const std::vector<gui::AdditionalOptions> & options)2644 status_t Surface::setAdditionalOptions(const std::vector<gui::AdditionalOptions>& options) {
2645 if (!GraphicBufferAllocator::get().supportsAdditionalOptions()) {
2646 return INVALID_OPERATION;
2647 }
2648
2649 Mutex::Autolock lock(mMutex);
2650 return mGraphicBufferProducer->setAdditionalOptions(options);
2651 }
2652 #endif
2653
getSurfaceControlHandle() const2654 sp<IBinder> Surface::getSurfaceControlHandle() const {
2655 Mutex::Autolock lock(mMutex);
2656 return mSurfaceControlHandle;
2657 }
2658
destroy()2659 void Surface::destroy() {
2660 Mutex::Autolock lock(mMutex);
2661 mSurfaceControlHandle = nullptr;
2662 }
2663
getDebugName()2664 const char* Surface::getDebugName() {
2665 std::unique_lock lock{mNameMutex};
2666 if (mName.empty()) {
2667 mName = getConsumerName();
2668 }
2669 return mName.c_str();
2670 }
2671
2672 }; // namespace android
2673