/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
//#define LOG_NDEBUG 0
#define LOG_TAG "GraphicsTracker"
#include <fcntl.h>
#include <unistd.h>

#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <C2BlockInternal.h>
#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

static constexpr int kMaxDequeueMin = 1;
static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;
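// Note: kMaxDequeueMax stays two slots below NUM_BUFFER_SLOTS, presumably to
// leave headroom for buffers held on the consumer side of the BufferQueue.
// (This rationale is an assumption; the margin is not documented here.)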
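// Retrieves the unique 64-bit ID of the AHardwareBuffer backing a graphic
// block. This only works for AHWB-based block pool data on Android T
// (API 33) and later, where AHardwareBuffer_getId() is available.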
c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    std::shared_ptr<const _C2BlockPoolData> bpData = _C2BlockFactory::GetGraphicBlockPoolData(blk);
    if (!bpData || bpData->getType() != _C2BlockPoolData::TYPE_AHWBUFFER) {
        return C2_BAD_VALUE;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf;
        if (!_C2BlockFactory::GetAHardwareBuffer(bpData, &pBuf)) {
            return C2_CORRUPTED;
        }
        int ret = AHardwareBuffer_getId(pBuf, bid);
        if (ret != ::android::OK) {
            return C2_CORRUPTED;
        }
        return C2_OK;
    } else {
        return C2_OMITTED;
    }
}

} // anonymous namespace

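// BufferItem wraps one AHardwareBuffer together with its BufferQueue slot,
// generation number, usage bits and the last fence. Each fully constructed
// item (mInit == true) holds exactly one strong reference on the AHWB, taken
// in the constructors and dropped in the destructor.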
GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
        int ret = AHardwareBuffer_getId(pBuf, &mId);
        if (ret != ::android::OK) {
            return;
        }
        mUsage = buf->getUsage();
        AHardwareBuffer_acquire(pBuf);
        mBuf = pBuf;
        mFence = fence;
        mInit = true;
    }
}

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, AHardwareBuffer *pBuf, uint64_t usage) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{usage},
        mFence{Fence::NO_FENCE} {
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        int ret = AHardwareBuffer_getId(mBuf, &mId);
        if (ret != ::android::OK) {
            mInit = false;
            mBuf = nullptr;
            return;
        }
    }
    AHardwareBuffer_acquire(mBuf);
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}


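// Clones the underlying AHardwareBuffer into a new BufferItem that carries
// the merged usage bits and a new generation number. Used when a dequeued
// buffer must be attached to a different surface than the one it came from
// (see render() below).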
std::shared_ptr<GraphicsTracker::BufferItem> GraphicsTracker::BufferItem::migrateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
                                     AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                     &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    std::shared_ptr<BufferItem> newBuffer =
            std::make_shared<BufferItem>(newGeneration, newBuf, newUsage);
    AHardwareBuffer_release(newBuf);
    return newBuffer;
}

sp<GraphicBuffer> GraphicsTracker::BufferItem::getGraphicBuffer() {
    if (!mInit) {
        return nullptr;
    }
    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(mBuf);
    if (!gb) {
        return nullptr;
    }
    gb->setGenerationNumber(mGeneration);
    return gb;
}

GraphicsTracker::BufferCache::~BufferCache() {
    ALOGV("BufferCache destruction: generation(%d), igbp(%d)", mGeneration, (bool)mIgbp);
}

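// Per-slot blocking: a slot is blocked while a cancelBuffer()/queueBuffer()
// for it is in flight, so that a concurrent dequeue of the same slot in
// _allocate() waits (waitOnSlot) until the operation completes (unblockSlot).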
void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("block slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("unblock slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}

GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mBufferCache(new BufferCache()), mNumDequeueing{0}, mMaxDequeue{maxDequeueCount},
    mMaxDequeueCommitted{maxDequeueCount},
    mDequeueable{maxDequeueCount},
    mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
    mInConfig{false}, mStopped{false} {
    if (maxDequeueCount < kMaxDequeueMin) {
        mMaxDequeue = kMaxDequeueMin;
        mMaxDequeueCommitted = kMaxDequeueMin;
        mDequeueable = kMaxDequeueMin;
    } else if (maxDequeueCount > kMaxDequeueMax) {
        mMaxDequeue = kMaxDequeueMax;
        mMaxDequeueCommitted = kMaxDequeueMax;
        mDequeueable = kMaxDequeueMax;
    }
    int pipefd[2] = { -1, -1};
    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);

    mReadPipeFd.reset(pipefd[0]);
    mWritePipeFd.reset(pipefd[1]);

    // ctor does not require lock to be held.
    writeIncDequeueableLocked(mDequeueable);

    CHECK(ret >= 0);
}

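// Dequeueability is mirrored into a pipe so that clients can wait on an fd:
// in steady state the pipe holds exactly mDequeueable pending bytes. One byte
// is written per buffer that becomes dequeueable (writeIncDequeueableLocked)
// and one is drained per buffer that stops being dequeueable
// (drainDequeueableLocked).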
GraphicsTracker::~GraphicsTracker() {
    stop();
}

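// Called at points where mDequeueable is about to be incremented by one.
// If a smaller max dequeue count has been requested, the pending increment
// (plus any currently dequeueable slots) is consumed to shrink mMaxDequeue
// toward the request instead. Returns true if the increment was consumed;
// *updateDequeue is set when the new count still has to be committed to IGBP.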
bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: can't we adjust during config? would not committing it be safe?
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested.has_value() && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested.value();
        int drained = 0;
        // Since we are supposed to increase mDequeueable by one already
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested.value();
            mDequeueable -= (delta - 1);
            drained = delta - 1;
        } else {
            mMaxDequeue -= adjustable;
            drained = mDequeueable;
            mDequeueable = 0;
        }
        if (drained > 0) {
            drainDequeueableLocked(drained);
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}

c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    // TODO: wait until operations on the previous IGBP have completed.
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueRequested = 0;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeueRequested.has_value()) {
            prevDequeueRequested = mMaxDequeueRequested.value();
        }
    }
    // NOTE: Switching to the same surface is blocked from MediaCodec.
    // Switching to the same surface might not work if tried, since disconnect()
    // to the old surface in MediaCodec and allocation from the new surface in
    // GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
    }
    if (ret != ::android::OK ||
            prevCache->mGeneration == generation) {
        ALOGE("new surface configuration failed due to a wrong bqId or the same generation:"
              "igbp(%d:%llu -> %llu), gen(%lu -> %lu)", (bool)igbp,
              (unsigned long long)prevCache->mBqId, (unsigned long long)bqId,
              (unsigned long)prevCache->mGeneration, (unsigned long)generation);
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        return C2_BAD_VALUE;
    }
    ALOGD("new surface in configuration: maxDequeueRequested(%d), maxDequeueCommitted(%d)",
          prevDequeueRequested, prevDequeueCommitted);
    if (prevDequeueRequested > 0 && prevDequeueRequested > prevDequeueCommitted) {
        prevDequeueCommitted = prevDequeueRequested;
    }
    if (igbp) {
        ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
        if (ret != ::android::OK) {
            ALOGE("new surface maxDequeuedBufferCount configuration failed");
            // TODO: sort out the error from igbp and return an error accordingly.
            std::unique_lock<std::mutex> l(mLock);
            mInConfig = false;
            return C2_CORRUPTED;
        }
    }
    ALOGD("new surface configured with id:%llu gen:%lu maxDequeue:%d",
          (unsigned long long)bqId, (unsigned long)generation, prevDequeueCommitted);
    std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
        // {@code dequeued} is the number of currently dequeued buffers.
        // {@code prevDequeueCommitted} is the max dequeued buffer count at any
        //  moment from the new surface.
        // {@code newDequeueable} is hence the current # of dequeueable buffers
        //  if no change occurs.
        int dequeued = mDequeued.size() + mNumDequeueing;
        int newDequeueable = prevDequeueCommitted - dequeued;
        if (newDequeueable < 0) {
            // This should not happen, but if it does, we respect the value
            // and try to continue.
            ALOGE("calculated new dequeueable is negative: %d max(%d),dequeued(%d)",
                  newDequeueable, prevDequeueCommitted, dequeued);
        }

        if (mMaxDequeueRequested.has_value() && mMaxDequeueRequested == prevDequeueCommitted) {
            mMaxDequeueRequested.reset();
        }
        mMaxDequeue = mMaxDequeueCommitted = prevDequeueCommitted;

        int delta = newDequeueable - mDequeueable;
        if (delta > 0) {
            writeIncDequeueableLocked(delta);
        } else if (delta < 0) {
            drainDequeueableLocked(-delta);
        }
        ALOGV("new surface dequeueable %d(delta %d), maxDequeue %d",
              newDequeueable, delta, mMaxDequeue);
        mDequeueable = newDequeueable;
    }
    return C2_OK;
}

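// Updates the max dequeue count in two phases: the new value is recorded as
// requested under mLock, committed to the IGBP outside the lock (when
// shrinking, only the portion that can be drained immediately), and then
// finalized under mLock again. A shrink that cannot complete immediately
// stays pending in mMaxDequeueRequested and is retried via
// adjustDequeueConfLocked()/updateDequeueConf() as buffers are returned.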
c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
        return C2_BAD_VALUE;
    }

    // Max dequeue count which can be committed to IGBP.
    // (Sometimes maxDequeueCount cannot be committed if the number of
    // currently dequeued buffers is bigger.)
    int maxDequeueToCommit;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested.has_value()) {
            if (mMaxDequeueRequested == maxDequeueCount) {
                ALOGD("maxDequeueCount requested with %d already", maxDequeueCount);
                return C2_OK;
            }
        } else if (mMaxDequeue == maxDequeueCount) {
            ALOGD("maxDequeueCount is already %d", maxDequeueCount);
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing,
            // a delivered ready-to-allocate event may not be fulfilled.
            // Another wait via a waitable object may be necessary in that case.
            int delta = std::min(mMaxDequeue - maxDequeueCount, mDequeueable);
            maxDequeueToCommit = mMaxDequeue - delta;
            mDequeueable -= delta;
            if (delta > 0) {
                drainDequeueableLocked(delta);
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != mMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (committed) {
            ALOGD("maxDequeueCount committed to IGBP: %d", maxDequeueToCommit);
        } else {
            // This should not happen.
            ALOGE("maxDequeueCount update to IGBP failed with error(%d)", (int)ret);
        }
    }

    int oldMaxDequeue = 0;
    int requested = 0;
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        oldMaxDequeue = mMaxDequeue;
        mMaxDequeue = maxDequeueToCommit; // we already drained dequeueable
        if (committed) {
            clearCacheIfNecessaryLocked(cache, maxDequeueToCommit);
            mMaxDequeueCommitted = maxDequeueToCommit;
            if (mMaxDequeueRequested == mMaxDequeueCommitted &&
                  mMaxDequeueRequested == mMaxDequeue) {
                mMaxDequeueRequested.reset();
            }
            if (mMaxDequeueRequested.has_value()) {
                requested = mMaxDequeueRequested.value();
            }
            int delta = mMaxDequeueCommitted - oldMaxDequeue;
            if (delta > 0) {
                mDequeueable += delta;
                writeIncDequeueableLocked(delta);
            }
        }
    }
    ALOGD("maxDequeueCount change %d -> %d: pending: %d",
          oldMaxDequeue, maxDequeueToCommit, requested);

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}

void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    ALOGV("trying to update max dequeue count");
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (!mMaxDequeueRequested.has_value() || mMaxDequeue != mMaxDequeueRequested) {
            return;
        }
        if (mMaxDequeueCommitted == mMaxDequeueRequested) {
            // already committed. may not happen.
            mMaxDequeueRequested.reset();
            return;
        }
        dequeueCommit = mMaxDequeue;
        mInConfig = true;
        cache = mBufferCache;
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (committed) {
            ALOGD("delayed maxDequeueCount update to IGBP: %d", dequeueCommit);
        } else {
            // This should not happen.
            ALOGE("delayed maxDequeueCount update to IGBP failed with error(%d)", (int)ret);
        }
    }
    {
        // cache == mBufferCache here, since we hold the config lock.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            clearCacheIfNecessaryLocked(cache, dequeueCommit);
            mMaxDequeueCommitted = dequeueCommit;
        }
        mMaxDequeueRequested.reset();
    }
}

void GraphicsTracker::clearCacheIfNecessaryLocked(const std::shared_ptr<BufferCache> &cache,
                                            int maxDequeueCommitted) {
    int cleared = 0;
    size_t origCacheSize = cache->mBuffers.size();
    if (cache->mIgbp && maxDequeueCommitted < mMaxDequeueCommitted) {
        // We are shrinking the number of buffers, so evict previously cached
        // buffers that are not currently dequeued (or already deallocating).
        for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
            uint64_t bid = it->second->mId;
            if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                ++cleared;
                it = cache->mBuffers.erase(it);
            } else {
                ++it;
            }
        }
    }
    ALOGD("Cache size %zu -> %zu: maybe_cleared(%d), dequeued(%zu)",
          origCacheSize, cache->mBuffers.size(), cleared, mDequeued.size());
}

int GraphicsTracker::getCurDequeueable() {
    std::unique_lock<std::mutex> l(mLock);
    return mDequeueable;
}

void GraphicsTracker::stop() {
    // TODO: wait until all operations on the current IGBP
    // have completed.
    std::unique_lock<std::mutex> l(mLock);
    if (mStopped) {
        return;
    }
    mStopped = true;
    int writeFd = mWritePipeFd.release();
    if (writeFd >= 0) {
        ::close(writeFd);
    }
}

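// Closing the write end in stop() makes a read of the drained pipe return 0
// (EOF) instead of EAGAIN, which requestAllocate() below reports as
// C2_BAD_STATE.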
void GraphicsTracker::writeIncDequeueableLocked(int inc) {
    CHECK(inc > 0 && inc <= kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    if (mStopped) { // the writing end was closed in stop()
        return;
    }
    int writeFd = mWritePipeFd.get();
    if (writeFd < 0) {
        // initialization failed; the fd is not valid.
        return;
    }
    int ret = ::write(writeFd, buf, inc);
    // Since this is non-blocking i/o, it never returns EINTR.
    //
    // ::write() to a pipe is guaranteed to succeed atomically if it writes no
    // more than PIPE_BUF bytes. The buffer size of a pipe/fifo is at least 4K
    // and our total max pending count is at most kMaxDequeueMax, far below
    // that. So it never returns EAGAIN here either.
    // See pipe(7) for further information.
    //
    // Other errors are serious errors and we cannot synchronize mDequeueable
    // to the number of pending bytes in the pipe/fifo anymore. So better to
    // abort here.
    // TODO: do not abort here. (b/318717399)
    CHECK(ret == inc);
}

void GraphicsTracker::drainDequeueableLocked(int dec) {
    CHECK(dec > 0 && dec <= kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    if (mStopped) {
        return;
    }
    int readFd = mReadPipeFd.get();
    if (readFd < 0) {
        // initialization failed; the fd is not valid.
        return;
    }
    int ret = ::read(readFd, buf, dec);
    // TODO: do not abort here. (b/318717399)
    CHECK(ret == dec);
}

c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
    *pipeFd = ::dup(mReadPipeFd.get());
    if (*pipeFd < 0) {
        if (mReadPipeFd.get() < 0) {
            return C2_BAD_STATE;
        }
        // dup error
        ALOGE("dup() for the reading end failed %d", errno);
        return C2_NO_MEMORY;
    }
    return C2_OK;
}
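
// A client waits for dequeueability by polling the duplicated fd. A minimal
// sketch of the intended usage (hypothetical client code, not part of this
// class):
//
//   int fd = -1;
//   if (tracker->getWaitableFd(&fd) == C2_OK) {
//       struct pollfd pfd = { fd, POLLIN, 0 };
//       if (::poll(&pfd, 1, timeoutMs) > 0 && (pfd.revents & POLLIN)) {
//           // A buffer is likely dequeueable now; allocate() may still
//           // return C2_BLOCKING if another thread raced us to it.
//       }
//       ::close(fd);
//   }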

c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
    std::lock_guard<std::mutex> l(mLock);
    if (mDequeueable > 0) {
        char buf[1];
        int ret = ::read(mReadPipeFd.get(), buf, 1);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // Proper usage of the waitable object should not return this,
                // but there could be alloc requests from the HAL that ignore
                // the internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        if (ret == 0) {
            // writing end is closed
            ALOGE("writing end for the waitable object seems to be closed");
            return C2_BAD_STATE;
        }
        mNumDequeueing++;
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}


// If {@code cached} is {@code true}, {@code pBuffer} is read from the
// current cache. Otherwise, {@code pBuffer} is written into the
// current cache.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                    bool cached, int slot, const sp<Fence> &fence,
                    std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    mNumDequeueing--;
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
            ALOGV("an allocated buffer was already cached, updated Fence");
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
            ALOGV("cached a newly allocated buffer from the current IGBP");
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        ALOGD("allocate error(%d): Dequeued(%zu), Dequeueable(%d)",
              (int)res, mDequeued.size(), mDequeueable + 1);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
    }
}


// If a buffer is newly allocated, {@code cached} is {@code false}
// and the buffer is returned in {@code buffer}.
// Otherwise, {@code cached} is {@code true} and the buffer should be
// retrieved from the cache by commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                                      uint32_t width, uint32_t height, PixelFormat format,
                                      uint64_t usage,
                                      bool *cached,
                                      int *rSlotId,
                                      sp<Fence> *rFence,
                                      std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // allocate directly
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *rSlotId = -1;
        *rFence = Fence::NO_FENCE;
        *buffer = std::make_shared<BufferItem>(generation, buf, usage);
        AHardwareBuffer_release(buf); // drop the extra ref from
                                      // AHardwareBuffer_allocate();
                                      // BufferItem holds its own.
        if (!*buffer) {
            ALOGE("direct allocation of AHB successful, but failed to create BufferItem");
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("direct allocation of AHB successful, but BufferItem init failed");
            buffer->reset();
            return C2_CORRUPTED;
        }
        ALOGV("allocate: direct allocation without igbp");
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, nullptr);
    if (status < ::android::OK) {
        if (status == ::android::TIMED_OUT || status == ::android::WOULD_BLOCK) {
            ALOGW("BQ might not be ready for dequeueBuffer()");
            return C2_BLOCKING;
        }
        bool cacheExpired = false;
        {
            std::unique_lock<std::mutex> l(mLock);
            cacheExpired = (mBufferCache.get() != cache.get());
        }
        if (cacheExpired) {
            ALOGW("a new BQ is configured. dequeueBuffer() error %d", (int)status);
            return C2_BLOCKING;
        }
        ALOGE("BQ in inconsistent status. dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
            cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            ALOGE("allocate by dequeueBuffer() successful, but requestBuffer() failed %d",
                  status);
            igbp->cancelBuffer(slotId, fence);
            // This might be due to life-cycle end and/or surface switching.
            return C2_BLOCKING;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!*buffer) {
            ALOGE("allocate by dequeueBuffer() successful, but creating BufferItem failed");
            igbp->cancelBuffer(slotId, fence);
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("allocate by dequeueBuffer() successful, but BufferItem init failed");
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
    } else {
        *cached = true;
    }
    ALOGV("allocate: a newly allocated buffer from igbp cached %d, slot: %d",
          *cached, slotId);
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load() == true) {
        ALOGE("cannot allocate due to being stopped");
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferCache> cache;
    c2_status_t res = requestAllocate(&cache);
    if (res != C2_OK) {
        return res;
    }
    ALOGV("allocatable or dequeueable");

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        ALOGV("allocated a buffer width:%u height:%u pixelformat:%d usage:%llu",
              width, height, format, (unsigned long long)usage);
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should stay valid even after the BufferItem is destroyed.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}

c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                                              bool *completed, bool *updateDequeue,
                                              std::shared_ptr<BufferCache> *cache, int *slotId,
                                              sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to deallocate a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate a non-dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // mark this deallocating
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // the buffer does not come from the current surface.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
    }
    return C2_OK;
}

void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    if (cache) {
        cache->unblockSlot(slotId);
    }
    if (adjustDequeueConfLocked(updateDequeue)) {
        return;
    }
    mDequeueable++;
    writeIncDequeueableLocked(1);
}


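// Returns a dequeued buffer to the tracker without rendering it. If the
// buffer belongs to the current surface, it is cancelled back to the IGBP;
// otherwise it is simply dropped and the dequeueable count is restored.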
c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // Ignore the return value since the IGBP could already be stale.
    // cache->mIgbp is not null if completed is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                                          std::shared_ptr<BufferItem> *pBuffer,
                                          bool *fromCache,
                                          bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to render a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render a non-dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render requested without a surface.
        // Reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
        *fromCache = true;
    } else {
        *fromCache = false;
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

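// Finalizes a render. {@code oldBuffer} is non-null only when the buffer was
// migrated to the current surface; in that case the migrated {@code buffer}
// is registered in the cache under its new slot. If the surface has changed
// or the queued buffer was replaced, the dequeueable count is restored
// immediately instead of waiting for onReleased().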
void GraphicsTracker::commitRender(const std::shared_ptr<BufferCache> &cache,
                                  const std::shared_ptr<BufferItem> &buffer,
                                  const std::shared_ptr<BufferItem> &oldBuffer,
                                  bool bufferReplaced,
                                  bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    uint64_t origBid = oldBuffer ? oldBuffer->mId : buffer->mId;

    if (cache) {
        cache->unblockSlot(buffer->mSlot);
        if (oldBuffer) {
            // migrated, register the new buffer to the cache.
            cache->mBuffers.emplace(buffer->mSlot, buffer);
        }
    }
    mDeallocating.erase(origBid);
    mDequeued.erase(origBid);

    if (cache.get() != mBufferCache.get() || bufferReplaced) {
        // Surface changed, no need to wait for the buffer to be released.
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
        return;
    }
}

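// Queues a filled graphic block to the surface. The flow is: retrieve the
// AHWB ID of the block, disown it from the block pool, requestRender() to
// mark it rendering, migrate (attach) it if it does not belong to the
// current surface, queueBuffer() to the IGBP, and finally commitRender() to
// update the bookkeeping.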
c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                                   const IGraphicBufferProducer::QueueBufferInput &input,
                                   IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
    std::shared_ptr<_C2BlockPoolData> poolData =
            _C2BlockFactory::GetGraphicBlockPoolData(blk);
    _C2BlockFactory::DisownIgbaBlock(poolData);
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    std::shared_ptr<BufferItem> oldBuffer;
    bool updateDequeue = false;
    bool fromCache = false;
    res = requestRender(bid, &cache, &buffer, &fromCache, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    int cacheSlotId = fromCache ? buffer->mSlot : -1;
    ALOGV("render prepared: igbp(%d) slot(%d)", bool(cache->mIgbp), cacheSlotId);
    if (!fromCache) {
        // The buffer does not come from the current cache.
        // The buffer needs to be migrated (attached).
        uint64_t newUsage = 0ULL;

        (void) cache->mIgbp->getConsumerUsage(&newUsage);
        std::shared_ptr<BufferItem> newBuffer =
                buffer->migrateBuffer(newUsage, cache->mGeneration);
        sp<GraphicBuffer> gb = newBuffer ? newBuffer->getGraphicBuffer() : nullptr;

        if (!gb) {
            ALOGE("render: realloc-ing a new buffer for migration failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid, &updateDequeue);
            if (updateDequeue) {
                updateDequeueConf();
            }
            return C2_REFUSED;
        }
        if (cache->mIgbp->attachBuffer(&(newBuffer->mSlot), gb) != ::android::OK) {
            ALOGE("render: attaching a new buffer to IGBP failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid, &updateDequeue);
            if (updateDequeue) {
                updateDequeueConf();
            }
            return C2_REFUSED;
        }
        cache->waitOnSlot(newBuffer->mSlot);
        cache->blockSlot(newBuffer->mSlot);
        oldBuffer = buffer;
        buffer = newBuffer;
    }
    ::android::status_t renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
    ALOGV("render done: migration(%d), render(err = %d)", !fromCache, renderRes);
    if (renderRes != ::android::OK) {
        CHECK(renderRes != ::android::BAD_VALUE);
        ALOGE("render: queueBuffer() failed, err = %d", renderRes);
        (void) cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
        commitDeallocate(cache, buffer->mSlot, bid, &updateDequeue);
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_REFUSED;
    }

    commitRender(cache, buffer, oldBuffer, output->bufferReplaced, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

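// Called when the consumer releases a buffer back to the BufferQueue. Only a
// release for the current generation makes a slot dequeueable again; releases
// from stale (previous) surfaces are ignored.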
void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache->mGeneration == generation) {
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                writeIncDequeueableLocked(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

} // namespace aidl::android::hardware::media::c2::implementation