//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// mtl_buffer_pool.mm:
//    Implements the class methods for BufferPool.
//

#include "libANGLE/renderer/metal/mtl_buffer_pool.h"

#include "libANGLE/renderer/metal/ContextMtl.h"
#include "libANGLE/renderer/metal/DisplayMtl.h"

namespace rx
{

namespace mtl
{

// BufferPool implementation.
BufferPool::BufferPool() : BufferPool(false) {}
BufferPool::BufferPool(bool alwaysAllocNewBuffer)
    : BufferPool(alwaysAllocNewBuffer, BufferPoolMemPolicy::Auto)
{}
BufferPool::BufferPool(bool alwaysAllocNewBuffer, BufferPoolMemPolicy policy)
    : mInitialSize(0),
      mBuffer(nullptr),
      mNextAllocationOffset(0),
      mLastFlushOffset(0),
      mSize(0),
      mAlignment(1),
      mBuffersAllocated(0),
      mMaxBuffers(0),
      mMemPolicy(policy),
      mAlwaysAllocateNewBuffer(alwaysAllocNewBuffer)
{}

angle::Result BufferPool::reset(ContextMtl *contextMtl,
                                size_t initialSize,
                                size_t alignment,
                                size_t maxBuffers)
{
    ANGLE_TRY(finalizePendingBuffer(contextMtl));
    releaseInFlightBuffers(contextMtl);

    mSize = 0;
    if (mBufferFreeList.size() && mInitialSize <= mBufferFreeList.front()->size())
    {
        // Instead of deleting old buffers, reset them to avoid excessive
        // memory re-allocations.
        if (maxBuffers && mBufferFreeList.size() > maxBuffers)
        {
            mBufferFreeList.resize(maxBuffers);
            mBuffersAllocated = maxBuffers;
        }

        mSize = mBufferFreeList.front()->size();
        for (size_t i = 0; i < mBufferFreeList.size(); ++i)
        {
            BufferRef &buffer = mBufferFreeList[i];
            if (!buffer->isBeingUsedByGPU(contextMtl))
            {
                // If buffer is not used by GPU, re-use it immediately.
                continue;
            }
            bool useSharedMem = shouldAllocateInSharedMem(contextMtl);
            if (IsError(buffer->resetWithSharedMemOpt(contextMtl, useSharedMem, mSize, nullptr)))
            {
                mBufferFreeList.clear();
                mBuffersAllocated = 0;
                mSize             = 0;
                break;
            }
        }
    }
    else
    {
        mBufferFreeList.clear();
        mBuffersAllocated = 0;
    }

    mInitialSize = initialSize;

    mMaxBuffers = maxBuffers;

    updateAlignment(contextMtl, alignment);

    return angle::Result::Continue;
}
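
// Example usage of reset() (a minimal sketch; "ctx" and "pool" are
// illustrative names, not part of this file):
//
//   mtl::BufferPool pool;
//   // Recycle any existing buffers for a new run of allocations: grow to at
//   // least 4096 bytes, align offsets to 16 bytes, keep at most 10 buffers.
//   ANGLE_TRY(pool.reset(ctx, /* initialSize */ 4096, /* alignment */ 16,
//                        /* maxBuffers */ 10));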

void BufferPool::initialize(Context *context,
                            size_t initialSize,
                            size_t alignment,
                            size_t maxBuffers)
{
    if (mBuffersAllocated)
    {
        // Invalid call, must call destroy() first.
        UNREACHABLE();
    }

    mInitialSize = initialSize;

    mMaxBuffers = maxBuffers;

    updateAlignment(context, alignment);
}

BufferPool::~BufferPool() {}

bool BufferPool::shouldAllocateInSharedMem(ContextMtl *contextMtl) const
{
    if (ANGLE_UNLIKELY(contextMtl->getDisplay()->getFeatures().forceBufferGPUStorage.enabled))
    {
        return false;
    }

    switch (mMemPolicy)
    {
        case BufferPoolMemPolicy::AlwaysSharedMem:
            return true;
        case BufferPoolMemPolicy::AlwaysGPUMem:
            return false;
        default:
            return mSize <= kSharedMemBufferMaxBufSizeHint;
    }
}
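
// Worked example of the Auto policy above (illustrative sizes only; the real
// threshold kSharedMemBufferMaxBufSizeHint is defined elsewhere in the Metal
// backend):
//
//   mMemPolicy == Auto, mSize within the hint (e.g. 1024) -> shared (CPU-visible) memory
//   mMemPolicy == Auto, mSize above the hint              -> GPU-private memory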

angle::Result BufferPool::allocateNewBuffer(ContextMtl *contextMtl)
{
    if (mMaxBuffers > 0 && mBuffersAllocated >= mMaxBuffers)
    {
        // We have reached the maximum number of buffers allowed.
        // Try to deallocate old and smaller-sized in-flight buffers.
        releaseInFlightBuffers(contextMtl);
    }

    if (mMaxBuffers > 0 && mBuffersAllocated >= mMaxBuffers)
    {
        // If we reach this point, no buffer was deallocated inside
        // releaseInFlightBuffers(); thus, the number of buffers allocated still
        // exceeds the number allowed.
        ASSERT(!mBufferFreeList.empty());

        // Reuse the oldest buffer in the free list:
        if (mBufferFreeList.front()->isBeingUsedByGPU(contextMtl))
        {
            contextMtl->flushCommandBufer();
            // Force the GPU to finish its rendering and make the old buffer available.
            contextMtl->cmdQueue().ensureResourceReadyForCPU(mBufferFreeList.front());
        }

        mBuffer = mBufferFreeList.front();
        mBufferFreeList.erase(mBufferFreeList.begin());

        return angle::Result::Continue;
    }

    bool useSharedMem = shouldAllocateInSharedMem(contextMtl);
    ANGLE_TRY(
        Buffer::MakeBufferWithSharedMemOpt(contextMtl, useSharedMem, mSize, nullptr, &mBuffer));

    ASSERT(mBuffer);

    mBuffersAllocated++;

    return angle::Result::Continue;
}

angle::Result BufferPool::allocate(ContextMtl *contextMtl,
                                   size_t sizeInBytes,
                                   uint8_t **ptrOut,
                                   BufferRef *bufferOut,
                                   size_t *offsetOut,
                                   bool *newBufferAllocatedOut)
{
    size_t sizeToAllocate = roundUp(sizeInBytes, mAlignment);

    angle::base::CheckedNumeric<size_t> checkedNextWriteOffset = mNextAllocationOffset;
    checkedNextWriteOffset += sizeToAllocate;

    if (!mBuffer || !checkedNextWriteOffset.IsValid() ||
        checkedNextWriteOffset.ValueOrDie() >= mSize ||
        // If the current buffer has been modified by the GPU, do not reuse it:
        mBuffer->isCPUReadMemNeedSync() || mAlwaysAllocateNewBuffer)
    {
        if (mBuffer)
        {
            ANGLE_TRY(finalizePendingBuffer(contextMtl));
        }

        if (sizeToAllocate > mSize)
        {
            mSize = std::max(mInitialSize, sizeToAllocate);

            // Clear the free list since the free buffers are now too small.
            destroyBufferList(contextMtl, &mBufferFreeList);
        }

        // The front of the free list should be the oldest buffer. Thus, if it is in use, the
        // rest of the free list should be in use as well.
        if (mBufferFreeList.empty() || mBufferFreeList.front()->isBeingUsedByGPU(contextMtl))
        {
            ANGLE_TRY(allocateNewBuffer(contextMtl));
        }
        else
        {
            mBuffer = mBufferFreeList.front();
            mBufferFreeList.erase(mBufferFreeList.begin());
        }

        ASSERT(mBuffer->size() == mSize);

        mNextAllocationOffset = 0;
        mLastFlushOffset      = 0;

        if (newBufferAllocatedOut != nullptr)
        {
            *newBufferAllocatedOut = true;
        }
    }
    else if (newBufferAllocatedOut != nullptr)
    {
        *newBufferAllocatedOut = false;
    }

    ASSERT(mBuffer != nullptr);

    if (bufferOut != nullptr)
    {
        *bufferOut = mBuffer;
    }

    // Optionally map() the buffer if possible.
    if (ptrOut)
    {
        // We don't need to synchronize with GPU access, since allocation should return a
        // non-overlapping region each time.
        *ptrOut = mBuffer->mapWithOpt(contextMtl, /** readOnly */ false, /** noSync */ true) +
                  mNextAllocationOffset;
    }

    if (offsetOut)
    {
        *offsetOut = static_cast<size_t>(mNextAllocationOffset);
    }
    mNextAllocationOffset += static_cast<uint32_t>(sizeToAllocate);
    return angle::Result::Continue;
}
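
// Example usage of allocate() (a minimal sketch; "ctx", "pool", "srcData",
// and "srcDataSize" are illustrative and not defined in this file):
//
//   uint8_t *mappedPtr = nullptr;
//   BufferRef buffer;
//   size_t offset      = 0;
//   ANGLE_TRY(pool.allocate(ctx, srcDataSize, &mappedPtr, &buffer, &offset,
//                           nullptr));
//   memcpy(mappedPtr, srcData, srcDataSize);
//   // Flush the written range so the GPU sees the data:
//   ANGLE_TRY(pool.commit(ctx));
//   // "buffer" and "offset" can now be bound, e.g. as a vertex buffer.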

angle::Result BufferPool::commit(ContextMtl *contextMtl, bool flushEntireBuffer)
{
    if (mBuffer && mNextAllocationOffset > mLastFlushOffset)
    {
        if (flushEntireBuffer)
        {
            mBuffer->flush(contextMtl, 0, mNextAllocationOffset);
        }
        else
        {
            mBuffer->flush(contextMtl, mLastFlushOffset, mNextAllocationOffset - mLastFlushOffset);
        }
        mLastFlushOffset = mNextAllocationOffset;
    }
    return angle::Result::Continue;
}

angle::Result BufferPool::finalizePendingBuffer(ContextMtl *contextMtl)
{
    if (mBuffer)
    {
        ANGLE_TRY(commit(contextMtl));
        // commit() already flushes so no need to flush here.
        mBuffer->unmapNoFlush(contextMtl);

        mInFlightBuffers.push_back(mBuffer);
        mBuffer = nullptr;
    }

    mNextAllocationOffset = 0;
    mLastFlushOffset      = 0;

    return angle::Result::Continue;
}
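
// Typical end-of-frame flow (a hedged sketch; "ctx" and "pool" are
// illustrative names):
//
//   // ... many allocate()/commit() calls during the frame ...
//   ANGLE_TRY(pool.finalizePendingBuffer(ctx));  // retire the current buffer
//   pool.releaseInFlightBuffers(ctx);            // recycle retired buffers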

void BufferPool::releaseInFlightBuffers(ContextMtl *contextMtl)
{
    for (auto &toRelease : mInFlightBuffers)
    {
        // If the dynamic buffer was resized, we cannot reuse the retained buffer.
        if (toRelease->size() < mSize
#if TARGET_OS_OSX || TARGET_OS_MACCATALYST
            // Also release the buffer if it was allocated with a different memory policy.
            || toRelease->useSharedMem() != shouldAllocateInSharedMem(contextMtl)
#endif
        )
        {
            toRelease = nullptr;
            mBuffersAllocated--;
        }
        else
        {
            mBufferFreeList.push_back(toRelease);
        }
    }

    mInFlightBuffers.clear();
}

void BufferPool::destroyBufferList(ContextMtl *contextMtl, std::deque<BufferRef> *buffers)
{
    ASSERT(mBuffersAllocated >= buffers->size());
    mBuffersAllocated -= buffers->size();
    buffers->clear();
}

void BufferPool::destroy(ContextMtl *contextMtl)
{
    destroyBufferList(contextMtl, &mInFlightBuffers);
    destroyBufferList(contextMtl, &mBufferFreeList);

    reset();

    if (mBuffer)
    {
        mBuffer->unmap(contextMtl);

        mBuffer = nullptr;
    }
}

void BufferPool::updateAlignment(Context *context, size_t alignment)
{
    ASSERT(alignment > 0);

    // NOTE(hqle): May check additional platform limits.

    // If alignment has changed, make sure the next allocation is done at an aligned offset.
    if (alignment != mAlignment)
    {
        mNextAllocationOffset = roundUp(mNextAllocationOffset, static_cast<uint32_t>(alignment));
        mAlignment            = alignment;
    }
}
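
// Worked example of the rounding above (illustrative values): if
// mNextAllocationOffset == 10 and the alignment changes to 16, then
// roundUp(10, 16) == 16, so the next allocation starts at offset 16.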

void BufferPool::reset()
{
    mSize                    = 0;
    mNextAllocationOffset    = 0;
    mLastFlushOffset         = 0;
    mMaxBuffers              = 0;
    mAlwaysAllocateNewBuffer = false;
    mBuffersAllocated        = 0;
}
}  // namespace mtl
}  // namespace rx