//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// mtl_buffer_pool.mm:
//    Implements the class methods for BufferPool.
//

#include "libANGLE/renderer/metal/mtl_buffer_pool.h"

#include "libANGLE/renderer/metal/ContextMtl.h"
#include "libANGLE/renderer/metal/DisplayMtl.h"

namespace rx
{

namespace mtl
{

// BufferPool implementation.
BufferPool::BufferPool() : BufferPool(false) {}

BufferPool::BufferPool(bool alwaysAllocNewBuffer)
    : mInitialSize(0),
      mBuffer(nullptr),
      mNextAllocationOffset(0),
      mLastFlushOffset(0),
      mSize(0),
      mAlignment(1),
      mBuffersAllocated(0),
      mMaxBuffers(0),
      mAlwaysAllocateNewBuffer(alwaysAllocNewBuffer)
{}

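// Resets the pool with a new initial size, alignment, and buffer count limit. Free-list
// buffers are recycled when they are still large enough; otherwise they are dropped.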
angle::Result BufferPool::reset(ContextMtl *contextMtl,
                                size_t initialSize,
                                size_t alignment,
                                size_t maxBuffers)
{
    ANGLE_TRY(finalizePendingBuffer(contextMtl));
    releaseInFlightBuffers(contextMtl);

    mSize = 0;
    if (mBufferFreeList.size() && mInitialSize <= mBufferFreeList.front()->size())
    {
        // Instead of deleting the old buffers, reset them to avoid excessive memory
        // re-allocations.
        if (maxBuffers && mBufferFreeList.size() > maxBuffers)
        {
            mBufferFreeList.resize(maxBuffers);
            mBuffersAllocated = maxBuffers;
        }

        mSize = mBufferFreeList.front()->size();
        for (size_t i = 0; i < mBufferFreeList.size(); ++i)
        {
            BufferRef &buffer = mBufferFreeList[i];
            if (!buffer->isBeingUsedByGPU(contextMtl))
            {
                // If the buffer is not in use by the GPU, it can be re-used immediately.
                continue;
            }
            if (IsError(buffer->reset(contextMtl, storageMode(contextMtl), mSize, nullptr)))
            {
                mBufferFreeList.clear();
                mBuffersAllocated = 0;
                mSize             = 0;
                break;
            }
        }
    }
    else
    {
        mBufferFreeList.clear();
        mBuffersAllocated = 0;
    }

    mInitialSize = initialSize;

    mMaxBuffers = maxBuffers;

    updateAlignment(contextMtl, alignment);

    return angle::Result::Continue;
}

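// One-time setup of an empty pool. Unlike reset(), this must not be called while the pool
// still owns buffers; call destroy() first.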
void BufferPool::initialize(Context *context,
                            size_t initialSize,
                            size_t alignment,
                            size_t maxBuffers)
{
    if (mBuffersAllocated)
    {
        // Invalid call, must call destroy() first.
        UNREACHABLE();
    }

    mInitialSize = initialSize;

    mMaxBuffers = maxBuffers;

    updateAlignment(context, alignment);
}

BufferPool::~BufferPool() {}

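// Picks the storage mode for the pool's buffers. On macOS, buffers beyond a size hint use
// managed storage, so CPU writes must be flushed to the GPU copy in explicit ranges.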
MTLStorageMode BufferPool::storageMode(ContextMtl *contextMtl) const
{
#if TARGET_OS_OSX || TARGET_OS_MACCATALYST
    if (mSize > kSharedMemBufferMaxBufSizeHint)
    {
        return MTLStorageModeManaged;
    }
#endif
    return Buffer::getStorageModeForSharedBuffer(contextMtl);
}

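// Ensures mBuffer points at a usable buffer. When the pool has hit its buffer limit, the
// oldest free-list entry is reused (waiting on the GPU if necessary); otherwise a new
// buffer is allocated.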
angle::Result BufferPool::allocateNewBuffer(ContextMtl *contextMtl)
{
    if (mMaxBuffers > 0 && mBuffersAllocated >= mMaxBuffers)
    {
        // We have reached the maximum number of buffers allowed.
        // Try to deallocate old and too-small in-flight buffers.
        releaseInFlightBuffers(contextMtl);
    }

    if (mMaxBuffers > 0 && mBuffersAllocated >= mMaxBuffers)
    {
        // If we reach this point, releaseInFlightBuffers() did not deallocate any buffer,
        // so the number of allocated buffers still exceeds the allowed maximum.
        ASSERT(!mBufferFreeList.empty());

        // Reuse the buffer in the free list:
        if (mBufferFreeList.front()->isBeingUsedByGPU(contextMtl))
        {
            contextMtl->flushCommandBuffer(mtl::NoWait);
            // Force the GPU to finish its rendering and make the old buffer available.
            contextMtl->cmdQueue().ensureResourceReadyForCPU(mBufferFreeList.front());
        }

        mBuffer = mBufferFreeList.front();
        mBufferFreeList.erase(mBufferFreeList.begin());

        return angle::Result::Continue;
    }

    ANGLE_TRY(Buffer::MakeBufferWithStorageMode(contextMtl, storageMode(contextMtl), mSize, nullptr,
                                                &mBuffer));

    ASSERT(mBuffer);

    mBuffersAllocated++;

    return angle::Result::Continue;
}

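// Sub-allocates sizeInBytes (rounded up to mAlignment) from the current buffer, switching
// to a recycled or new buffer when the current one is absent, full, GPU-dirty, or when
// mAlwaysAllocateNewBuffer is set. A minimal usage sketch (names other than the BufferPool
// methods are hypothetical):
//
//     uint8_t *ptr  = nullptr;
//     BufferRef buffer;
//     size_t offset = 0;
//     ANGLE_TRY(pool.allocate(contextMtl, bytes, &ptr, &buffer, &offset, nullptr));
//     memcpy(ptr, srcData, bytes);         // write into the sub-allocated region
//     ANGLE_TRY(pool.commit(contextMtl));  // flush the written range for the GPU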
angle::Result BufferPool::allocate(ContextMtl *contextMtl,
                                   size_t sizeInBytes,
                                   uint8_t **ptrOut,
                                   BufferRef *bufferOut,
                                   size_t *offsetOut,
                                   bool *newBufferAllocatedOut)
{
    size_t sizeToAllocate = roundUp(sizeInBytes, mAlignment);

    angle::base::CheckedNumeric<size_t> checkedNextWriteOffset = mNextAllocationOffset;
    checkedNextWriteOffset += sizeToAllocate;

    if (!mBuffer || !checkedNextWriteOffset.IsValid() ||
        checkedNextWriteOffset.ValueOrDie() >= mSize ||
        // If the current buffer has been modified by the GPU, do not reuse it:
        mBuffer->isCPUReadMemNeedSync() || mAlwaysAllocateNewBuffer)
    {
        if (mBuffer)
        {
            ANGLE_TRY(finalizePendingBuffer(contextMtl));
        }

        if (sizeToAllocate > mSize)
        {
            mSize = std::max(mInitialSize, sizeToAllocate);

            // Clear the free list since the free buffers are now too small.
            destroyBufferList(contextMtl, &mBufferFreeList);
        }

        // The front of the free list should be the oldest. Thus if it is in use the rest of the
        // free list should be in use as well.
        if (mBufferFreeList.empty() || mBufferFreeList.front()->isBeingUsedByGPU(contextMtl))
        {
            ANGLE_TRY(allocateNewBuffer(contextMtl));
        }
        else
        {
            mBuffer = mBufferFreeList.front();
            mBufferFreeList.erase(mBufferFreeList.begin());
        }

        ASSERT(mBuffer->size() == mSize);

        mNextAllocationOffset = 0;
        mLastFlushOffset      = 0;

        if (newBufferAllocatedOut != nullptr)
        {
            *newBufferAllocatedOut = true;
        }
    }
    else if (newBufferAllocatedOut != nullptr)
    {
        *newBufferAllocatedOut = false;
    }

    ASSERT(mBuffer != nullptr);

    if (bufferOut != nullptr)
    {
        *bufferOut = mBuffer;
    }

    // Optionally map() the buffer and return a write pointer into the allocated region.
    if (ptrOut)
    {
        // We don't need to synchronize with GPU access, since allocation should return a
        // non-overlapped region each time.
        *ptrOut = mBuffer->mapWithOpt(contextMtl, /** readOnly */ false, /** noSync */ true) +
                  mNextAllocationOffset;
    }

    if (offsetOut)
    {
        *offsetOut = static_cast<size_t>(mNextAllocationOffset);
    }
    mNextAllocationOffset += static_cast<uint32_t>(sizeToAllocate);
    return angle::Result::Continue;
}

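// Flushes CPU writes made since the previous flush so they become visible to the GPU.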
angle::Result BufferPool::commit(ContextMtl *contextMtl, bool flushEntireBuffer)
{
    if (mBuffer && mNextAllocationOffset > mLastFlushOffset)
    {
        if (flushEntireBuffer)
        {
            mBuffer->flush(contextMtl, 0, mLastFlushOffset);
        }
        else
        {
            mBuffer->flush(contextMtl, mLastFlushOffset, mNextAllocationOffset - mLastFlushOffset);
        }
        mLastFlushOffset = mNextAllocationOffset;
    }
    return angle::Result::Continue;
}

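// Retires the current buffer: commits pending writes, unmaps it, and moves it to the
// in-flight list until releaseInFlightBuffers() returns it to the free list.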
angle::Result BufferPool::finalizePendingBuffer(ContextMtl *contextMtl)
{
    if (mBuffer)
    {
        ANGLE_TRY(commit(contextMtl));
        // commit() already flushes so no need to flush here.
        mBuffer->unmapNoFlush(contextMtl);

        mInFlightBuffers.push_back(mBuffer);
        mBuffer = nullptr;
    }

    mNextAllocationOffset = 0;
    mLastFlushOffset      = 0;

    return angle::Result::Continue;
}

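// Moves in-flight buffers back to the free list, dropping any that are now too small for
// the pool (or, on macOS, that no longer match the desired storage mode).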
void BufferPool::releaseInFlightBuffers(ContextMtl *contextMtl)
{
    for (auto &toRelease : mInFlightBuffers)
    {
        // If the dynamic buffer was resized we cannot reuse the retained buffer.
        if (toRelease->size() < mSize
#if TARGET_OS_OSX || TARGET_OS_MACCATALYST
            // Also release the buffer if it was allocated with a different storage mode.
            || toRelease->storageMode() != storageMode(contextMtl)
#endif
        )
        {
            toRelease = nullptr;
            mBuffersAllocated--;
        }

        // Need to maintain the requirement of the free list that buffers in use
        // by the GPU are stored in FIFO order and that after the first in-use
        // buffer, the rest of the free list is in-use as well. To achieve this
        // in-use buffers are appended to the end of the free list and free buffers
        // are prepended to the beginning of the free list to maintain the following:
        //
        //  +------+------+-------+-------+-------+
        //  | Free | Free | Inuse |  ...  | Inuse |
        //  +------+------+-------+-------+-------+
        //  ^             ^               ^-------- Youngest, in-use buffer
        //  |             +------------------------ Oldest, in-use buffer
        //  +-------------------------------------- First, free buffer
        else if (toRelease->isBeingUsedByGPU(contextMtl))
        {
            mBufferFreeList.push_back(toRelease);
        }
        else
        {
            mBufferFreeList.push_front(toRelease);
        }
    }

    mInFlightBuffers.clear();
}

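// Drops every buffer in the given list and decreases the allocation count accordingly.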
void BufferPool::destroyBufferList(ContextMtl *contextMtl, std::deque<BufferRef> *buffers)
{
    ASSERT(mBuffersAllocated >= buffers->size());
    mBuffersAllocated -= buffers->size();
    buffers->clear();
}

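// Releases all buffers owned by the pool and restores its default state.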
void BufferPool::destroy(ContextMtl *contextMtl)
{
    destroyBufferList(contextMtl, &mInFlightBuffers);
    destroyBufferList(contextMtl, &mBufferFreeList);

    reset();

    if (mBuffer)
    {
        mBuffer->unmap(contextMtl);

        mBuffer = nullptr;
    }
}

void BufferPool::updateAlignment(Context *context, size_t alignment)
{
    ASSERT(alignment > 0);

    // NOTE(hqle): May check additional platform limits.

    // If alignment has changed, make sure the next allocation is done at an aligned offset.
    if (alignment != mAlignment)
    {
        mNextAllocationOffset = roundUp(mNextAllocationOffset, static_cast<uint32_t>(alignment));
        mAlignment            = alignment;
    }
}

void BufferPool::reset()
{
    mSize                    = 0;
    mNextAllocationOffset    = 0;
    mLastFlushOffset         = 0;
    mMaxBuffers              = 0;
    mAlwaysAllocateNewBuffer = false;
    mBuffersAllocated        = 0;
}
}  // namespace mtl
}  // namespace rx